repository_name
stringlengths
7
55
func_path_in_repository
stringlengths
4
223
func_name
stringlengths
1
134
whole_func_string
stringlengths
75
104k
language
stringclasses
1 value
func_code_string
stringlengths
75
104k
func_code_tokens
listlengths
19
28.4k
func_documentation_string
stringlengths
1
46.9k
func_documentation_tokens
listlengths
1
1.97k
split_name
stringclasses
1 value
func_code_url
stringlengths
87
315
nickmckay/LiPD-utilities
Python/lipd/inferred_data.py
_get_inferred_data_res
def _get_inferred_data_res(column, age): """ Calculate Resolution and m/m/m/m for column values. :param dict column: Column data :param list age: Age values :return dict column: Column data - modified """ try: with warnings.catch_warnings(): warnings.simplefilter("ignore") # Get the values for this column values = column["values"] # Make sure that age and values are numpy arrays _values = np.array(copy.copy(values), dtype=float) # _values = _values[np.where(~np.isnan(_values))[0]] _age = np.array(age, dtype=float) # _age = _age[np.where(~np.isnan(_age))[0]] # If we have values, keep going if len(_values) != 0: # Get the resolution for this age and column values data res = _get_resolution(_age, _values) # If we have successful resolution data, keep going if len(res) != 0: column["hasResolution"] = __get_inferred_data_res_2(res) # Remove the NaNs from the values list. _values = _values[np.where(~np.isnan(_values))[0]] # Calculate column non-resolution data, update the column with the results. column.update(__get_inferred_data_res_2(_values)) except KeyError as e: logger_inferred_data.debug("get_inferred_data_column: KeyError: {}".format(e)) except Exception as e: logger_inferred_data.debug("get_inferred_data_column: Exception: {}".format(e)) return column
python
def _get_inferred_data_res(column, age): """ Calculate Resolution and m/m/m/m for column values. :param dict column: Column data :param list age: Age values :return dict column: Column data - modified """ try: with warnings.catch_warnings(): warnings.simplefilter("ignore") # Get the values for this column values = column["values"] # Make sure that age and values are numpy arrays _values = np.array(copy.copy(values), dtype=float) # _values = _values[np.where(~np.isnan(_values))[0]] _age = np.array(age, dtype=float) # _age = _age[np.where(~np.isnan(_age))[0]] # If we have values, keep going if len(_values) != 0: # Get the resolution for this age and column values data res = _get_resolution(_age, _values) # If we have successful resolution data, keep going if len(res) != 0: column["hasResolution"] = __get_inferred_data_res_2(res) # Remove the NaNs from the values list. _values = _values[np.where(~np.isnan(_values))[0]] # Calculate column non-resolution data, update the column with the results. column.update(__get_inferred_data_res_2(_values)) except KeyError as e: logger_inferred_data.debug("get_inferred_data_column: KeyError: {}".format(e)) except Exception as e: logger_inferred_data.debug("get_inferred_data_column: Exception: {}".format(e)) return column
[ "def", "_get_inferred_data_res", "(", "column", ",", "age", ")", ":", "try", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "# Get the values for this column", "values", "=", "column", "[",...
Calculate Resolution and m/m/m/m for column values. :param dict column: Column data :param list age: Age values :return dict column: Column data - modified
[ "Calculate", "Resolution", "and", "m", "/", "m", "/", "m", "/", "m", "for", "column", "values", ".", ":", "param", "dict", "column", ":", "Column", "data", ":", "param", "list", "age", ":", "Age", "values", ":", "return", "dict", "column", ":", "Colu...
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/inferred_data.py#L187-L223
nickmckay/LiPD-utilities
Python/lipd/inferred_data.py
_get_inferred_data_column
def _get_inferred_data_column(column): """ Calculate the m/m/m/m for column values. :param dict column: Column data :return dict column: Column data - modified """ try: with warnings.catch_warnings(): warnings.simplefilter("ignore") # Get the values for this column values = column["values"] # Make sure that age and values are numpy arrays _values = np.array(copy.copy(values), dtype=float) # If we have values, keep going if len(_values) != 0: # Remove the NaNs from the values list. _values = _values[np.where(~np.isnan(_values))[0]] # Use the values to create new entries and data column.update(__get_inferred_data_res_2(_values)) # Even though we're not calculating resolution, still add it with "NaN" placeholders. column["hasResolution"] = __get_inferred_data_res_2(None, calc=False) except KeyError as e: logger_inferred_data.debug("get_inferred_data_column: KeyError: {}".format(e)) except Exception as e: logger_inferred_data.debug("get_inferred_data_column: Exception: {}".format(e)) return column
python
def _get_inferred_data_column(column): """ Calculate the m/m/m/m for column values. :param dict column: Column data :return dict column: Column data - modified """ try: with warnings.catch_warnings(): warnings.simplefilter("ignore") # Get the values for this column values = column["values"] # Make sure that age and values are numpy arrays _values = np.array(copy.copy(values), dtype=float) # If we have values, keep going if len(_values) != 0: # Remove the NaNs from the values list. _values = _values[np.where(~np.isnan(_values))[0]] # Use the values to create new entries and data column.update(__get_inferred_data_res_2(_values)) # Even though we're not calculating resolution, still add it with "NaN" placeholders. column["hasResolution"] = __get_inferred_data_res_2(None, calc=False) except KeyError as e: logger_inferred_data.debug("get_inferred_data_column: KeyError: {}".format(e)) except Exception as e: logger_inferred_data.debug("get_inferred_data_column: Exception: {}".format(e)) return column
[ "def", "_get_inferred_data_column", "(", "column", ")", ":", "try", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "# Get the values for this column", "values", "=", "column", "[", "\"values\...
Calculate the m/m/m/m for column values. :param dict column: Column data :return dict column: Column data - modified
[ "Calculate", "the", "m", "/", "m", "/", "m", "/", "m", "for", "column", "values", ".", ":", "param", "dict", "column", ":", "Column", "data", ":", "return", "dict", "column", ":", "Column", "data", "-", "modified" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/inferred_data.py#L226-L254
nickmckay/LiPD-utilities
Python/lipd/inferred_data.py
get_inferred_data_table
def get_inferred_data_table(table, pc): """ Table level: Dive down, calculate data, then return the new table with the inferred data. :param str pc: paleo or chron :param dict table: Metadata :return dict table: Metadata """ age = None if pc == "paleo": # Get the age values data first, since it's needed to calculate the other column data. age = _get_age(table["columns"]) try: # If age values were not found, then skip resolution. if age: # Loop for all the columns in the table for var, col in table["columns"].items(): # Special cases # We do not calculate data for each of the keys below, and we cannot calculate any "string" data if "age" in var or "year" in var: # Calculate m/m/m/m, but not resolution table["columns"][var] = _get_inferred_data_column(col) elif not all(isinstance(i, str) for i in col["values"]): # Calculate m/m/m/m and resolution table["columns"][var] = _get_inferred_data_res(col, age) else: # Fall through case. No calculations made. logger_inferred_data.info("get_inferred_data_table: " "Not calculating inferred data for variableName: {}".format(var)) # If there isn't an age, still calculate the m/m/m/m for the column values. else: for var, col in table["columns"].items(): if not all(isinstance(i, str) for i in col["values"]): # Calculate m/m/m/m and resolution table["columns"][var] = _get_inferred_data_column(col) else: # Fall through case. No calculations made. logger_inferred_data.info("get_inferred_data_table: " "Not calculating inferred data for variableName: {}".format(var)) except AttributeError as e: logger_inferred_data.warn("get_inferred_data_table: AttributeError: {}".format(e)) except Exception as e: logger_inferred_data.warn("get_inferred_data_table: Exception: {}".format(e)) table["columns"] = _fix_numeric_types(table["columns"]) return table
python
def get_inferred_data_table(table, pc): """ Table level: Dive down, calculate data, then return the new table with the inferred data. :param str pc: paleo or chron :param dict table: Metadata :return dict table: Metadata """ age = None if pc == "paleo": # Get the age values data first, since it's needed to calculate the other column data. age = _get_age(table["columns"]) try: # If age values were not found, then skip resolution. if age: # Loop for all the columns in the table for var, col in table["columns"].items(): # Special cases # We do not calculate data for each of the keys below, and we cannot calculate any "string" data if "age" in var or "year" in var: # Calculate m/m/m/m, but not resolution table["columns"][var] = _get_inferred_data_column(col) elif not all(isinstance(i, str) for i in col["values"]): # Calculate m/m/m/m and resolution table["columns"][var] = _get_inferred_data_res(col, age) else: # Fall through case. No calculations made. logger_inferred_data.info("get_inferred_data_table: " "Not calculating inferred data for variableName: {}".format(var)) # If there isn't an age, still calculate the m/m/m/m for the column values. else: for var, col in table["columns"].items(): if not all(isinstance(i, str) for i in col["values"]): # Calculate m/m/m/m and resolution table["columns"][var] = _get_inferred_data_column(col) else: # Fall through case. No calculations made. logger_inferred_data.info("get_inferred_data_table: " "Not calculating inferred data for variableName: {}".format(var)) except AttributeError as e: logger_inferred_data.warn("get_inferred_data_table: AttributeError: {}".format(e)) except Exception as e: logger_inferred_data.warn("get_inferred_data_table: Exception: {}".format(e)) table["columns"] = _fix_numeric_types(table["columns"]) return table
[ "def", "get_inferred_data_table", "(", "table", ",", "pc", ")", ":", "age", "=", "None", "if", "pc", "==", "\"paleo\"", ":", "# Get the age values data first, since it's needed to calculate the other column data.", "age", "=", "_get_age", "(", "table", "[", "\"columns\"...
Table level: Dive down, calculate data, then return the new table with the inferred data. :param str pc: paleo or chron :param dict table: Metadata :return dict table: Metadata
[ "Table", "level", ":", "Dive", "down", "calculate", "data", "then", "return", "the", "new", "table", "with", "the", "inferred", "data", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/inferred_data.py#L257-L305
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/functions/functions.py
register_func
def register_func(env, need_function): """ Registers a new sphinx-needs function for the given sphinx environment. :param env: Sphinx environment :param need_function: Python method :return: None """ if not hasattr(env, 'needs_functions'): env.needs_functions = {} func_name = need_function.__name__ if func_name in env.needs_functions.keys(): raise SphinxError('sphinx-needs: Function name {} already registered.'.format(func_name)) env.needs_functions[func_name] = { 'name': func_name, 'function': need_function }
python
def register_func(env, need_function): """ Registers a new sphinx-needs function for the given sphinx environment. :param env: Sphinx environment :param need_function: Python method :return: None """ if not hasattr(env, 'needs_functions'): env.needs_functions = {} func_name = need_function.__name__ if func_name in env.needs_functions.keys(): raise SphinxError('sphinx-needs: Function name {} already registered.'.format(func_name)) env.needs_functions[func_name] = { 'name': func_name, 'function': need_function }
[ "def", "register_func", "(", "env", ",", "need_function", ")", ":", "if", "not", "hasattr", "(", "env", ",", "'needs_functions'", ")", ":", "env", ".", "needs_functions", "=", "{", "}", "func_name", "=", "need_function", ".", "__name__", "if", "func_name", ...
Registers a new sphinx-needs function for the given sphinx environment. :param env: Sphinx environment :param need_function: Python method :return: None
[ "Registers", "a", "new", "sphinx", "-", "needs", "function", "for", "the", "given", "sphinx", "environment", ".", ":", "param", "env", ":", "Sphinx", "environment", ":", "param", "need_function", ":", "Python", "method", ":", "return", ":", "None" ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/functions/functions.py#L26-L45
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/functions/functions.py
execute_func
def execute_func(env, need, func_string): """ Executes a given function string. :param env: Sphinx environment :param need: Actual need, which contains the found function string :param func_string: string of the found function. Without [[ ]] :return: return value of executed function """ func_name, func_args, func_kwargs = _analyze_func_string(func_string) if func_name not in env.needs_functions.keys(): raise SphinxError('Unknown dynamic sphinx-needs function: {}. Found in need: {}'.format(func_name, need['id'])) func = env.needs_functions[func_name]['function'] func_return = func(env, need, env.needs_all_needs, *func_args, **func_kwargs) if not isinstance(func_return, (str, int, float, list, unicode)) and func_return is not None: raise SphinxError('Return value of function {} is of type {}. Allowed are str, int, float'.format( func_name, type(func_return))) if isinstance(func_return, list): for element in func_return: if not isinstance(element, (str, int, float, unicode)): raise SphinxError('Element of return list of function {} is of type {}. ' 'Allowed are str, int, float'.format(func_name, type(func_return))) return func_return
python
def execute_func(env, need, func_string): """ Executes a given function string. :param env: Sphinx environment :param need: Actual need, which contains the found function string :param func_string: string of the found function. Without [[ ]] :return: return value of executed function """ func_name, func_args, func_kwargs = _analyze_func_string(func_string) if func_name not in env.needs_functions.keys(): raise SphinxError('Unknown dynamic sphinx-needs function: {}. Found in need: {}'.format(func_name, need['id'])) func = env.needs_functions[func_name]['function'] func_return = func(env, need, env.needs_all_needs, *func_args, **func_kwargs) if not isinstance(func_return, (str, int, float, list, unicode)) and func_return is not None: raise SphinxError('Return value of function {} is of type {}. Allowed are str, int, float'.format( func_name, type(func_return))) if isinstance(func_return, list): for element in func_return: if not isinstance(element, (str, int, float, unicode)): raise SphinxError('Element of return list of function {} is of type {}. ' 'Allowed are str, int, float'.format(func_name, type(func_return))) return func_return
[ "def", "execute_func", "(", "env", ",", "need", ",", "func_string", ")", ":", "func_name", ",", "func_args", ",", "func_kwargs", "=", "_analyze_func_string", "(", "func_string", ")", "if", "func_name", "not", "in", "env", ".", "needs_functions", ".", "keys", ...
Executes a given function string. :param env: Sphinx environment :param need: Actual need, which contains the found function string :param func_string: string of the found function. Without [[ ]] :return: return value of executed function
[ "Executes", "a", "given", "function", "string", ".", ":", "param", "env", ":", "Sphinx", "environment", ":", "param", "need", ":", "Actual", "need", "which", "contains", "the", "found", "function", "string", ":", "param", "func_string", ":", "string", "of", ...
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/functions/functions.py#L48-L73
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/functions/functions.py
find_and_replace_node_content
def find_and_replace_node_content(node, env, need): """ Search inside a given node and its children for nodes of type Text, if found check if it contains a function string and run/replace it. :param node: Node to analyse :return: None """ new_children = [] if not node.children: if isinstance(node, nodes.Text): func_match = func_pattern.findall(node) new_text = node for func_string in func_match: if not is_python3: func_string = func_string.encode('utf-8') # sphinx is replacing ' and " with language specific quotation marks (up and down), which makes # it impossible for the later used AST render engine to detect a python function call in the given # string. Therefor a replacement is needed for the execution of the found string. func_string_org = func_string[:] func_string = func_string.replace('„', '"') func_string = func_string.replace('“', '"') func_string = func_string.replace('”', '"') func_string = func_string.replace('”', '"') func_string = func_string.replace('‘', '"') func_string = func_string.replace('’', '"') func_return = execute_func(env, need, func_string) if not is_python3: new_text = new_text.replace(u'[[{}]]'.format(func_string_org.decode('utf-8')), func_return) else: new_text = new_text.replace(u'[[{}]]'.format(func_string_org), func_return) node = nodes.Text(new_text, new_text) return node else: for child in node.children: new_child = find_and_replace_node_content(child, env, need) new_children.append(new_child) node.children = new_children return node
python
def find_and_replace_node_content(node, env, need): """ Search inside a given node and its children for nodes of type Text, if found check if it contains a function string and run/replace it. :param node: Node to analyse :return: None """ new_children = [] if not node.children: if isinstance(node, nodes.Text): func_match = func_pattern.findall(node) new_text = node for func_string in func_match: if not is_python3: func_string = func_string.encode('utf-8') # sphinx is replacing ' and " with language specific quotation marks (up and down), which makes # it impossible for the later used AST render engine to detect a python function call in the given # string. Therefor a replacement is needed for the execution of the found string. func_string_org = func_string[:] func_string = func_string.replace('„', '"') func_string = func_string.replace('“', '"') func_string = func_string.replace('”', '"') func_string = func_string.replace('”', '"') func_string = func_string.replace('‘', '"') func_string = func_string.replace('’', '"') func_return = execute_func(env, need, func_string) if not is_python3: new_text = new_text.replace(u'[[{}]]'.format(func_string_org.decode('utf-8')), func_return) else: new_text = new_text.replace(u'[[{}]]'.format(func_string_org), func_return) node = nodes.Text(new_text, new_text) return node else: for child in node.children: new_child = find_and_replace_node_content(child, env, need) new_children.append(new_child) node.children = new_children return node
[ "def", "find_and_replace_node_content", "(", "node", ",", "env", ",", "need", ")", ":", "new_children", "=", "[", "]", "if", "not", "node", ".", "children", ":", "if", "isinstance", "(", "node", ",", "nodes", ".", "Text", ")", ":", "func_match", "=", "...
Search inside a given node and its children for nodes of type Text, if found check if it contains a function string and run/replace it. :param node: Node to analyse :return: None
[ "Search", "inside", "a", "given", "node", "and", "its", "children", "for", "nodes", "of", "type", "Text", "if", "found", "check", "if", "it", "contains", "a", "function", "string", "and", "run", "/", "replace", "it", "." ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/functions/functions.py#L79-L118
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/functions/functions.py
resolve_dynamic_values
def resolve_dynamic_values(env): """ Resolve dynamic values inside need data. Rough workflow: #. Parse all needs and their data for a string like [[ my_func(a,b,c) ]] #. Extract function name and call parameters #. Execute registered function name with extracted call parameters #. Replace original string with return value :param env: Sphinx environment :return: return value of given function """ # Only perform calculation if not already done yet if env.needs_workflow['dynamic_values_resolved']: return needs = env.needs_all_needs for key, need in needs.items(): for need_option in need: if need_option in ['docname', 'lineno', 'target_node', 'content']: # dynamic values in this data are not allowed. continue if not isinstance(need[need_option], (list, set)): func_call = True while func_call: try: func_call, func_return = _detect_and_execute(need[need_option], need, env) except FunctionParsingException: raise SphinxError("Function definition of {option} in file {file}:{line} has " "unsupported parameters. " "supported are str, int/float, list".format(option=need_option, file=need['docname'], line=need['lineno'])) if func_call is None: continue # Replace original function string with return value of function call if func_return is None: need[need_option] = need[need_option].replace('[[{}]]'.format(func_call), '') else: need[need_option] = need[need_option].replace('[[{}]]'.format(func_call), str(func_return)) if need[need_option] == '': need[need_option] = None else: new_values = [] for element in need[need_option]: try: func_call, func_return = _detect_and_execute(element, need, env) except FunctionParsingException: raise SphinxError("Function definition of {option} in file {file}:{line} has " "unsupported parameters. " "supported are str, int/float, list".format(option=need_option, file=need['docname'], line=need['lineno'])) if func_call is None: new_values.append(element) else: # Replace original function string with return value of function call if isinstance(need[need_option], (str, int, float)): new_values.append(element.replace('[[{}]]'.format(func_call), str(func_return))) else: if isinstance(need[need_option], (list, set)): new_values += func_return need[need_option] = new_values # Finally set a flag so that this function gets not executed several times env.needs_workflow['dynamic_values_resolved'] = True
python
def resolve_dynamic_values(env): """ Resolve dynamic values inside need data. Rough workflow: #. Parse all needs and their data for a string like [[ my_func(a,b,c) ]] #. Extract function name and call parameters #. Execute registered function name with extracted call parameters #. Replace original string with return value :param env: Sphinx environment :return: return value of given function """ # Only perform calculation if not already done yet if env.needs_workflow['dynamic_values_resolved']: return needs = env.needs_all_needs for key, need in needs.items(): for need_option in need: if need_option in ['docname', 'lineno', 'target_node', 'content']: # dynamic values in this data are not allowed. continue if not isinstance(need[need_option], (list, set)): func_call = True while func_call: try: func_call, func_return = _detect_and_execute(need[need_option], need, env) except FunctionParsingException: raise SphinxError("Function definition of {option} in file {file}:{line} has " "unsupported parameters. " "supported are str, int/float, list".format(option=need_option, file=need['docname'], line=need['lineno'])) if func_call is None: continue # Replace original function string with return value of function call if func_return is None: need[need_option] = need[need_option].replace('[[{}]]'.format(func_call), '') else: need[need_option] = need[need_option].replace('[[{}]]'.format(func_call), str(func_return)) if need[need_option] == '': need[need_option] = None else: new_values = [] for element in need[need_option]: try: func_call, func_return = _detect_and_execute(element, need, env) except FunctionParsingException: raise SphinxError("Function definition of {option} in file {file}:{line} has " "unsupported parameters. " "supported are str, int/float, list".format(option=need_option, file=need['docname'], line=need['lineno'])) if func_call is None: new_values.append(element) else: # Replace original function string with return value of function call if isinstance(need[need_option], (str, int, float)): new_values.append(element.replace('[[{}]]'.format(func_call), str(func_return))) else: if isinstance(need[need_option], (list, set)): new_values += func_return need[need_option] = new_values # Finally set a flag so that this function gets not executed several times env.needs_workflow['dynamic_values_resolved'] = True
[ "def", "resolve_dynamic_values", "(", "env", ")", ":", "# Only perform calculation if not already done yet", "if", "env", ".", "needs_workflow", "[", "'dynamic_values_resolved'", "]", ":", "return", "needs", "=", "env", ".", "needs_all_needs", "for", "key", ",", "need...
Resolve dynamic values inside need data. Rough workflow: #. Parse all needs and their data for a string like [[ my_func(a,b,c) ]] #. Extract function name and call parameters #. Execute registered function name with extracted call parameters #. Replace original string with return value :param env: Sphinx environment :return: return value of given function
[ "Resolve", "dynamic", "values", "inside", "need", "data", "." ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/functions/functions.py#L121-L191
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/functions/functions.py
_analyze_func_string
def _analyze_func_string(func_string): """ Analyze given functiion string an extract: * function name * function arguments * function keyword arguments All given arguments must by of type string, int/float or list. :param func_string: string of the function :return: function name, arguments, keyword arguments """ func = ast.parse(func_string) try: func_call = func.body[0].value func_name = func_call.func.id except AttributeError: raise SphinxError("Given dynamic function string is not a valid python call. Got: {}".format(func_string)) func_args = [] for arg in func_call.args: if isinstance(arg, ast.Num): func_args.append(arg.n) elif isinstance(arg, ast.Str): func_args.append(arg.s) elif isinstance(arg, ast.BoolOp): func_args.append(arg.s) elif isinstance(arg, ast.List): arg_list = [] for element in arg.elts: if isinstance(element, ast.Num): arg_list.append(element.n) elif isinstance(element, ast.Str): arg_list.append(element.s) func_args.append(arg_list) else: raise FunctionParsingException() func_kargs = {} for keyword in func_call.keywords: kvalue = keyword.value kkey = keyword.arg if isinstance(kvalue, ast.Num): func_kargs[kkey] = kvalue.n elif isinstance(kvalue, ast.Str): func_kargs[kkey] = kvalue.s elif isinstance(kvalue, ast_boolean): # Check if Boolean if is_python3: func_kargs[kkey] = kvalue.value else: func_kargs[kkey] = kvalue.id elif isinstance(kvalue, ast.List): arg_list = [] for element in kvalue.elts: if isinstance(element, ast.Num): arg_list.append(element.n) elif isinstance(element, ast.Str): arg_list.append(element.s) func_kargs[kkey] = arg_list else: raise FunctionParsingException() return func_name, func_args, func_kargs
python
def _analyze_func_string(func_string): """ Analyze given functiion string an extract: * function name * function arguments * function keyword arguments All given arguments must by of type string, int/float or list. :param func_string: string of the function :return: function name, arguments, keyword arguments """ func = ast.parse(func_string) try: func_call = func.body[0].value func_name = func_call.func.id except AttributeError: raise SphinxError("Given dynamic function string is not a valid python call. Got: {}".format(func_string)) func_args = [] for arg in func_call.args: if isinstance(arg, ast.Num): func_args.append(arg.n) elif isinstance(arg, ast.Str): func_args.append(arg.s) elif isinstance(arg, ast.BoolOp): func_args.append(arg.s) elif isinstance(arg, ast.List): arg_list = [] for element in arg.elts: if isinstance(element, ast.Num): arg_list.append(element.n) elif isinstance(element, ast.Str): arg_list.append(element.s) func_args.append(arg_list) else: raise FunctionParsingException() func_kargs = {} for keyword in func_call.keywords: kvalue = keyword.value kkey = keyword.arg if isinstance(kvalue, ast.Num): func_kargs[kkey] = kvalue.n elif isinstance(kvalue, ast.Str): func_kargs[kkey] = kvalue.s elif isinstance(kvalue, ast_boolean): # Check if Boolean if is_python3: func_kargs[kkey] = kvalue.value else: func_kargs[kkey] = kvalue.id elif isinstance(kvalue, ast.List): arg_list = [] for element in kvalue.elts: if isinstance(element, ast.Num): arg_list.append(element.n) elif isinstance(element, ast.Str): arg_list.append(element.s) func_kargs[kkey] = arg_list else: raise FunctionParsingException() return func_name, func_args, func_kargs
[ "def", "_analyze_func_string", "(", "func_string", ")", ":", "func", "=", "ast", ".", "parse", "(", "func_string", ")", "try", ":", "func_call", "=", "func", ".", "body", "[", "0", "]", ".", "value", "func_name", "=", "func_call", ".", "func", ".", "id...
Analyze given functiion string an extract: * function name * function arguments * function keyword arguments All given arguments must by of type string, int/float or list. :param func_string: string of the function :return: function name, arguments, keyword arguments
[ "Analyze", "given", "functiion", "string", "an", "extract", ":" ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/functions/functions.py#L210-L272
nickmckay/LiPD-utilities
Python/lipd/__init__.py
run
def run(): """ Initialize and start objects. This is called automatically when importing the package. :return none: """ # GLOBALS global cwd, files, logger_start, logger_benchmark, settings, _timeseries_data _timeseries_data = {} # files = {".lpd": [ {"full_path", "filename_ext", "filename_no_ext", "dir"} ], ".xls": [...], ".txt": [...]} settings = {"note_update": True, "note_validate": True, "verbose": True} cwd = os.getcwd() # logger created in whatever directory lipd is called from logger_start = create_logger("start") logger_benchmark = create_benchmark("benchmarks", "benchmark.log") files = {".txt": [], ".lpd": [], ".xls": []} return
python
def run(): """ Initialize and start objects. This is called automatically when importing the package. :return none: """ # GLOBALS global cwd, files, logger_start, logger_benchmark, settings, _timeseries_data _timeseries_data = {} # files = {".lpd": [ {"full_path", "filename_ext", "filename_no_ext", "dir"} ], ".xls": [...], ".txt": [...]} settings = {"note_update": True, "note_validate": True, "verbose": True} cwd = os.getcwd() # logger created in whatever directory lipd is called from logger_start = create_logger("start") logger_benchmark = create_benchmark("benchmarks", "benchmark.log") files = {".txt": [], ".lpd": [], ".xls": []} return
[ "def", "run", "(", ")", ":", "# GLOBALS", "global", "cwd", ",", "files", ",", "logger_start", ",", "logger_benchmark", ",", "settings", ",", "_timeseries_data", "_timeseries_data", "=", "{", "}", "# files = {\".lpd\": [ {\"full_path\", \"filename_ext\", \"filename_no_ext\...
Initialize and start objects. This is called automatically when importing the package. :return none:
[ "Initialize", "and", "start", "objects", ".", "This", "is", "called", "automatically", "when", "importing", "the", "package", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L28-L45
nickmckay/LiPD-utilities
Python/lipd/__init__.py
readLipd
def readLipd(usr_path=""): """ Read LiPD file(s). Enter a file path, directory path, or leave args blank to trigger gui. :param str usr_path: Path to file / directory (optional) :return dict _d: Metadata """ global cwd, settings, files if settings["verbose"]: __disclaimer(opt="update") start = clock() files[".lpd"] = [] __read(usr_path, ".lpd") _d = __read_lipd_contents() end = clock() logger_benchmark.info(log_benchmark("readLipd", start, end)) return _d
python
def readLipd(usr_path=""): """ Read LiPD file(s). Enter a file path, directory path, or leave args blank to trigger gui. :param str usr_path: Path to file / directory (optional) :return dict _d: Metadata """ global cwd, settings, files if settings["verbose"]: __disclaimer(opt="update") start = clock() files[".lpd"] = [] __read(usr_path, ".lpd") _d = __read_lipd_contents() end = clock() logger_benchmark.info(log_benchmark("readLipd", start, end)) return _d
[ "def", "readLipd", "(", "usr_path", "=", "\"\"", ")", ":", "global", "cwd", ",", "settings", ",", "files", "if", "settings", "[", "\"verbose\"", "]", ":", "__disclaimer", "(", "opt", "=", "\"update\"", ")", "start", "=", "clock", "(", ")", "files", "["...
Read LiPD file(s). Enter a file path, directory path, or leave args blank to trigger gui. :param str usr_path: Path to file / directory (optional) :return dict _d: Metadata
[ "Read", "LiPD", "file", "(", "s", ")", ".", "Enter", "a", "file", "path", "directory", "path", "or", "leave", "args", "blank", "to", "trigger", "gui", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L48-L65
nickmckay/LiPD-utilities
Python/lipd/__init__.py
readExcel
def readExcel(usr_path=""): """ Read Excel file(s) Enter a file path, directory path, or leave args blank to trigger gui. :param str usr_path: Path to file / directory (optional) :return str cwd: Current working directory """ global cwd, files start = clock() files[".xls"] = [] __read(usr_path, ".xls") end = clock() logger_benchmark.info(log_benchmark("readExcel", start, end)) return cwd
python
def readExcel(usr_path=""): """ Read Excel file(s) Enter a file path, directory path, or leave args blank to trigger gui. :param str usr_path: Path to file / directory (optional) :return str cwd: Current working directory """ global cwd, files start = clock() files[".xls"] = [] __read(usr_path, ".xls") end = clock() logger_benchmark.info(log_benchmark("readExcel", start, end)) return cwd
[ "def", "readExcel", "(", "usr_path", "=", "\"\"", ")", ":", "global", "cwd", ",", "files", "start", "=", "clock", "(", ")", "files", "[", "\".xls\"", "]", "=", "[", "]", "__read", "(", "usr_path", ",", "\".xls\"", ")", "end", "=", "clock", "(", ")"...
Read Excel file(s) Enter a file path, directory path, or leave args blank to trigger gui. :param str usr_path: Path to file / directory (optional) :return str cwd: Current working directory
[ "Read", "Excel", "file", "(", "s", ")", "Enter", "a", "file", "path", "directory", "path", "or", "leave", "args", "blank", "to", "trigger", "gui", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L68-L82
nickmckay/LiPD-utilities
Python/lipd/__init__.py
excel
def excel(): """ Convert Excel files to LiPD files. LiPD data is returned directly from this function. | Example | 1: lipd.readExcel() | 2: D = lipd.excel() :return dict _d: Metadata """ global files, cwd, settings _d = {} # Turn off verbose. We don't want to clutter the console with extra reading/writing output statements settings["verbose"] = False # Find excel files print("Found " + str(len(files[".xls"])) + " Excel files") logger_start.info("found excel files: {}".format(len(files[".xls"]))) # Start the clock start = clock() # Loop for each excel file for file in files[".xls"]: # Convert excel file to LiPD dsn = excel_main(file) try: # Read the new LiPD file back in, to get fixes, inferred calculations, updates, etc. _d[dsn] = readLipd(os.path.join(file["dir"], dsn + ".lpd")) # Write the modified LiPD file back out again. writeLipd(_d[dsn], cwd) except Exception as e: logger_start.error("excel: Unable to read new LiPD file, {}".format(e)) print("Error: Unable to read new LiPD file: {}, {}".format(dsn, e)) # Time! end = clock() logger_benchmark.info(log_benchmark("excel", start, end)) # Start printing stuff again. settings["verbose"] = True return _d
python
def excel(): """ Convert Excel files to LiPD files. LiPD data is returned directly from this function. | Example | 1: lipd.readExcel() | 2: D = lipd.excel() :return dict _d: Metadata """ global files, cwd, settings _d = {} # Turn off verbose. We don't want to clutter the console with extra reading/writing output statements settings["verbose"] = False # Find excel files print("Found " + str(len(files[".xls"])) + " Excel files") logger_start.info("found excel files: {}".format(len(files[".xls"]))) # Start the clock start = clock() # Loop for each excel file for file in files[".xls"]: # Convert excel file to LiPD dsn = excel_main(file) try: # Read the new LiPD file back in, to get fixes, inferred calculations, updates, etc. _d[dsn] = readLipd(os.path.join(file["dir"], dsn + ".lpd")) # Write the modified LiPD file back out again. writeLipd(_d[dsn], cwd) except Exception as e: logger_start.error("excel: Unable to read new LiPD file, {}".format(e)) print("Error: Unable to read new LiPD file: {}, {}".format(dsn, e)) # Time! end = clock() logger_benchmark.info(log_benchmark("excel", start, end)) # Start printing stuff again. settings["verbose"] = True return _d
[ "def", "excel", "(", ")", ":", "global", "files", ",", "cwd", ",", "settings", "_d", "=", "{", "}", "# Turn off verbose. We don't want to clutter the console with extra reading/writing output statements", "settings", "[", "\"verbose\"", "]", "=", "False", "# Find excel fi...
Convert Excel files to LiPD files. LiPD data is returned directly from this function. | Example | 1: lipd.readExcel() | 2: D = lipd.excel() :return dict _d: Metadata
[ "Convert", "Excel", "files", "to", "LiPD", "files", ".", "LiPD", "data", "is", "returned", "directly", "from", "this", "function", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L125-L161
nickmckay/LiPD-utilities
Python/lipd/__init__.py
noaa
def noaa(D="", path="", wds_url="", lpd_url="", version=""): """ Convert between NOAA and LiPD files | Example: LiPD to NOAA converter | 1: L = lipd.readLipd() | 2: lipd.noaa(L, "/Users/someuser/Desktop", "https://www1.ncdc.noaa.gov/pub/data/paleo/pages2k/NAm2kHydro-2017/noaa-templates/data-version-1.0.0", "https://www1.ncdc.noaa.gov/pub/data/paleo/pages2k/NAm2kHydro-2017/data-version-1.0.0", "v1-1.0.0") | Example: NOAA to LiPD converter | 1: lipd.readNoaa() | 2: lipd.noaa() :param dict D: Metadata :param str path: Path where output files will be written to :param str wds_url: WDSPaleoUrl, where NOAA template file will be stored on NOAA's FTP server :param str lpd_url: URL where LiPD file will be stored on NOAA's FTP server :param str version: Version of the dataset :return none: """ global files, cwd # When going from NOAA to LPD, use the global "files" variable. # When going from LPD to NOAA, use the data from the LiPD Library. # Choose the mode _mode = noaa_prompt() start = clock() # LiPD mode: Convert LiPD files to NOAA files if _mode == "1": # _project, _version = noaa_prompt_1() if not version or not lpd_url: print("Missing parameters: Please try again and provide all parameters.") return if not D: print("Error: LiPD data must be provided for LiPD -> NOAA conversions") else: if "paleoData" in D: _d = copy.deepcopy(D) D = lpd_to_noaa(_d, wds_url, lpd_url, version, path) else: # For each LiPD file in the LiPD Library for dsn, dat in D.items(): _d = copy.deepcopy(dat) # Process this data through the converter _d = lpd_to_noaa(_d, wds_url, lpd_url, version, path) # Overwrite the data in the LiPD object with our new data. D[dsn] = _d # If no wds url is provided, then remove instances from jsonld metadata if not wds_url: D = rm_wds_url(D) # Write out the new LiPD files, since they now contain the new NOAA URL data if(path): writeLipd(D, path) else: print("Path not provided. Writing to CWD...") writeLipd(D, cwd) # NOAA mode: Convert NOAA files to LiPD files elif _mode == "2": # Pass through the global files list. Use NOAA files directly on disk. noaa_to_lpd(files) else: print("Invalid input. Try again.") end = clock() logger_benchmark.info(log_benchmark("noaa", start, end)) return
python
def noaa(D="", path="", wds_url="", lpd_url="", version=""): """ Convert between NOAA and LiPD files | Example: LiPD to NOAA converter | 1: L = lipd.readLipd() | 2: lipd.noaa(L, "/Users/someuser/Desktop", "https://www1.ncdc.noaa.gov/pub/data/paleo/pages2k/NAm2kHydro-2017/noaa-templates/data-version-1.0.0", "https://www1.ncdc.noaa.gov/pub/data/paleo/pages2k/NAm2kHydro-2017/data-version-1.0.0", "v1-1.0.0") | Example: NOAA to LiPD converter | 1: lipd.readNoaa() | 2: lipd.noaa() :param dict D: Metadata :param str path: Path where output files will be written to :param str wds_url: WDSPaleoUrl, where NOAA template file will be stored on NOAA's FTP server :param str lpd_url: URL where LiPD file will be stored on NOAA's FTP server :param str version: Version of the dataset :return none: """ global files, cwd # When going from NOAA to LPD, use the global "files" variable. # When going from LPD to NOAA, use the data from the LiPD Library. # Choose the mode _mode = noaa_prompt() start = clock() # LiPD mode: Convert LiPD files to NOAA files if _mode == "1": # _project, _version = noaa_prompt_1() if not version or not lpd_url: print("Missing parameters: Please try again and provide all parameters.") return if not D: print("Error: LiPD data must be provided for LiPD -> NOAA conversions") else: if "paleoData" in D: _d = copy.deepcopy(D) D = lpd_to_noaa(_d, wds_url, lpd_url, version, path) else: # For each LiPD file in the LiPD Library for dsn, dat in D.items(): _d = copy.deepcopy(dat) # Process this data through the converter _d = lpd_to_noaa(_d, wds_url, lpd_url, version, path) # Overwrite the data in the LiPD object with our new data. D[dsn] = _d # If no wds url is provided, then remove instances from jsonld metadata if not wds_url: D = rm_wds_url(D) # Write out the new LiPD files, since they now contain the new NOAA URL data if(path): writeLipd(D, path) else: print("Path not provided. Writing to CWD...") writeLipd(D, cwd) # NOAA mode: Convert NOAA files to LiPD files elif _mode == "2": # Pass through the global files list. Use NOAA files directly on disk. noaa_to_lpd(files) else: print("Invalid input. Try again.") end = clock() logger_benchmark.info(log_benchmark("noaa", start, end)) return
[ "def", "noaa", "(", "D", "=", "\"\"", ",", "path", "=", "\"\"", ",", "wds_url", "=", "\"\"", ",", "lpd_url", "=", "\"\"", ",", "version", "=", "\"\"", ")", ":", "global", "files", ",", "cwd", "# When going from NOAA to LPD, use the global \"files\" variable.",...
Convert between NOAA and LiPD files | Example: LiPD to NOAA converter | 1: L = lipd.readLipd() | 2: lipd.noaa(L, "/Users/someuser/Desktop", "https://www1.ncdc.noaa.gov/pub/data/paleo/pages2k/NAm2kHydro-2017/noaa-templates/data-version-1.0.0", "https://www1.ncdc.noaa.gov/pub/data/paleo/pages2k/NAm2kHydro-2017/data-version-1.0.0", "v1-1.0.0") | Example: NOAA to LiPD converter | 1: lipd.readNoaa() | 2: lipd.noaa() :param dict D: Metadata :param str path: Path where output files will be written to :param str wds_url: WDSPaleoUrl, where NOAA template file will be stored on NOAA's FTP server :param str lpd_url: URL where LiPD file will be stored on NOAA's FTP server :param str version: Version of the dataset :return none:
[ "Convert", "between", "NOAA", "and", "LiPD", "files" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L164-L229
nickmckay/LiPD-utilities
Python/lipd/__init__.py
validate
def validate(D, detailed=True): """ Use the Validator API for lipd.net to validate all LiPD files in the LiPD Library. Display the PASS/FAIL results. Display detailed results if the option is chosen. :param dict D: Metadata (single or multiple datasets) :param bool detailed: Show or hide the detailed results of each LiPD file. Shows warnings and errors :return none: """ start = clock() print("\n") # Fetch new results by calling lipd.net/api/validator (costly, may take a while) print("Fetching results from validator at lipd.net/validator... this may take a few moments.\n") try: results = [] # Get the validator-formatted data for each dataset. if "paleoData" in D: _api_data = get_validator_format(D) # A list of lists of LiPD-content metadata results.append(call_validator_api(D["dataSetName"], _api_data)) else: for dsn, dat in D.items(): _api_data = get_validator_format(dat) # A list of lists of LiPD-content metadata results.append(call_validator_api(dsn, _api_data)) display_results(results, detailed) except Exception as e: print("Error: validate: {}".format(e)) end = clock() logger_benchmark.info(log_benchmark("validate", start, end)) return
python
def validate(D, detailed=True): """ Use the Validator API for lipd.net to validate all LiPD files in the LiPD Library. Display the PASS/FAIL results. Display detailed results if the option is chosen. :param dict D: Metadata (single or multiple datasets) :param bool detailed: Show or hide the detailed results of each LiPD file. Shows warnings and errors :return none: """ start = clock() print("\n") # Fetch new results by calling lipd.net/api/validator (costly, may take a while) print("Fetching results from validator at lipd.net/validator... this may take a few moments.\n") try: results = [] # Get the validator-formatted data for each dataset. if "paleoData" in D: _api_data = get_validator_format(D) # A list of lists of LiPD-content metadata results.append(call_validator_api(D["dataSetName"], _api_data)) else: for dsn, dat in D.items(): _api_data = get_validator_format(dat) # A list of lists of LiPD-content metadata results.append(call_validator_api(dsn, _api_data)) display_results(results, detailed) except Exception as e: print("Error: validate: {}".format(e)) end = clock() logger_benchmark.info(log_benchmark("validate", start, end)) return
[ "def", "validate", "(", "D", ",", "detailed", "=", "True", ")", ":", "start", "=", "clock", "(", ")", "print", "(", "\"\\n\"", ")", "# Fetch new results by calling lipd.net/api/validator (costly, may take a while)", "print", "(", "\"Fetching results from validator at lipd...
Use the Validator API for lipd.net to validate all LiPD files in the LiPD Library. Display the PASS/FAIL results. Display detailed results if the option is chosen. :param dict D: Metadata (single or multiple datasets) :param bool detailed: Show or hide the detailed results of each LiPD file. Shows warnings and errors :return none:
[ "Use", "the", "Validator", "API", "for", "lipd", ".", "net", "to", "validate", "all", "LiPD", "files", "in", "the", "LiPD", "Library", ".", "Display", "the", "PASS", "/", "FAIL", "results", ".", "Display", "detailed", "results", "if", "the", "option", "i...
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L262-L293
nickmckay/LiPD-utilities
Python/lipd/__init__.py
tsToDf
def tsToDf(tso): """ Create Pandas DataFrame from TimeSeries object. Use: Must first extractTs to get a time series. Then pick one item from time series and pass it through :param dict tso: Time series entry :return dict dfs: Pandas dataframes """ dfs = {} try: dfs = ts_to_df(tso) except Exception as e: print("Error: Unable to create data frame") logger_start.warn("ts_to_df: tso malformed: {}".format(e)) return dfs
python
def tsToDf(tso): """ Create Pandas DataFrame from TimeSeries object. Use: Must first extractTs to get a time series. Then pick one item from time series and pass it through :param dict tso: Time series entry :return dict dfs: Pandas dataframes """ dfs = {} try: dfs = ts_to_df(tso) except Exception as e: print("Error: Unable to create data frame") logger_start.warn("ts_to_df: tso malformed: {}".format(e)) return dfs
[ "def", "tsToDf", "(", "tso", ")", ":", "dfs", "=", "{", "}", "try", ":", "dfs", "=", "ts_to_df", "(", "tso", ")", "except", "Exception", "as", "e", ":", "print", "(", "\"Error: Unable to create data frame\"", ")", "logger_start", ".", "warn", "(", "\"ts_...
Create Pandas DataFrame from TimeSeries object. Use: Must first extractTs to get a time series. Then pick one item from time series and pass it through :param dict tso: Time series entry :return dict dfs: Pandas dataframes
[ "Create", "Pandas", "DataFrame", "from", "TimeSeries", "object", ".", "Use", ":", "Must", "first", "extractTs", "to", "get", "a", "time", "series", ".", "Then", "pick", "one", "item", "from", "time", "series", "and", "pass", "it", "through" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L357-L371
nickmckay/LiPD-utilities
Python/lipd/__init__.py
extractTs
def extractTs(d, whichtables="meas", mode="paleo"): """ Create a time series using LiPD data (uses paleoData by default) | Example : (default) paleoData and meas tables | 1. D = lipd.readLipd() | 2. ts = lipd.extractTs(D) | Example : chronData and all tables | 1. D = lipd.readLipd() | 2. ts = lipd.extractTs(D, "all", "chron") :param dict d: Metadata :param str whichtables: "all", "summ", "meas", "ens" - The tables that you would like in the timeseries :param str mode: "paleo" or "chron" mode :return list l: Time series """ # instead of storing each raw dataset per tso, store it once in the global scope. saves memory global _timeseries_data _l = [] start = clock() try: if not d: print("Error: LiPD data not provided. Pass LiPD data into the function.") else: print(mode_ts("extract", mode)) if "paleoData" in d: # One dataset: Process directly on file, don't loop try: _dsn = get_dsn(d) _timeseries_data[start] = {} _timeseries_data[start][_dsn] = d # Use the LiPD data given to start time series extract print("extracting: {}".format(_dsn)) # Copy, so we don't affect the original data _v = copy.deepcopy(d) # Start extract... _l = (extract(_v, whichtables, mode, start)) except Exception as e: print("Error: Unable to extractTs for dataset: {}: {}".format(_dsn, e)) logger_start.debug("extractTs: Exception: {}, {}".format(_dsn, e)) else: _timeseries_data[start] = d # Multiple datasets: Loop and append for each file for k, v in d.items(): try: # Use the LiPD data given to start time series extract print("extracting: {}".format(k)) # Copy, so we don't affect the original data _v = copy.deepcopy(v) # Start extract... _l += (extract(_v, whichtables, mode, start)) except Exception as e: print("Error: Unable to extractTs for dataset: {}: {}".format(k, e)) logger_start.debug("extractTs: Exception: {}".format(e)) print("Created time series: {} entries".format(len(_l))) except Exception as e: print("Error: Unable to extractTs: {}".format(e)) logger_start.error("extractTs: Exception: {}".format(e)) end = clock() logger_benchmark.info(log_benchmark("extractTs", start, end)) return _l
python
def extractTs(d, whichtables="meas", mode="paleo"): """ Create a time series using LiPD data (uses paleoData by default) | Example : (default) paleoData and meas tables | 1. D = lipd.readLipd() | 2. ts = lipd.extractTs(D) | Example : chronData and all tables | 1. D = lipd.readLipd() | 2. ts = lipd.extractTs(D, "all", "chron") :param dict d: Metadata :param str whichtables: "all", "summ", "meas", "ens" - The tables that you would like in the timeseries :param str mode: "paleo" or "chron" mode :return list l: Time series """ # instead of storing each raw dataset per tso, store it once in the global scope. saves memory global _timeseries_data _l = [] start = clock() try: if not d: print("Error: LiPD data not provided. Pass LiPD data into the function.") else: print(mode_ts("extract", mode)) if "paleoData" in d: # One dataset: Process directly on file, don't loop try: _dsn = get_dsn(d) _timeseries_data[start] = {} _timeseries_data[start][_dsn] = d # Use the LiPD data given to start time series extract print("extracting: {}".format(_dsn)) # Copy, so we don't affect the original data _v = copy.deepcopy(d) # Start extract... _l = (extract(_v, whichtables, mode, start)) except Exception as e: print("Error: Unable to extractTs for dataset: {}: {}".format(_dsn, e)) logger_start.debug("extractTs: Exception: {}, {}".format(_dsn, e)) else: _timeseries_data[start] = d # Multiple datasets: Loop and append for each file for k, v in d.items(): try: # Use the LiPD data given to start time series extract print("extracting: {}".format(k)) # Copy, so we don't affect the original data _v = copy.deepcopy(v) # Start extract... _l += (extract(_v, whichtables, mode, start)) except Exception as e: print("Error: Unable to extractTs for dataset: {}: {}".format(k, e)) logger_start.debug("extractTs: Exception: {}".format(e)) print("Created time series: {} entries".format(len(_l))) except Exception as e: print("Error: Unable to extractTs: {}".format(e)) logger_start.error("extractTs: Exception: {}".format(e)) end = clock() logger_benchmark.info(log_benchmark("extractTs", start, end)) return _l
[ "def", "extractTs", "(", "d", ",", "whichtables", "=", "\"meas\"", ",", "mode", "=", "\"paleo\"", ")", ":", "# instead of storing each raw dataset per tso, store it once in the global scope. saves memory", "global", "_timeseries_data", "_l", "=", "[", "]", "start", "=", ...
Create a time series using LiPD data (uses paleoData by default) | Example : (default) paleoData and meas tables | 1. D = lipd.readLipd() | 2. ts = lipd.extractTs(D) | Example : chronData and all tables | 1. D = lipd.readLipd() | 2. ts = lipd.extractTs(D, "all", "chron") :param dict d: Metadata :param str whichtables: "all", "summ", "meas", "ens" - The tables that you would like in the timeseries :param str mode: "paleo" or "chron" mode :return list l: Time series
[ "Create", "a", "time", "series", "using", "LiPD", "data", "(", "uses", "paleoData", "by", "default", ")" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L393-L455
nickmckay/LiPD-utilities
Python/lipd/__init__.py
collapseTs
def collapseTs(ts=None): """ Collapse a time series back into LiPD record form. | Example | 1. D = lipd.readLipd() | 2. ts = lipd.extractTs(D) | 3. New_D = lipd.collapseTs(ts) _timeseries_data is sorted by time_id, and then by dataSetName _timeseries_data[10103341]["ODP1098B"] = {data} :param list ts: Time series :return dict: Metadata """ # Retrieve the associated raw data according to the "time_id" found in each object. Match it in _timeseries_data global _timeseries_data _d = {} if not ts: print("Error: Time series data not provided. Pass time series into the function.") else: # Send time series list through to be collapsed. try: _raw = _timeseries_data[ts[0]["time_id"]] print(mode_ts("collapse", mode="", ts=ts)) _d = collapse(ts, _raw) _d = rm_empty_fields(_d) except Exception as e: print("Error: Unable to collapse the time series: {}".format(e)) logger_start.error("collapseTs: unable to collapse the time series: {}".format(e)) return _d
python
def collapseTs(ts=None): """ Collapse a time series back into LiPD record form. | Example | 1. D = lipd.readLipd() | 2. ts = lipd.extractTs(D) | 3. New_D = lipd.collapseTs(ts) _timeseries_data is sorted by time_id, and then by dataSetName _timeseries_data[10103341]["ODP1098B"] = {data} :param list ts: Time series :return dict: Metadata """ # Retrieve the associated raw data according to the "time_id" found in each object. Match it in _timeseries_data global _timeseries_data _d = {} if not ts: print("Error: Time series data not provided. Pass time series into the function.") else: # Send time series list through to be collapsed. try: _raw = _timeseries_data[ts[0]["time_id"]] print(mode_ts("collapse", mode="", ts=ts)) _d = collapse(ts, _raw) _d = rm_empty_fields(_d) except Exception as e: print("Error: Unable to collapse the time series: {}".format(e)) logger_start.error("collapseTs: unable to collapse the time series: {}".format(e)) return _d
[ "def", "collapseTs", "(", "ts", "=", "None", ")", ":", "# Retrieve the associated raw data according to the \"time_id\" found in each object. Match it in _timeseries_data", "global", "_timeseries_data", "_d", "=", "{", "}", "if", "not", "ts", ":", "print", "(", "\"Error: Ti...
Collapse a time series back into LiPD record form. | Example | 1. D = lipd.readLipd() | 2. ts = lipd.extractTs(D) | 3. New_D = lipd.collapseTs(ts) _timeseries_data is sorted by time_id, and then by dataSetName _timeseries_data[10103341]["ODP1098B"] = {data} :param list ts: Time series :return dict: Metadata
[ "Collapse", "a", "time", "series", "back", "into", "LiPD", "record", "form", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L458-L488
nickmckay/LiPD-utilities
Python/lipd/__init__.py
filterTs
def filterTs(ts, expressions): """ Create a new time series that only contains entries that match the given expression. | Example: | D = lipd.loadLipd() | ts = lipd.extractTs(D) | new_ts = filterTs(ts, "archiveType == marine sediment") | new_ts = filterTs(ts, ["paleoData_variableName == sst", "archiveType == marine sediment"]) | Expressions should use underscores to denote data nesting. | Ex: paleoData_hasResolution_hasMedian or :param list OR str expressions: Expressions :param list ts: Time series :return list new_ts: Filtered time series that matches the expression """ # Make a copy of the ts. We're going to work directly on it. new_ts = ts[:] # User provided a single query string if isinstance(expressions, str): # Use some magic to turn the given string expression into a machine-usable comparative expression. expr_lst = translate_expression(expressions) # Only proceed if the translation resulted in a usable expression. if expr_lst: # Return the new filtered time series. This will use the same time series # that filters down each loop. new_ts, _idx = get_matches(expr_lst, new_ts) # User provided a list of multiple queries elif isinstance(expressions, list): # Loop for each query for expr in expressions: # Use some magic to turn the given string expression into a machine-usable comparative expression. expr_lst = translate_expression(expr) # Only proceed if the translation resulted in a usable expression. if expr_lst: # Return the new filtered time series. This will use the same time series # that filters down each loop. new_ts, _idx = get_matches(expr_lst, new_ts) return new_ts
python
def filterTs(ts, expressions): """ Create a new time series that only contains entries that match the given expression. | Example: | D = lipd.loadLipd() | ts = lipd.extractTs(D) | new_ts = filterTs(ts, "archiveType == marine sediment") | new_ts = filterTs(ts, ["paleoData_variableName == sst", "archiveType == marine sediment"]) | Expressions should use underscores to denote data nesting. | Ex: paleoData_hasResolution_hasMedian or :param list OR str expressions: Expressions :param list ts: Time series :return list new_ts: Filtered time series that matches the expression """ # Make a copy of the ts. We're going to work directly on it. new_ts = ts[:] # User provided a single query string if isinstance(expressions, str): # Use some magic to turn the given string expression into a machine-usable comparative expression. expr_lst = translate_expression(expressions) # Only proceed if the translation resulted in a usable expression. if expr_lst: # Return the new filtered time series. This will use the same time series # that filters down each loop. new_ts, _idx = get_matches(expr_lst, new_ts) # User provided a list of multiple queries elif isinstance(expressions, list): # Loop for each query for expr in expressions: # Use some magic to turn the given string expression into a machine-usable comparative expression. expr_lst = translate_expression(expr) # Only proceed if the translation resulted in a usable expression. if expr_lst: # Return the new filtered time series. This will use the same time series # that filters down each loop. new_ts, _idx = get_matches(expr_lst, new_ts) return new_ts
[ "def", "filterTs", "(", "ts", ",", "expressions", ")", ":", "# Make a copy of the ts. We're going to work directly on it.", "new_ts", "=", "ts", "[", ":", "]", "# User provided a single query string", "if", "isinstance", "(", "expressions", ",", "str", ")", ":", "# Us...
Create a new time series that only contains entries that match the given expression. | Example: | D = lipd.loadLipd() | ts = lipd.extractTs(D) | new_ts = filterTs(ts, "archiveType == marine sediment") | new_ts = filterTs(ts, ["paleoData_variableName == sst", "archiveType == marine sediment"]) | Expressions should use underscores to denote data nesting. | Ex: paleoData_hasResolution_hasMedian or :param list OR str expressions: Expressions :param list ts: Time series :return list new_ts: Filtered time series that matches the expression
[ "Create", "a", "new", "time", "series", "that", "only", "contains", "entries", "that", "match", "the", "given", "expression", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L491-L532
nickmckay/LiPD-utilities
Python/lipd/__init__.py
queryTs
def queryTs(ts, expression): """ Find the indices of the time series entries that match the given expression. | Example: | D = lipd.loadLipd() | ts = lipd.extractTs(D) | matches = queryTs(ts, "archiveType == marine sediment") | matches = queryTs(ts, "geo_meanElev <= 2000") :param str expression: Expression :param list ts: Time series :return list _idx: Indices of entries that match the criteria """ # Make a copy of the ts. We're going to work directly on it. _idx = [] # User provided a single query string if isinstance(expressions, str): # Use some magic to turn the given string expression into a machine-usable comparative expression. expr_lst = translate_expression(expressions) # Only proceed if the translation resulted in a usable expression. if expr_lst: # Return the new filtered time series. This will use the same time series # that filters down each loop. new_ts, _idx = get_matches(expr_lst, new_ts) # User provided a list of multiple queries elif isinstance(expressions, list): # Loop for each query for expr in expressions: # Use some magic to turn the given string expression into a machine-usable comparative expression. expr_lst = translate_expression(expr) # Only proceed if the translation resulted in a usable expression. if expr_lst: # Return the new filtered time series. This will use the same time series # that filters down each loop. new_ts, _idx = get_matches(expr_lst, new_ts) return _idx
python
def queryTs(ts, expression): """ Find the indices of the time series entries that match the given expression. | Example: | D = lipd.loadLipd() | ts = lipd.extractTs(D) | matches = queryTs(ts, "archiveType == marine sediment") | matches = queryTs(ts, "geo_meanElev <= 2000") :param str expression: Expression :param list ts: Time series :return list _idx: Indices of entries that match the criteria """ # Make a copy of the ts. We're going to work directly on it. _idx = [] # User provided a single query string if isinstance(expressions, str): # Use some magic to turn the given string expression into a machine-usable comparative expression. expr_lst = translate_expression(expressions) # Only proceed if the translation resulted in a usable expression. if expr_lst: # Return the new filtered time series. This will use the same time series # that filters down each loop. new_ts, _idx = get_matches(expr_lst, new_ts) # User provided a list of multiple queries elif isinstance(expressions, list): # Loop for each query for expr in expressions: # Use some magic to turn the given string expression into a machine-usable comparative expression. expr_lst = translate_expression(expr) # Only proceed if the translation resulted in a usable expression. if expr_lst: # Return the new filtered time series. This will use the same time series # that filters down each loop. new_ts, _idx = get_matches(expr_lst, new_ts) return _idx
[ "def", "queryTs", "(", "ts", ",", "expression", ")", ":", "# Make a copy of the ts. We're going to work directly on it.", "_idx", "=", "[", "]", "# User provided a single query string", "if", "isinstance", "(", "expressions", ",", "str", ")", ":", "# Use some magic to tur...
Find the indices of the time series entries that match the given expression. | Example: | D = lipd.loadLipd() | ts = lipd.extractTs(D) | matches = queryTs(ts, "archiveType == marine sediment") | matches = queryTs(ts, "geo_meanElev <= 2000") :param str expression: Expression :param list ts: Time series :return list _idx: Indices of entries that match the criteria
[ "Find", "the", "indices", "of", "the", "time", "series", "entries", "that", "match", "the", "given", "expression", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L535-L573
nickmckay/LiPD-utilities
Python/lipd/__init__.py
viewTs
def viewTs(ts): """ View the contents of one time series entry in a nicely formatted way | Example | 1. D = lipd.readLipd() | 2. ts = lipd.extractTs(D) | 3. viewTs(ts[0]) :param dict ts: One time series entry :return none: """ _ts = ts if isinstance(ts, list): _ts = ts[0] print("It looks like you input a full time series. It's best to view one entry at a time.\n" "I'll show you the first entry...") _tmp_sort = OrderedDict() _tmp_sort["ROOT"] = {} _tmp_sort["PUBLICATION"] = {} _tmp_sort["GEO"] = {} _tmp_sort["OTHERS"] = {} _tmp_sort["DATA"] = {} # Organize the data by section for k,v in _ts.items(): if not any(i == k for i in ["paleoData", "chronData", "mode", "@context"]): if k in ["archiveType", "dataSetName", "googleSpreadSheetKey", "metadataMD5", "tagMD5", "googleMetadataWorksheet", "lipdVersion"]: _tmp_sort["ROOT"][k] = v elif "pub" in k: _tmp_sort["PUBLICATION"][k] = v elif "geo" in k: _tmp_sort["GEO"][k] = v elif "paleoData_" in k or "chronData_" in k: if isinstance(v, list) and len(v) > 2: _tmp_sort["DATA"][k] = "[{}, {}, {}, ...]".format(v[0], v[1], v[2]) else: _tmp_sort["DATA"][k] = v else: if isinstance(v, list) and len(v) > 2: _tmp_sort["OTHERS"][k] = "[{}, {}, {}, ...]".format(v[0], v[1], v[2]) else: _tmp_sort["OTHERS"][k] = v # Start printing the data to console for k1, v1 in _tmp_sort.items(): print("\n{}\n===============".format(k1)) for k2, v2 in v1.items(): print("{} : {}".format(k2, v2)) return
python
def viewTs(ts): """ View the contents of one time series entry in a nicely formatted way | Example | 1. D = lipd.readLipd() | 2. ts = lipd.extractTs(D) | 3. viewTs(ts[0]) :param dict ts: One time series entry :return none: """ _ts = ts if isinstance(ts, list): _ts = ts[0] print("It looks like you input a full time series. It's best to view one entry at a time.\n" "I'll show you the first entry...") _tmp_sort = OrderedDict() _tmp_sort["ROOT"] = {} _tmp_sort["PUBLICATION"] = {} _tmp_sort["GEO"] = {} _tmp_sort["OTHERS"] = {} _tmp_sort["DATA"] = {} # Organize the data by section for k,v in _ts.items(): if not any(i == k for i in ["paleoData", "chronData", "mode", "@context"]): if k in ["archiveType", "dataSetName", "googleSpreadSheetKey", "metadataMD5", "tagMD5", "googleMetadataWorksheet", "lipdVersion"]: _tmp_sort["ROOT"][k] = v elif "pub" in k: _tmp_sort["PUBLICATION"][k] = v elif "geo" in k: _tmp_sort["GEO"][k] = v elif "paleoData_" in k or "chronData_" in k: if isinstance(v, list) and len(v) > 2: _tmp_sort["DATA"][k] = "[{}, {}, {}, ...]".format(v[0], v[1], v[2]) else: _tmp_sort["DATA"][k] = v else: if isinstance(v, list) and len(v) > 2: _tmp_sort["OTHERS"][k] = "[{}, {}, {}, ...]".format(v[0], v[1], v[2]) else: _tmp_sort["OTHERS"][k] = v # Start printing the data to console for k1, v1 in _tmp_sort.items(): print("\n{}\n===============".format(k1)) for k2, v2 in v1.items(): print("{} : {}".format(k2, v2)) return
[ "def", "viewTs", "(", "ts", ")", ":", "_ts", "=", "ts", "if", "isinstance", "(", "ts", ",", "list", ")", ":", "_ts", "=", "ts", "[", "0", "]", "print", "(", "\"It looks like you input a full time series. It's best to view one entry at a time.\\n\"", "\"I'll show y...
View the contents of one time series entry in a nicely formatted way | Example | 1. D = lipd.readLipd() | 2. ts = lipd.extractTs(D) | 3. viewTs(ts[0]) :param dict ts: One time series entry :return none:
[ "View", "the", "contents", "of", "one", "time", "series", "entry", "in", "a", "nicely", "formatted", "way" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L576-L626
nickmckay/LiPD-utilities
Python/lipd/__init__.py
showLipds
def showLipds(D=None): """ Display the dataset names of a given LiPD data | Example | lipd.showLipds(D) :pararm dict D: LiPD data :return none: """ if not D: print("Error: LiPD data not provided. Pass LiPD data into the function.") else: print(json.dumps(D.keys(), indent=2)) return
python
def showLipds(D=None): """ Display the dataset names of a given LiPD data | Example | lipd.showLipds(D) :pararm dict D: LiPD data :return none: """ if not D: print("Error: LiPD data not provided. Pass LiPD data into the function.") else: print(json.dumps(D.keys(), indent=2)) return
[ "def", "showLipds", "(", "D", "=", "None", ")", ":", "if", "not", "D", ":", "print", "(", "\"Error: LiPD data not provided. Pass LiPD data into the function.\"", ")", "else", ":", "print", "(", "json", ".", "dumps", "(", "D", ".", "keys", "(", ")", ",", "i...
Display the dataset names of a given LiPD data | Example | lipd.showLipds(D) :pararm dict D: LiPD data :return none:
[ "Display", "the", "dataset", "names", "of", "a", "given", "LiPD", "data" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L649-L665
nickmckay/LiPD-utilities
Python/lipd/__init__.py
showMetadata
def showMetadata(dat): """ Display the metadata specified LiPD in pretty print | Example | showMetadata(D["Africa-ColdAirCave.Sundqvist.2013"]) :param dict dat: Metadata :return none: """ _tmp = rm_values_fields(copy.deepcopy(dat)) print(json.dumps(_tmp, indent=2)) return
python
def showMetadata(dat): """ Display the metadata specified LiPD in pretty print | Example | showMetadata(D["Africa-ColdAirCave.Sundqvist.2013"]) :param dict dat: Metadata :return none: """ _tmp = rm_values_fields(copy.deepcopy(dat)) print(json.dumps(_tmp, indent=2)) return
[ "def", "showMetadata", "(", "dat", ")", ":", "_tmp", "=", "rm_values_fields", "(", "copy", ".", "deepcopy", "(", "dat", ")", ")", "print", "(", "json", ".", "dumps", "(", "_tmp", ",", "indent", "=", "2", ")", ")", "return" ]
Display the metadata specified LiPD in pretty print | Example | showMetadata(D["Africa-ColdAirCave.Sundqvist.2013"]) :param dict dat: Metadata :return none:
[ "Display", "the", "metadata", "specified", "LiPD", "in", "pretty", "print" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L668-L680
nickmckay/LiPD-utilities
Python/lipd/__init__.py
showDfs
def showDfs(d): """ Display the available data frame names in a given data frame collection :param dict d: Dataframe collection :return none: """ if "metadata" in d: print("metadata") if "paleoData" in d: try: for k, v in d["paleoData"].items(): print(k) except KeyError: pass except AttributeError: pass if "chronData" in d: try: for k, v in d["chronData"].items(): print(k) except KeyError: pass except AttributeError: pass # print("Process Complete") return
python
def showDfs(d): """ Display the available data frame names in a given data frame collection :param dict d: Dataframe collection :return none: """ if "metadata" in d: print("metadata") if "paleoData" in d: try: for k, v in d["paleoData"].items(): print(k) except KeyError: pass except AttributeError: pass if "chronData" in d: try: for k, v in d["chronData"].items(): print(k) except KeyError: pass except AttributeError: pass # print("Process Complete") return
[ "def", "showDfs", "(", "d", ")", ":", "if", "\"metadata\"", "in", "d", ":", "print", "(", "\"metadata\"", ")", "if", "\"paleoData\"", "in", "d", ":", "try", ":", "for", "k", ",", "v", "in", "d", "[", "\"paleoData\"", "]", ".", "items", "(", ")", ...
Display the available data frame names in a given data frame collection :param dict d: Dataframe collection :return none:
[ "Display", "the", "available", "data", "frame", "names", "in", "a", "given", "data", "frame", "collection" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L683-L709
nickmckay/LiPD-utilities
Python/lipd/__init__.py
getLipdNames
def getLipdNames(D=None): """ Get a list of all LiPD names in the library | Example | names = lipd.getLipdNames(D) :return list f_list: File list """ _names = [] try: if not D: print("Error: LiPD data not provided. Pass LiPD data into the function.") else: _names = D.keys() except Exception: pass return _names
python
def getLipdNames(D=None): """ Get a list of all LiPD names in the library | Example | names = lipd.getLipdNames(D) :return list f_list: File list """ _names = [] try: if not D: print("Error: LiPD data not provided. Pass LiPD data into the function.") else: _names = D.keys() except Exception: pass return _names
[ "def", "getLipdNames", "(", "D", "=", "None", ")", ":", "_names", "=", "[", "]", "try", ":", "if", "not", "D", ":", "print", "(", "\"Error: LiPD data not provided. Pass LiPD data into the function.\"", ")", "else", ":", "_names", "=", "D", ".", "keys", "(", ...
Get a list of all LiPD names in the library | Example | names = lipd.getLipdNames(D) :return list f_list: File list
[ "Get", "a", "list", "of", "all", "LiPD", "names", "in", "the", "library" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L714-L731
nickmckay/LiPD-utilities
Python/lipd/__init__.py
getMetadata
def getMetadata(L): """ Get metadata from a LiPD data in memory | Example | m = lipd.getMetadata(D["Africa-ColdAirCave.Sundqvist.2013"]) :param dict L: One LiPD record :return dict d: LiPD record (metadata only) """ _l = {} try: # Create a copy. Do not affect the original data. _l = copy.deepcopy(L) # Remove values fields _l = rm_values_fields(_l) except Exception as e: # Input likely not formatted correctly, though other problems can occur. print("Error: Unable to get data. Please check that input is LiPD data: {}".format(e)) return _l
python
def getMetadata(L): """ Get metadata from a LiPD data in memory | Example | m = lipd.getMetadata(D["Africa-ColdAirCave.Sundqvist.2013"]) :param dict L: One LiPD record :return dict d: LiPD record (metadata only) """ _l = {} try: # Create a copy. Do not affect the original data. _l = copy.deepcopy(L) # Remove values fields _l = rm_values_fields(_l) except Exception as e: # Input likely not formatted correctly, though other problems can occur. print("Error: Unable to get data. Please check that input is LiPD data: {}".format(e)) return _l
[ "def", "getMetadata", "(", "L", ")", ":", "_l", "=", "{", "}", "try", ":", "# Create a copy. Do not affect the original data.", "_l", "=", "copy", ".", "deepcopy", "(", "L", ")", "# Remove values fields", "_l", "=", "rm_values_fields", "(", "_l", ")", "except"...
Get metadata from a LiPD data in memory | Example | m = lipd.getMetadata(D["Africa-ColdAirCave.Sundqvist.2013"]) :param dict L: One LiPD record :return dict d: LiPD record (metadata only)
[ "Get", "metadata", "from", "a", "LiPD", "data", "in", "memory" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L734-L753
nickmckay/LiPD-utilities
Python/lipd/__init__.py
getCsv
def getCsv(L=None): """ Get CSV from LiPD metadata | Example | c = lipd.getCsv(D["Africa-ColdAirCave.Sundqvist.2013"]) :param dict L: One LiPD record :return dict d: CSV data """ _c = {} try: if not L: print("Error: LiPD data not provided. Pass LiPD data into the function.") else: _j, _c = get_csv_from_metadata(L["dataSetName"], L) except KeyError as ke: print("Error: Unable to get data. Please check that input is one LiPD dataset: {}".format(ke)) except Exception as e: print("Error: Unable to get data. Something went wrong: {}".format(e)) logger_start.warn("getCsv: Exception: Unable to process lipd data: {}".format(e)) return _c
python
def getCsv(L=None): """ Get CSV from LiPD metadata | Example | c = lipd.getCsv(D["Africa-ColdAirCave.Sundqvist.2013"]) :param dict L: One LiPD record :return dict d: CSV data """ _c = {} try: if not L: print("Error: LiPD data not provided. Pass LiPD data into the function.") else: _j, _c = get_csv_from_metadata(L["dataSetName"], L) except KeyError as ke: print("Error: Unable to get data. Please check that input is one LiPD dataset: {}".format(ke)) except Exception as e: print("Error: Unable to get data. Something went wrong: {}".format(e)) logger_start.warn("getCsv: Exception: Unable to process lipd data: {}".format(e)) return _c
[ "def", "getCsv", "(", "L", "=", "None", ")", ":", "_c", "=", "{", "}", "try", ":", "if", "not", "L", ":", "print", "(", "\"Error: LiPD data not provided. Pass LiPD data into the function.\"", ")", "else", ":", "_j", ",", "_c", "=", "get_csv_from_metadata", "...
Get CSV from LiPD metadata | Example | c = lipd.getCsv(D["Africa-ColdAirCave.Sundqvist.2013"]) :param dict L: One LiPD record :return dict d: CSV data
[ "Get", "CSV", "from", "LiPD", "metadata" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L756-L777
nickmckay/LiPD-utilities
Python/lipd/__init__.py
writeLipd
def writeLipd(dat, path=""): """ Write LiPD data to file(s) :param dict dat: Metadata :param str path: Destination (optional) :return none: """ global settings start = clock() __write_lipd(dat, path) end = clock() logger_benchmark.info(log_benchmark("writeLipd", start, end)) return
python
def writeLipd(dat, path=""): """ Write LiPD data to file(s) :param dict dat: Metadata :param str path: Destination (optional) :return none: """ global settings start = clock() __write_lipd(dat, path) end = clock() logger_benchmark.info(log_benchmark("writeLipd", start, end)) return
[ "def", "writeLipd", "(", "dat", ",", "path", "=", "\"\"", ")", ":", "global", "settings", "start", "=", "clock", "(", ")", "__write_lipd", "(", "dat", ",", "path", ")", "end", "=", "clock", "(", ")", "logger_benchmark", ".", "info", "(", "log_benchmark...
Write LiPD data to file(s) :param dict dat: Metadata :param str path: Destination (optional) :return none:
[ "Write", "LiPD", "data", "to", "file", "(", "s", ")" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L782-L795
nickmckay/LiPD-utilities
Python/lipd/__init__.py
__universal_read
def __universal_read(file_path, file_type): """ Use a file path to create file metadata and load a file in the appropriate way, according to the provided file type. :param str file_path: Path to file :param str file_type: One of approved file types: xls, xlsx, txt, lpd :return none: """ global files, cwd, settings # check that we are using the correct function to load this file type. (i.e. readNoaa for a .txt file) correct_ext = load_fn_matches_ext(file_path, file_type) # Check that this path references a file valid_path = path_type(file_path, "file") # is the path a file? if valid_path and correct_ext: # get file metadata for one file file_meta = collect_metadata_file(file_path) # append to global files, then load in D if file_type == ".lpd": # add meta to global file meta files[".lpd"].append(file_meta) # append to global files elif file_type in [".xls", ".xlsx"]: print("reading: {}".format(print_filename(file_meta["full_path"]))) files[".xls"].append(file_meta) # append to global files elif file_type == ".txt": print("reading: {}".format(print_filename(file_meta["full_path"]))) files[".txt"].append(file_meta) # we want to move around with the files we load # change dir into the dir of the target file cwd = file_meta["dir"] if cwd: os.chdir(cwd) return
python
def __universal_read(file_path, file_type): """ Use a file path to create file metadata and load a file in the appropriate way, according to the provided file type. :param str file_path: Path to file :param str file_type: One of approved file types: xls, xlsx, txt, lpd :return none: """ global files, cwd, settings # check that we are using the correct function to load this file type. (i.e. readNoaa for a .txt file) correct_ext = load_fn_matches_ext(file_path, file_type) # Check that this path references a file valid_path = path_type(file_path, "file") # is the path a file? if valid_path and correct_ext: # get file metadata for one file file_meta = collect_metadata_file(file_path) # append to global files, then load in D if file_type == ".lpd": # add meta to global file meta files[".lpd"].append(file_meta) # append to global files elif file_type in [".xls", ".xlsx"]: print("reading: {}".format(print_filename(file_meta["full_path"]))) files[".xls"].append(file_meta) # append to global files elif file_type == ".txt": print("reading: {}".format(print_filename(file_meta["full_path"]))) files[".txt"].append(file_meta) # we want to move around with the files we load # change dir into the dir of the target file cwd = file_meta["dir"] if cwd: os.chdir(cwd) return
[ "def", "__universal_read", "(", "file_path", ",", "file_type", ")", ":", "global", "files", ",", "cwd", ",", "settings", "# check that we are using the correct function to load this file type. (i.e. readNoaa for a .txt file)", "correct_ext", "=", "load_fn_matches_ext", "(", "fi...
Use a file path to create file metadata and load a file in the appropriate way, according to the provided file type. :param str file_path: Path to file :param str file_type: One of approved file types: xls, xlsx, txt, lpd :return none:
[ "Use", "a", "file", "path", "to", "create", "file", "metadata", "and", "load", "a", "file", "in", "the", "appropriate", "way", "according", "to", "the", "provided", "file", "type", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L801-L843
nickmckay/LiPD-utilities
Python/lipd/__init__.py
__read
def __read(usr_path, file_type): """ Determine what path needs to be taken to read in file(s) :param str usr_path: Path (optional) :param str file_type: File type to read :return none: """ # is there a file path specified ? if usr_path: # Is this a URL? Download the file and return the local path is_url = re.match(re_url, usr_path) if is_url: # The usr_path will now be a local path to a single file. It will trigger the "elif" statement below usr_path = download_from_url(usr_path, get_download_path()) # Directory path if os.path.isdir(usr_path): __read_directory(usr_path, file_type) # File path elif os.path.isfile(usr_path): __read_file(usr_path, file_type) # Invalid path given else: print("Error: Path given is invalid") # no path specified. ask if they want to load dir or file else: choice = "" count = 3 while not choice: try: print("Choose a read option:\n1. One file\n2. Multi-file select\n3. Directory") choice = input("Option: ") print("\n") # now use the given file type and prompt answer to call _read_file or _read_dir if choice in ["1", "2", "3"]: # open directory picker if choice == "3": __read_directory(usr_path, file_type) else: # open a file picker __read_file(usr_path, file_type) break else: count -= 1 if count == 0: print("Error: Too many failed attempts") break except Exception as e: print("Error: Invalid input: {}".format(e)) return
python
def __read(usr_path, file_type): """ Determine what path needs to be taken to read in file(s) :param str usr_path: Path (optional) :param str file_type: File type to read :return none: """ # is there a file path specified ? if usr_path: # Is this a URL? Download the file and return the local path is_url = re.match(re_url, usr_path) if is_url: # The usr_path will now be a local path to a single file. It will trigger the "elif" statement below usr_path = download_from_url(usr_path, get_download_path()) # Directory path if os.path.isdir(usr_path): __read_directory(usr_path, file_type) # File path elif os.path.isfile(usr_path): __read_file(usr_path, file_type) # Invalid path given else: print("Error: Path given is invalid") # no path specified. ask if they want to load dir or file else: choice = "" count = 3 while not choice: try: print("Choose a read option:\n1. One file\n2. Multi-file select\n3. Directory") choice = input("Option: ") print("\n") # now use the given file type and prompt answer to call _read_file or _read_dir if choice in ["1", "2", "3"]: # open directory picker if choice == "3": __read_directory(usr_path, file_type) else: # open a file picker __read_file(usr_path, file_type) break else: count -= 1 if count == 0: print("Error: Too many failed attempts") break except Exception as e: print("Error: Invalid input: {}".format(e)) return
[ "def", "__read", "(", "usr_path", ",", "file_type", ")", ":", "# is there a file path specified ?", "if", "usr_path", ":", "# Is this a URL? Download the file and return the local path", "is_url", "=", "re", ".", "match", "(", "re_url", ",", "usr_path", ")", "if", "is...
Determine what path needs to be taken to read in file(s) :param str usr_path: Path (optional) :param str file_type: File type to read :return none:
[ "Determine", "what", "path", "needs", "to", "be", "taken", "to", "read", "in", "file", "(", "s", ")" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L846-L897
nickmckay/LiPD-utilities
Python/lipd/__init__.py
__read_lipd_contents
def __read_lipd_contents(): """ Use the file metadata to read in the LiPD file contents as a dataset library :return dict: Metadata """ global files, settings _d = {} try: if len(files[".lpd"]) == 1: _d = lipd_read(files[".lpd"][0]["full_path"]) if settings["verbose"]: print("Finished read: 1 record") else: for file in files[".lpd"]: _d[file["filename_no_ext"]] = lipd_read(file["full_path"]) if settings["verbose"]: print("Finished read: {} records".format(len(_d))) except Exception as e: print("Error: read_lipd_contents: {}".format(e)) return _d
python
def __read_lipd_contents(): """ Use the file metadata to read in the LiPD file contents as a dataset library :return dict: Metadata """ global files, settings _d = {} try: if len(files[".lpd"]) == 1: _d = lipd_read(files[".lpd"][0]["full_path"]) if settings["verbose"]: print("Finished read: 1 record") else: for file in files[".lpd"]: _d[file["filename_no_ext"]] = lipd_read(file["full_path"]) if settings["verbose"]: print("Finished read: {} records".format(len(_d))) except Exception as e: print("Error: read_lipd_contents: {}".format(e)) return _d
[ "def", "__read_lipd_contents", "(", ")", ":", "global", "files", ",", "settings", "_d", "=", "{", "}", "try", ":", "if", "len", "(", "files", "[", "\".lpd\"", "]", ")", "==", "1", ":", "_d", "=", "lipd_read", "(", "files", "[", "\".lpd\"", "]", "["...
Use the file metadata to read in the LiPD file contents as a dataset library :return dict: Metadata
[ "Use", "the", "file", "metadata", "to", "read", "in", "the", "LiPD", "file", "contents", "as", "a", "dataset", "library" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L900-L920
nickmckay/LiPD-utilities
Python/lipd/__init__.py
__read_file
def __read_file(usr_path, file_type): """ Universal read file. Given a path and a type, it will do the appropriate read actions :param str usr_path: Path to file :param str file_type: One of approved file types: xls, xlsx, txt, lpd :return none: """ global files # no path provided. start gui browse if not usr_path: # src files could be a list of one, or a list of many. depending how many files the user selects src_dir, src_files = get_src_or_dst("read", "file") # check if src_files is a list of multiple files if src_files: for file_path in src_files: __universal_read(file_path, file_type) else: print("No file(s) chosen") else: __universal_read(usr_path, file_type) return
python
def __read_file(usr_path, file_type): """ Universal read file. Given a path and a type, it will do the appropriate read actions :param str usr_path: Path to file :param str file_type: One of approved file types: xls, xlsx, txt, lpd :return none: """ global files # no path provided. start gui browse if not usr_path: # src files could be a list of one, or a list of many. depending how many files the user selects src_dir, src_files = get_src_or_dst("read", "file") # check if src_files is a list of multiple files if src_files: for file_path in src_files: __universal_read(file_path, file_type) else: print("No file(s) chosen") else: __universal_read(usr_path, file_type) return
[ "def", "__read_file", "(", "usr_path", ",", "file_type", ")", ":", "global", "files", "# no path provided. start gui browse", "if", "not", "usr_path", ":", "# src files could be a list of one, or a list of many. depending how many files the user selects", "src_dir", ",", "src_fil...
Universal read file. Given a path and a type, it will do the appropriate read actions :param str usr_path: Path to file :param str file_type: One of approved file types: xls, xlsx, txt, lpd :return none:
[ "Universal", "read", "file", ".", "Given", "a", "path", "and", "a", "type", "it", "will", "do", "the", "appropriate", "read", "actions" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L923-L946
nickmckay/LiPD-utilities
Python/lipd/__init__.py
__read_directory
def __read_directory(usr_path, file_type): """ Universal read directory. Given a path and a type, it will do the appropriate read actions :param str usr_path: Path to directory :param str file_type: .xls, .xlsx, .txt, .lpd :return none: """ # no path provided. start gui browse if not usr_path: # got dir path usr_path, src_files = get_src_or_dst("read", "directory") # Check if this is a valid directory path valid_path = path_type(usr_path, "directory") # If dir path is valid if valid_path: # List all files of target type in dir files_found = [] # Extra case for xlsx excel files if file_type == ".xls": files_found += list_files(".xlsx", usr_path) files_found += list_files(file_type, usr_path) # notify how many files were found print("Found: {} {} file(s)".format(len(files_found), FILE_TYPE_MAP[file_type]["file_type"])) # Loop for each file found for file_path in files_found: # Call read lipd for each file found __read_file(file_path, file_type) else: print("Directory path is not valid: {}".format(usr_path)) return
python
def __read_directory(usr_path, file_type): """ Universal read directory. Given a path and a type, it will do the appropriate read actions :param str usr_path: Path to directory :param str file_type: .xls, .xlsx, .txt, .lpd :return none: """ # no path provided. start gui browse if not usr_path: # got dir path usr_path, src_files = get_src_or_dst("read", "directory") # Check if this is a valid directory path valid_path = path_type(usr_path, "directory") # If dir path is valid if valid_path: # List all files of target type in dir files_found = [] # Extra case for xlsx excel files if file_type == ".xls": files_found += list_files(".xlsx", usr_path) files_found += list_files(file_type, usr_path) # notify how many files were found print("Found: {} {} file(s)".format(len(files_found), FILE_TYPE_MAP[file_type]["file_type"])) # Loop for each file found for file_path in files_found: # Call read lipd for each file found __read_file(file_path, file_type) else: print("Directory path is not valid: {}".format(usr_path)) return
[ "def", "__read_directory", "(", "usr_path", ",", "file_type", ")", ":", "# no path provided. start gui browse", "if", "not", "usr_path", ":", "# got dir path", "usr_path", ",", "src_files", "=", "get_src_or_dst", "(", "\"read\"", ",", "\"directory\"", ")", "# Check if...
Universal read directory. Given a path and a type, it will do the appropriate read actions :param str usr_path: Path to directory :param str file_type: .xls, .xlsx, .txt, .lpd :return none:
[ "Universal", "read", "directory", ".", "Given", "a", "path", "and", "a", "type", "it", "will", "do", "the", "appropriate", "read", "actions" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L949-L981
nickmckay/LiPD-utilities
Python/lipd/__init__.py
__write_lipd
def __write_lipd(dat, usr_path): """ Write LiPD data to file, provided an output directory and dataset name. :param dict dat: Metadata :param str usr_path: Destination path :param str dsn: Dataset name of one specific file to write :return none: """ global settings # no path provided. start gui browse if not usr_path: # got dir path usr_path, _ignore = get_src_or_dst("write", "directory") # Check if this is a valid directory path valid_path = path_type(usr_path, "directory") # If dir path is valid if valid_path: # Filename is given, write out one file if "paleoData" in dat: try: if settings["verbose"]: print("writing: {}".format(dat["dataSetName"])) lipd_write(dat, usr_path) except KeyError as ke: print("Error: Unable to write file: unknown, {}".format(ke)) except Exception as e: print("Error: Unable to write file: {}, {}".format(dat["dataSetName"], e)) # Filename is not given, write out whole library else: if dat: for name, lipd_dat in dat.items(): try: if settings["verbose"]: print("writing: {}".format(name)) lipd_write(lipd_dat, usr_path) except Exception as e: print("Error: Unable to write file: {}, {}".format(name, e)) return
python
def __write_lipd(dat, usr_path): """ Write LiPD data to file, provided an output directory and dataset name. :param dict dat: Metadata :param str usr_path: Destination path :param str dsn: Dataset name of one specific file to write :return none: """ global settings # no path provided. start gui browse if not usr_path: # got dir path usr_path, _ignore = get_src_or_dst("write", "directory") # Check if this is a valid directory path valid_path = path_type(usr_path, "directory") # If dir path is valid if valid_path: # Filename is given, write out one file if "paleoData" in dat: try: if settings["verbose"]: print("writing: {}".format(dat["dataSetName"])) lipd_write(dat, usr_path) except KeyError as ke: print("Error: Unable to write file: unknown, {}".format(ke)) except Exception as e: print("Error: Unable to write file: {}, {}".format(dat["dataSetName"], e)) # Filename is not given, write out whole library else: if dat: for name, lipd_dat in dat.items(): try: if settings["verbose"]: print("writing: {}".format(name)) lipd_write(lipd_dat, usr_path) except Exception as e: print("Error: Unable to write file: {}, {}".format(name, e)) return
[ "def", "__write_lipd", "(", "dat", ",", "usr_path", ")", ":", "global", "settings", "# no path provided. start gui browse", "if", "not", "usr_path", ":", "# got dir path", "usr_path", ",", "_ignore", "=", "get_src_or_dst", "(", "\"write\"", ",", "\"directory\"", ")"...
Write LiPD data to file, provided an output directory and dataset name. :param dict dat: Metadata :param str usr_path: Destination path :param str dsn: Dataset name of one specific file to write :return none:
[ "Write", "LiPD", "data", "to", "file", "provided", "an", "output", "directory", "and", "dataset", "name", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L984-L1023
nickmckay/LiPD-utilities
Python/lipd/__init__.py
__disclaimer
def __disclaimer(opt=""): """ Print the disclaimers once. If they've already been shown, skip over. :return none: """ global settings if opt is "update": print("Disclaimer: LiPD files may be updated and modified to adhere to standards\n") settings["note_update"] = False if opt is "validate": print("Note: Use lipd.validate() or www.LiPD.net/create " "to ensure that your new LiPD file(s) are valid") settings["note_validate"] = False return
python
def __disclaimer(opt=""): """ Print the disclaimers once. If they've already been shown, skip over. :return none: """ global settings if opt is "update": print("Disclaimer: LiPD files may be updated and modified to adhere to standards\n") settings["note_update"] = False if opt is "validate": print("Note: Use lipd.validate() or www.LiPD.net/create " "to ensure that your new LiPD file(s) are valid") settings["note_validate"] = False return
[ "def", "__disclaimer", "(", "opt", "=", "\"\"", ")", ":", "global", "settings", "if", "opt", "is", "\"update\"", ":", "print", "(", "\"Disclaimer: LiPD files may be updated and modified to adhere to standards\\n\"", ")", "settings", "[", "\"note_update\"", "]", "=", "...
Print the disclaimers once. If they've already been shown, skip over. :return none:
[ "Print", "the", "disclaimers", "once", ".", "If", "they", "ve", "already", "been", "shown", "skip", "over", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L1026-L1040
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/environment.py
safe_add_file
def safe_add_file(filename, app): """ Adds files to builder resources only, if the given filename was not already registered. Needed mainly for tests to avoid multiple registration of the same file and therefore also multiple execution of e.g. a javascript file during page load. :param filename: filename to remove :param app: app object :return: None """ data_file = filename static_data_file = os.path.join("_static", data_file) if data_file.split(".")[-1] == "js": if hasattr(app.builder, "script_files") and static_data_file not in app.builder.script_files: app.add_javascript(data_file) elif data_file.split(".")[-1] == "css": if hasattr(app.builder, "css_files") and static_data_file not in app.builder.css_files: app.add_stylesheet(data_file) else: raise NotImplementedError("File type {} not support by save_add_file".format(data_file.split(".")[-1]))
python
def safe_add_file(filename, app): """ Adds files to builder resources only, if the given filename was not already registered. Needed mainly for tests to avoid multiple registration of the same file and therefore also multiple execution of e.g. a javascript file during page load. :param filename: filename to remove :param app: app object :return: None """ data_file = filename static_data_file = os.path.join("_static", data_file) if data_file.split(".")[-1] == "js": if hasattr(app.builder, "script_files") and static_data_file not in app.builder.script_files: app.add_javascript(data_file) elif data_file.split(".")[-1] == "css": if hasattr(app.builder, "css_files") and static_data_file not in app.builder.css_files: app.add_stylesheet(data_file) else: raise NotImplementedError("File type {} not support by save_add_file".format(data_file.split(".")[-1]))
[ "def", "safe_add_file", "(", "filename", ",", "app", ")", ":", "data_file", "=", "filename", "static_data_file", "=", "os", ".", "path", ".", "join", "(", "\"_static\"", ",", "data_file", ")", "if", "data_file", ".", "split", "(", "\".\"", ")", "[", "-",...
Adds files to builder resources only, if the given filename was not already registered. Needed mainly for tests to avoid multiple registration of the same file and therefore also multiple execution of e.g. a javascript file during page load. :param filename: filename to remove :param app: app object :return: None
[ "Adds", "files", "to", "builder", "resources", "only", "if", "the", "given", "filename", "was", "not", "already", "registered", ".", "Needed", "mainly", "for", "tests", "to", "avoid", "multiple", "registration", "of", "the", "same", "file", "and", "therefore",...
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/environment.py#L19-L39
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/environment.py
safe_remove_file
def safe_remove_file(filename, app): """ Removes a given resource file from builder resources. Needed mostly during test, if multiple sphinx-build are started. During these tests js/cass-files are not cleaned, so a css_file from run A is still registered in run B. :param filename: filename to remove :param app: app object :return: None """ data_file = filename static_data_file = os.path.join("_static", data_file) if data_file.split(".")[-1] == "js": if hasattr(app.builder, "script_files") and static_data_file in app.builder.script_files: app.builder.script_files.remove(static_data_file) elif data_file.split(".")[-1] == "css": if hasattr(app.builder, "css_files") and static_data_file in app.builder.css_files: app.builder.css_files.remove(static_data_file)
python
def safe_remove_file(filename, app): """ Removes a given resource file from builder resources. Needed mostly during test, if multiple sphinx-build are started. During these tests js/cass-files are not cleaned, so a css_file from run A is still registered in run B. :param filename: filename to remove :param app: app object :return: None """ data_file = filename static_data_file = os.path.join("_static", data_file) if data_file.split(".")[-1] == "js": if hasattr(app.builder, "script_files") and static_data_file in app.builder.script_files: app.builder.script_files.remove(static_data_file) elif data_file.split(".")[-1] == "css": if hasattr(app.builder, "css_files") and static_data_file in app.builder.css_files: app.builder.css_files.remove(static_data_file)
[ "def", "safe_remove_file", "(", "filename", ",", "app", ")", ":", "data_file", "=", "filename", "static_data_file", "=", "os", ".", "path", ".", "join", "(", "\"_static\"", ",", "data_file", ")", "if", "data_file", ".", "split", "(", "\".\"", ")", "[", "...
Removes a given resource file from builder resources. Needed mostly during test, if multiple sphinx-build are started. During these tests js/cass-files are not cleaned, so a css_file from run A is still registered in run B. :param filename: filename to remove :param app: app object :return: None
[ "Removes", "a", "given", "resource", "file", "from", "builder", "resources", ".", "Needed", "mostly", "during", "test", "if", "multiple", "sphinx", "-", "build", "are", "started", ".", "During", "these", "tests", "js", "/", "cass", "-", "files", "are", "no...
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/environment.py#L42-L60
dchaplinsky/unshred
unshred/sheet.py
Sheet.get_shreds
def get_shreds(self, feature_extractors, sheet_name): """Detects shreds in the current sheet and constructs Shred instances. Caches the results for further invocations. Args: feature_extractors: iterable of AbstractShredFeature instances to use for shreds feature assignment. sheet_name: string, included in shred attributes. Returns: list of Shred instances. """ if self._shreds is None: shreds = [] _, contours, _ = cv2.findContours(self._foreground_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) for i, contour in enumerate(contours): shred = self._make_shred(contour, i, feature_extractors, sheet_name) if shred is not None: shreds.append(shred) self._shreds = shreds return self._shreds
python
def get_shreds(self, feature_extractors, sheet_name): """Detects shreds in the current sheet and constructs Shred instances. Caches the results for further invocations. Args: feature_extractors: iterable of AbstractShredFeature instances to use for shreds feature assignment. sheet_name: string, included in shred attributes. Returns: list of Shred instances. """ if self._shreds is None: shreds = [] _, contours, _ = cv2.findContours(self._foreground_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) for i, contour in enumerate(contours): shred = self._make_shred(contour, i, feature_extractors, sheet_name) if shred is not None: shreds.append(shred) self._shreds = shreds return self._shreds
[ "def", "get_shreds", "(", "self", ",", "feature_extractors", ",", "sheet_name", ")", ":", "if", "self", ".", "_shreds", "is", "None", ":", "shreds", "=", "[", "]", "_", ",", "contours", ",", "_", "=", "cv2", ".", "findContours", "(", "self", ".", "_f...
Detects shreds in the current sheet and constructs Shred instances. Caches the results for further invocations. Args: feature_extractors: iterable of AbstractShredFeature instances to use for shreds feature assignment. sheet_name: string, included in shred attributes. Returns: list of Shred instances.
[ "Detects", "shreds", "in", "the", "current", "sheet", "and", "constructs", "Shred", "instances", "." ]
train
https://github.com/dchaplinsky/unshred/blob/ca9cd6a1c6fb8c77d5424dd660ff5d2f3720c0f8/unshred/sheet.py#L78-L102
dchaplinsky/unshred
unshred/sheet.py
Sheet._make_shred
def _make_shred(self, c, name, feature_extractors, sheet_name): """Creates a Shred instances from a given contour. Args: c: cv2 contour object. name: string shred name within a sheet. feature_extractors: iterable of AbstractShredFeature instances. Returns: A new Shred instance or None on failure. """ height, width, channels = self.orig_img.shape # bounding rect of currrent contour r_x, r_y, r_w, r_h = cv2.boundingRect(c) # Generating simplified contour to use it in html epsilon = 0.01 * cv2.arcLength(c, True) simplified_contour = cv2.approxPolyDP(c, epsilon, True) # filter out too small fragments if self.px_to_mm(r_w) <= 3 or self.px_to_mm(r_h) <= 3: print("Skipping piece #%s as too small (%spx x %s px)" % ( name, r_w, r_h)) return None if self.px_to_mm(r_w) >= 100 and self.px_to_mm(r_h) >= 100: print("Skipping piece #%s as too big (%spx x %s px)" % ( name, r_w, r_h)) return None # position of rect of min area. # this will provide us angle to straighten image box_center, bbox, angle = cv2.minAreaRect(c) # We want our pieces to be "vertical" if bbox[0] > bbox[1]: angle += 90 bbox = (bbox[1], bbox[0]) if bbox[1] / float(bbox[0]) > 70: print("Skipping piece #%s as too too long and narrow" % name) return None # Coords of region of interest using which we should crop piece after # rotation y1 = math.floor(box_center[1] - bbox[1] / 2) x1 = math.floor(box_center[0] - bbox[0] / 2) bbox = tuple(map(int, map(math.ceil, bbox))) # A mask we use to show only piece we are currently working on piece_mask = np.zeros([height, width, 1], dtype=np.uint8) cv2.drawContours(piece_mask, [c], -1, 255, cv2.FILLED) # apply mask to original image img_crp = self.orig_img[r_y:r_y + r_h, r_x:r_x + r_w] piece_in_context = self.save_image( "pieces/%s_ctx" % name, self.orig_img[max(r_y - 10, 0):r_y + r_h + 10, max(r_x - 10, 0):r_x + r_w + 10]) mask = piece_mask[r_y:r_y + r_h, r_x:r_x + r_w] img_roi = cv2.bitwise_and(img_crp, img_crp, mask=mask) # Add alpha layer and set it to the mask img_roi = cv2.cvtColor(img_roi, cv2.COLOR_BGR2BGRA) img_roi[:, :, 3] = mask[:, :, 0] # Straighten it # Because we crop original image before rotation we save us some memory # and a lot of time but we need to adjust coords of the center of # new min area rect M = cv2.getRotationMatrix2D((box_center[0] - r_x, box_center[1] - r_y), angle, 1) # And translate an image a bit to make it fit to the bbox again. # This is done with direct editing of the transform matrix. # (Wooohoo, I know matrix-fu) M[0][2] += r_x - x1 M[1][2] += r_y - y1 # Apply rotation/transform/crop img_roi = cv2.warpAffine(img_roi, M, bbox) piece_fname = self.save_image("pieces/%s" % name, img_roi, "png") # FEATURES MAGIC BELOW # # Get our mask/contour back after the trasnform _, _, _, mask = cv2.split(img_roi) _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if len(contours) != 1: print("Piece #%s has strange contours after transform" % name) cnt = contours[0] features_fname = self.save_image("pieces/%s_mask" % name, mask, "png") base_features = { # On_sheet_* features describe the min counding box on the sheet. "on_sheet_x": r_x, "on_sheet_y": r_y, "on_sheet_width": r_w, "on_sheet_height": r_h, "on_sheet_angle": angle, "width": img_roi.shape[1], "height": img_roi.shape[0], } tags_suggestions = [] for feat in feature_extractors: fts, tags = feat.get_info(img_roi, cnt, name) base_features.update(fts) tags_suggestions += tags if tags_suggestions: print(name, tags_suggestions) return Shred( contour=c, features=base_features, features_fname=features_fname, img_roi=img_roi, name=name, piece_fname=piece_fname, piece_in_context_fname=piece_in_context, sheet=sheet_name, simplified_contour=simplified_contour, tags_suggestions=tags_suggestions, )
python
def _make_shred(self, c, name, feature_extractors, sheet_name): """Creates a Shred instances from a given contour. Args: c: cv2 contour object. name: string shred name within a sheet. feature_extractors: iterable of AbstractShredFeature instances. Returns: A new Shred instance or None on failure. """ height, width, channels = self.orig_img.shape # bounding rect of currrent contour r_x, r_y, r_w, r_h = cv2.boundingRect(c) # Generating simplified contour to use it in html epsilon = 0.01 * cv2.arcLength(c, True) simplified_contour = cv2.approxPolyDP(c, epsilon, True) # filter out too small fragments if self.px_to_mm(r_w) <= 3 or self.px_to_mm(r_h) <= 3: print("Skipping piece #%s as too small (%spx x %s px)" % ( name, r_w, r_h)) return None if self.px_to_mm(r_w) >= 100 and self.px_to_mm(r_h) >= 100: print("Skipping piece #%s as too big (%spx x %s px)" % ( name, r_w, r_h)) return None # position of rect of min area. # this will provide us angle to straighten image box_center, bbox, angle = cv2.minAreaRect(c) # We want our pieces to be "vertical" if bbox[0] > bbox[1]: angle += 90 bbox = (bbox[1], bbox[0]) if bbox[1] / float(bbox[0]) > 70: print("Skipping piece #%s as too too long and narrow" % name) return None # Coords of region of interest using which we should crop piece after # rotation y1 = math.floor(box_center[1] - bbox[1] / 2) x1 = math.floor(box_center[0] - bbox[0] / 2) bbox = tuple(map(int, map(math.ceil, bbox))) # A mask we use to show only piece we are currently working on piece_mask = np.zeros([height, width, 1], dtype=np.uint8) cv2.drawContours(piece_mask, [c], -1, 255, cv2.FILLED) # apply mask to original image img_crp = self.orig_img[r_y:r_y + r_h, r_x:r_x + r_w] piece_in_context = self.save_image( "pieces/%s_ctx" % name, self.orig_img[max(r_y - 10, 0):r_y + r_h + 10, max(r_x - 10, 0):r_x + r_w + 10]) mask = piece_mask[r_y:r_y + r_h, r_x:r_x + r_w] img_roi = cv2.bitwise_and(img_crp, img_crp, mask=mask) # Add alpha layer and set it to the mask img_roi = cv2.cvtColor(img_roi, cv2.COLOR_BGR2BGRA) img_roi[:, :, 3] = mask[:, :, 0] # Straighten it # Because we crop original image before rotation we save us some memory # and a lot of time but we need to adjust coords of the center of # new min area rect M = cv2.getRotationMatrix2D((box_center[0] - r_x, box_center[1] - r_y), angle, 1) # And translate an image a bit to make it fit to the bbox again. # This is done with direct editing of the transform matrix. # (Wooohoo, I know matrix-fu) M[0][2] += r_x - x1 M[1][2] += r_y - y1 # Apply rotation/transform/crop img_roi = cv2.warpAffine(img_roi, M, bbox) piece_fname = self.save_image("pieces/%s" % name, img_roi, "png") # FEATURES MAGIC BELOW # # Get our mask/contour back after the trasnform _, _, _, mask = cv2.split(img_roi) _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if len(contours) != 1: print("Piece #%s has strange contours after transform" % name) cnt = contours[0] features_fname = self.save_image("pieces/%s_mask" % name, mask, "png") base_features = { # On_sheet_* features describe the min counding box on the sheet. "on_sheet_x": r_x, "on_sheet_y": r_y, "on_sheet_width": r_w, "on_sheet_height": r_h, "on_sheet_angle": angle, "width": img_roi.shape[1], "height": img_roi.shape[0], } tags_suggestions = [] for feat in feature_extractors: fts, tags = feat.get_info(img_roi, cnt, name) base_features.update(fts) tags_suggestions += tags if tags_suggestions: print(name, tags_suggestions) return Shred( contour=c, features=base_features, features_fname=features_fname, img_roi=img_roi, name=name, piece_fname=piece_fname, piece_in_context_fname=piece_in_context, sheet=sheet_name, simplified_contour=simplified_contour, tags_suggestions=tags_suggestions, )
[ "def", "_make_shred", "(", "self", ",", "c", ",", "name", ",", "feature_extractors", ",", "sheet_name", ")", ":", "height", ",", "width", ",", "channels", "=", "self", ".", "orig_img", ".", "shape", "# bounding rect of currrent contour", "r_x", ",", "r_y", "...
Creates a Shred instances from a given contour. Args: c: cv2 contour object. name: string shred name within a sheet. feature_extractors: iterable of AbstractShredFeature instances. Returns: A new Shred instance or None on failure.
[ "Creates", "a", "Shred", "instances", "from", "a", "given", "contour", "." ]
train
https://github.com/dchaplinsky/unshred/blob/ca9cd6a1c6fb8c77d5424dd660ff5d2f3720c0f8/unshred/sheet.py#L244-L375
mfussenegger/cr8
cr8/fake_providers.py
GeoSpatialProvider.geo_shape
def geo_shape(self, sides=5, center=None, distance=None): """ Return a WKT string for a POLYGON with given amount of sides. The polygon is defined by its center (random point if not provided) and the distance (random distance if not provided; in km) of the points to its center. """ assert isinstance(sides, int) if distance is None: distance = self.random_int(100, 1000) else: # 6371 => earth radius in km # assert that shape radius is maximum half of earth's circumference assert isinstance(distance, int) assert distance <= EARTH_RADIUS * math.pi, \ 'distance must not be greater than half of earth\'s circumference' if center is None: # required minimal spherical distance from north/southpole dp = distance * 180.0 / EARTH_RADIUS / math.pi center = self.geo_point(lat_min=-90.0 + dp, lat_max=90.0 - dp) else: assert -180.0 <= center[0] <= 180.0, 'Longitude out of bounds' assert -90.0 <= center[1] <= 90.0, 'Latitude out of bounds' angles = list(self.random_sample(range(360), sides)) angles.sort() points = [_dest_point(center, distance, bearing, EARTH_RADIUS) for bearing in angles] # close polygon points.append(points[0]) path = ', '.join([' '.join(p) for p in ([str(lon), str(lat)] for lon, lat in points)]) return f'POLYGON (( {path} ))'
python
def geo_shape(self, sides=5, center=None, distance=None): """ Return a WKT string for a POLYGON with given amount of sides. The polygon is defined by its center (random point if not provided) and the distance (random distance if not provided; in km) of the points to its center. """ assert isinstance(sides, int) if distance is None: distance = self.random_int(100, 1000) else: # 6371 => earth radius in km # assert that shape radius is maximum half of earth's circumference assert isinstance(distance, int) assert distance <= EARTH_RADIUS * math.pi, \ 'distance must not be greater than half of earth\'s circumference' if center is None: # required minimal spherical distance from north/southpole dp = distance * 180.0 / EARTH_RADIUS / math.pi center = self.geo_point(lat_min=-90.0 + dp, lat_max=90.0 - dp) else: assert -180.0 <= center[0] <= 180.0, 'Longitude out of bounds' assert -90.0 <= center[1] <= 90.0, 'Latitude out of bounds' angles = list(self.random_sample(range(360), sides)) angles.sort() points = [_dest_point(center, distance, bearing, EARTH_RADIUS) for bearing in angles] # close polygon points.append(points[0]) path = ', '.join([' '.join(p) for p in ([str(lon), str(lat)] for lon, lat in points)]) return f'POLYGON (( {path} ))'
[ "def", "geo_shape", "(", "self", ",", "sides", "=", "5", ",", "center", "=", "None", ",", "distance", "=", "None", ")", ":", "assert", "isinstance", "(", "sides", ",", "int", ")", "if", "distance", "is", "None", ":", "distance", "=", "self", ".", "...
Return a WKT string for a POLYGON with given amount of sides. The polygon is defined by its center (random point if not provided) and the distance (random distance if not provided; in km) of the points to its center.
[ "Return", "a", "WKT", "string", "for", "a", "POLYGON", "with", "given", "amount", "of", "sides", ".", "The", "polygon", "is", "defined", "by", "its", "center", "(", "random", "point", "if", "not", "provided", ")", "and", "the", "distance", "(", "random",...
train
https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/fake_providers.py#L106-L139
mfussenegger/cr8
cr8/run_track.py
run_track
def run_track(track, result_hosts=None, crate_root=None, output_fmt=None, logfile_info=None, logfile_result=None, failfast=False, sample_mode='reservoir'): """Execute a track file""" with Logger(output_fmt=output_fmt, logfile_info=logfile_info, logfile_result=logfile_result) as log: executor = Executor( track_dir=os.path.dirname(track), log=log, result_hosts=result_hosts, crate_root=crate_root, fail_fast=failfast, sample_mode=sample_mode ) error = executor.execute(toml.load(track)) if error: sys.exit(1)
python
def run_track(track, result_hosts=None, crate_root=None, output_fmt=None, logfile_info=None, logfile_result=None, failfast=False, sample_mode='reservoir'): """Execute a track file""" with Logger(output_fmt=output_fmt, logfile_info=logfile_info, logfile_result=logfile_result) as log: executor = Executor( track_dir=os.path.dirname(track), log=log, result_hosts=result_hosts, crate_root=crate_root, fail_fast=failfast, sample_mode=sample_mode ) error = executor.execute(toml.load(track)) if error: sys.exit(1)
[ "def", "run_track", "(", "track", ",", "result_hosts", "=", "None", ",", "crate_root", "=", "None", ",", "output_fmt", "=", "None", ",", "logfile_info", "=", "None", ",", "logfile_result", "=", "None", ",", "failfast", "=", "False", ",", "sample_mode", "="...
Execute a track file
[ "Execute", "a", "track", "file" ]
train
https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/run_track.py#L104-L126
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.main
def main(self): """ Convert a NOAA text file into a lipds file. CSV files will be created if chronology or data sections are available. :return dict: Metadata Dictionary """ logger_noaa_lpd.info("enter main") # Run the file through the parser # Sets self.metadata, Creates CSVs in dir_tmp os.chdir(self.dir_tmp) os.mkdir("bag") os.chdir(self.dir_root) self.__parse() os.chdir(self.dir_bag) # Dump Metadata JSON to dir_tmp write_json_to_file(self.metadata) # Create a bagit bag finish_bag(self.dir_bag) logger_noaa_lpd.info("exit main") return
python
def main(self): """ Convert a NOAA text file into a lipds file. CSV files will be created if chronology or data sections are available. :return dict: Metadata Dictionary """ logger_noaa_lpd.info("enter main") # Run the file through the parser # Sets self.metadata, Creates CSVs in dir_tmp os.chdir(self.dir_tmp) os.mkdir("bag") os.chdir(self.dir_root) self.__parse() os.chdir(self.dir_bag) # Dump Metadata JSON to dir_tmp write_json_to_file(self.metadata) # Create a bagit bag finish_bag(self.dir_bag) logger_noaa_lpd.info("exit main") return
[ "def", "main", "(", "self", ")", ":", "logger_noaa_lpd", ".", "info", "(", "\"enter main\"", ")", "# Run the file through the parser", "# Sets self.metadata, Creates CSVs in dir_tmp", "os", ".", "chdir", "(", "self", ".", "dir_tmp", ")", "os", ".", "mkdir", "(", "...
Convert a NOAA text file into a lipds file. CSV files will be created if chronology or data sections are available. :return dict: Metadata Dictionary
[ "Convert", "a", "NOAA", "text", "file", "into", "a", "lipds", "file", ".", "CSV", "files", "will", "be", "created", "if", "chronology", "or", "data", "sections", "are", "available", ".", ":", "return", "dict", ":", "Metadata", "Dictionary" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L29-L48
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__parse
def __parse(self): """ Parse Accept the text file. We'll open it, read it, and return a compiled dictionary to write to a json file May write a chronology CSV and a data CSV if those sections are available :return: """ logger_noaa_lpd.info("enter parse") # Strings missing_str = '' data_filename = '' # Counters grant_id = 0 funding_id = 0 data_col_ct = 1 line_num = 0 # Boolean markers description_on = False publication_on = False abstract_on = False site_info_on = False chronology_on = False chron_vals_on = False variables_on = False data_vals_on = False data_on = False # Lists lat = [] lon = [] elev = [] pub = [] funding = [] temp_abstract = [] temp_description = [] data_var_names = [] data_col_list = [] data_tables = [] # All dictionaries needed to create JSON structure temp_funding = OrderedDict() temp_pub = OrderedDict() core_len = OrderedDict() geo_properties = OrderedDict() chron_dict = OrderedDict() data_dict_upper = OrderedDict() final_dict = OrderedDict() try: # Open the text file in read mode. We'll read one line at a time until EOF with open(self.filename_txt, 'r') as f: logger_noaa_lpd.info("opened noaa file: {}".format(self.filename_txt)) for line in iter(f): line_num += 1 # PUBLICATION # There can be multiple publications. Create a dictionary for each one. if publication_on: # End of the section. Add the dictionary for this one publication to the overall list if '-----' in line: temp_pub = self.__reorganize_doi(temp_pub) pub.append(temp_pub.copy()) temp_abstract.clear() temp_pub.clear() publication_on = False logger_noaa_lpd.info("end section: Publication") elif abstract_on: # End of abstract: possibly more variables after. if "#" in line: abstract_on = False temp_pub['abstract'] = ''.join(temp_abstract) logger_noaa_lpd.info("end section: Abstract") line = self.__str_cleanup(line) key, value = self.__slice_key_val(line) temp_pub[self.__camel_case(key)] = value else: temp_abstract.append(self.__str_cleanup(line)) # Add all info into the current publication dictionary else: line = self.__str_cleanup(line) key, value = self.__slice_key_val(line) if key in ("Author", "Authors"): temp_pub["author"] = self.__reorganize_authors(value) else: temp_pub[self.__camel_case(key)] = value if key == 'Abstract': logger_noaa_lpd.info("reading section: Abstract") abstract_on = True temp_abstract.append(value) # DESCRIPTION AND NOTES # Descriptions are often long paragraphs spanning multiple lines, but don't follow the key/value format elif description_on: # End of the section. Turn marker off and combine all the lines in the section if '-------' in line: description_on = False value = ''.join(temp_description) final_dict['description'] = value logger_noaa_lpd.info("end section: Description_and_Notes") # The first line in the section. Split into key, value elif 'Description:' in line: key, val = self.__slice_key_val(line) temp_description.append(val) # Keep a running list of all lines in the section else: line = self.__str_cleanup(line) temp_description.append(line) # SITE INFORMATION (Geo) elif site_info_on: if '-------' in line: site_info_on = False logger_noaa_lpd.info("end section: Site_Information") else: line = self.__str_cleanup(line) key, value = self.__slice_key_val(line) if key.lower() in ["northernmost_latitude", "southernmost_latitude"]: lat.append(self.__convert_num(value)) elif key.lower() in ["easternmost_longitude", "westernmost_longitude"]: lon.append(self.__convert_num(value)) elif key.lower() in ["site_name", "location", "country", "elevation"]: if key.lower() == 'elevation': val, unit = self.__split_name_unit(value) elev.append(val) else: geo_properties[self.__camel_case(key)] = value # CHRONOLOGY elif chronology_on: """ HOW IT WORKS: Chronology will be started at "Chronology:" section header Every line starting with a "#" will be ignored The first line without a "#" is considered the variable header line. Variable names are parsed. Each following line will be considered column data and sorted accordingly. Once the "-----" barrier is reached, we exit the chronology section. """ # When reaching the end of the chron section, set the marker to off and close the CSV file if '-------' in line: # Turn off markers to exit section chronology_on = False chron_vals_on = False try: # If nothing between the chronology start and the end barrier, then there won't be a CSV if chron_start_line != line_num - 1: try: chron_csv.close() logger_noaa_lpd.info("parse: chronology: no data found in chronology section") except NameError: logger_noaa_lpd.debug( "parse: chronology_on: NameError: chron_csv ref before assignment, {}".format( self.filename_txt)) print( "Chronology section is incorrectly formatted. " "Section data will not be converted") logger_noaa_lpd.info("end section: Chronology") except NameError: logger_noaa_lpd.debug( "parse: chronology_on: NameError: chron_start_line ref before assignment, {}".format( self.filename_txt)) print("Chronology section is incorrectly formatted. Section data will not be converted") # Data values line. Split, then write to CSV file elif chron_vals_on: values = line.split() try: cw.writerow(values) except NameError: logger_noaa_lpd.debug( "parse: chronology_on: NameError: csv writer ref before assignment, {}".format( self.filename_txt)) print("Chronology section is incorrectly formatted. Section data will not be converted") else: try: # Chron variable headers line if line and line[0] != "#": chron_filename = self.dsn + '.chron1.measurementTable1.csv' # Organize the var header into a dictionary variables = self.__reorganize_chron_header(line) # Create a dictionary of info for each column chron_col_list = self.__create_chron_cols(variables) chron_dict['filename'] = chron_filename chron_dict['chronTableName'] = 'Chronology' chron_dict['columns'] = chron_col_list # Open CSV for writing csv_path = os.path.join(self.dir_bag, chron_filename) chron_csv = open(csv_path, 'w+', newline='') logger_noaa_lpd.info("opened csv file: {}".format(chron_filename)) cw = csv.writer(chron_csv) # Turn the marker on to start processing the values columns chron_vals_on = True except IndexError: logger_noaa_lpd.debug("parse: chronology: IndexError when attempting chron var header") # VARIABLES elif variables_on: """ HOW IT WORKS: Variable lines are the only lines that have a "##" in front of them. Ignore all lines that don't match the "##" regex. Once there's a match, start parsing the variable lines, and create a column entry for each line. """ process_line = False # End of the section. Turn marker off if "------" in line: variables_on = False logger_noaa_lpd.info("end section: Variables") for item in NOAA_VAR_LINES: if item.lower() in line.lower(): process_line = False for item in NOAA_EMPTY: if item == line: process_line = False m = re.match(re_var, line) if m: process_line = True # If the line isn't in the ignore list, then it's a variable line if process_line: # Split the line items, and cleanup cleaned_line = self.__separate_data_vars(line) # Add the items into a column dictionary data_col_dict = self.__create_paleo_col(cleaned_line, data_col_ct) # Keep a list of all variable names try: # Use this list later to cross check with the variable line in the Data section data_var_names.append(data_col_dict['variableName']) except KeyError: data_var_names.append('') logger_noaa_lpd.warn("parse: variables: " "KeyError: {} not found in {}".format("variableName", "data_col_dict")) # Add the column dictionary into a final dictionary data_col_list.append(data_col_dict) data_col_ct += 1 # DATA # Missing Value, Create data columns, and output Data CSV elif data_on: """ HOW IT WORKS: Capture the "Missing Value" entry, if it exists. Data lines should not have a "#" in front of them. The first line without a "#" should be the variable header line All lines that follow should have column data. """ # Do not process blank or template lines process_line = True for item in NOAA_DATA_LINES: if item in line: process_line = False for item in NOAA_EMPTY: if item == line: process_line = False for item in ALTS_MV: # Missing value found. Store entry if item in line.lower(): process_line = False line = self.__str_cleanup(line) key, missing_str = self.__slice_key_val(line) if process_line: # Split the line at each space (There SHOULD one space between each variable. Not always true) values = line.split() # Write all data values to CSV if data_vals_on: try: dw.writerow(values) except NameError: logger_noaa_lpd.debug( "parse: data_on: NameError: csv writer ref before assignment, {}".format( self.filename_txt)) # Check for the line of variables else: var = self.__str_cleanup(values[0].lstrip()) # Check if a variable name is in the current line if var.lower() in line.lower(): data_vals_on = True logger_noaa_lpd.info("start section: Data_Values") # Open CSV for writing data_filename = "{}.paleoData1.measurementTable1.csv".format(self.dsn) csv_path = os.path.join(self.dir_bag, data_filename) data_csv = open(csv_path, 'w+', newline='') logger_noaa_lpd.info("opened csv file: {}".format(data_filename)) dw = csv.writer(data_csv) # METADATA else: # Line Continuation: Sometimes there are items that span a few lines. # If this happens, we want to combine them all properly into one entry. if '#' not in line and line not in NOAA_EMPTY and old_val: if old_key in ('funding', 'agency'): try: temp_funding[old_key] = old_val + line except KeyError as e: logger_noaa_lpd.debug( "parse: metadata: line continuation: {} not found in {}, {}".format(old_key, "temp_funding", e)) else: try: final_dict[old_key] = old_val + line except KeyError as e: logger_noaa_lpd.debug( "parse: metadata: line continuation: {} not found in {}, {}".format(old_key, "temp_funding", e)) # No Line Continuation: This is the start or a new entry else: line = self.__str_cleanup(line) # Grab the key and value from the current line try: # Split the line into key, value pieces key, value = self.__slice_key_val(line) l_key = key.lower() cc_key= self.__camel_case(key) # If there is no value, then we are at a section header. # Data often has a blank value, so that is a special check. if not value or l_key == 'data': # Turn on markers if we run into section headers if l_key == 'description_and_notes': description_on = True logger_noaa_lpd.info("reading section: Description_and_Notes") elif l_key == 'publication': publication_on = True logger_noaa_lpd.info("reading section: Publication") elif l_key == 'site_information': site_info_on = True logger_noaa_lpd.info("reading section: Site_Information") elif l_key == 'chronology': chronology_on = True logger_noaa_lpd.info("reading section: Chronology") chron_start_line = line_num elif l_key == 'variables': variables_on = True logger_noaa_lpd.info("reading section: Variables") elif l_key == 'data': data_on = True logger_noaa_lpd.info("reading section: Data") # For all else: # Ignore any entries that are specified in the skip list _ignore = [item.lower() for item in NOAA_KEYS_BY_SECTION["Ignore"]] if l_key not in _ignore: # There can be multiple funding agencies and grants. Keep a list of dict entries _funding = [item.lower() for item in NOAA_KEYS_BY_SECTION["Funding_Agency"]] if l_key in _funding: if l_key == 'funding_agency_name': funding_id += 1 key = 'agency' elif l_key == 'grant': grant_id += 1 key = 'grant' temp_funding[key] = value # If both counters are matching, we are ready to add content to the funding list if grant_id == funding_id: funding.append(temp_funding.copy()) temp_funding.clear() else: # There's likely two "Online_Resource"s, and we need both, so check and concat if cc_key == "onlineResource": # If it exists, append. If not, add entry as a list if cc_key in final_dict: final_dict[cc_key].append(value) else: final_dict[cc_key] = [value] else: final_dict[cc_key] = value # Keep track of old key in case we have a line continuation old_key = key old_val = value.strip() except TypeError as e: logger_noaa_lpd.warn( "parse: TypeError: none type received from slice_key_val, {}".format(e)) # Wait to close the data CSV until we reached the end of the text file try: data_csv.close() logger_noaa_lpd.info("end section: Data_Values") logger_noaa_lpd.info("end section: Data") except NameError as e: print("Error: NOAA text file is contains format errors. Unable to process.") logger_noaa_lpd.debug( "parse: NameError: failed to close csv, invalid formatting in NOAA txt file, {}".format(e)) # Piece together measurements block logger_noaa_lpd.info("compiling final paleoData") data_dict_upper['filename'] = data_filename data_dict_upper['paleoDataTableName'] = 'Data' data_dict_upper['missingValue'] = missing_str data_dict_upper['columns'] = data_col_list data_tables.append(data_dict_upper) # Piece together geo block logger_noaa_lpd.info("compiling final geo") geo = self.__create_coordinates(lat, lon, elev) geo['properties'] = geo_properties # Piece together final dictionary logger_noaa_lpd.info("compiling final master") final_dict['pub'] = pub final_dict['funding'] = funding final_dict['geo'] = geo final_dict['coreLength'] = core_len final_dict['chronData'] = [{"chronMeasurementTable": chron_dict}] final_dict['paleoData'] = data_tables self.metadata = final_dict logger_noaa_lpd.info("final dictionary compiled") # Start cleaning up the metadata logger_noaa_lpd.info("removing empty fields") self.metadata = rm_empty_fields(self.metadata) logger_noaa_lpd.info("removing empty doi") self.metadata = rm_empty_doi(self.metadata) logger_noaa_lpd.info("removing irrelevant keys") self.__remove_irr_fields() except Exception as e: logger_noaa_lpd.debug("parse: {}".format(e)) logger_noaa_lpd.info("exit parse") return
python
def __parse(self): """ Parse Accept the text file. We'll open it, read it, and return a compiled dictionary to write to a json file May write a chronology CSV and a data CSV if those sections are available :return: """ logger_noaa_lpd.info("enter parse") # Strings missing_str = '' data_filename = '' # Counters grant_id = 0 funding_id = 0 data_col_ct = 1 line_num = 0 # Boolean markers description_on = False publication_on = False abstract_on = False site_info_on = False chronology_on = False chron_vals_on = False variables_on = False data_vals_on = False data_on = False # Lists lat = [] lon = [] elev = [] pub = [] funding = [] temp_abstract = [] temp_description = [] data_var_names = [] data_col_list = [] data_tables = [] # All dictionaries needed to create JSON structure temp_funding = OrderedDict() temp_pub = OrderedDict() core_len = OrderedDict() geo_properties = OrderedDict() chron_dict = OrderedDict() data_dict_upper = OrderedDict() final_dict = OrderedDict() try: # Open the text file in read mode. We'll read one line at a time until EOF with open(self.filename_txt, 'r') as f: logger_noaa_lpd.info("opened noaa file: {}".format(self.filename_txt)) for line in iter(f): line_num += 1 # PUBLICATION # There can be multiple publications. Create a dictionary for each one. if publication_on: # End of the section. Add the dictionary for this one publication to the overall list if '-----' in line: temp_pub = self.__reorganize_doi(temp_pub) pub.append(temp_pub.copy()) temp_abstract.clear() temp_pub.clear() publication_on = False logger_noaa_lpd.info("end section: Publication") elif abstract_on: # End of abstract: possibly more variables after. if "#" in line: abstract_on = False temp_pub['abstract'] = ''.join(temp_abstract) logger_noaa_lpd.info("end section: Abstract") line = self.__str_cleanup(line) key, value = self.__slice_key_val(line) temp_pub[self.__camel_case(key)] = value else: temp_abstract.append(self.__str_cleanup(line)) # Add all info into the current publication dictionary else: line = self.__str_cleanup(line) key, value = self.__slice_key_val(line) if key in ("Author", "Authors"): temp_pub["author"] = self.__reorganize_authors(value) else: temp_pub[self.__camel_case(key)] = value if key == 'Abstract': logger_noaa_lpd.info("reading section: Abstract") abstract_on = True temp_abstract.append(value) # DESCRIPTION AND NOTES # Descriptions are often long paragraphs spanning multiple lines, but don't follow the key/value format elif description_on: # End of the section. Turn marker off and combine all the lines in the section if '-------' in line: description_on = False value = ''.join(temp_description) final_dict['description'] = value logger_noaa_lpd.info("end section: Description_and_Notes") # The first line in the section. Split into key, value elif 'Description:' in line: key, val = self.__slice_key_val(line) temp_description.append(val) # Keep a running list of all lines in the section else: line = self.__str_cleanup(line) temp_description.append(line) # SITE INFORMATION (Geo) elif site_info_on: if '-------' in line: site_info_on = False logger_noaa_lpd.info("end section: Site_Information") else: line = self.__str_cleanup(line) key, value = self.__slice_key_val(line) if key.lower() in ["northernmost_latitude", "southernmost_latitude"]: lat.append(self.__convert_num(value)) elif key.lower() in ["easternmost_longitude", "westernmost_longitude"]: lon.append(self.__convert_num(value)) elif key.lower() in ["site_name", "location", "country", "elevation"]: if key.lower() == 'elevation': val, unit = self.__split_name_unit(value) elev.append(val) else: geo_properties[self.__camel_case(key)] = value # CHRONOLOGY elif chronology_on: """ HOW IT WORKS: Chronology will be started at "Chronology:" section header Every line starting with a "#" will be ignored The first line without a "#" is considered the variable header line. Variable names are parsed. Each following line will be considered column data and sorted accordingly. Once the "-----" barrier is reached, we exit the chronology section. """ # When reaching the end of the chron section, set the marker to off and close the CSV file if '-------' in line: # Turn off markers to exit section chronology_on = False chron_vals_on = False try: # If nothing between the chronology start and the end barrier, then there won't be a CSV if chron_start_line != line_num - 1: try: chron_csv.close() logger_noaa_lpd.info("parse: chronology: no data found in chronology section") except NameError: logger_noaa_lpd.debug( "parse: chronology_on: NameError: chron_csv ref before assignment, {}".format( self.filename_txt)) print( "Chronology section is incorrectly formatted. " "Section data will not be converted") logger_noaa_lpd.info("end section: Chronology") except NameError: logger_noaa_lpd.debug( "parse: chronology_on: NameError: chron_start_line ref before assignment, {}".format( self.filename_txt)) print("Chronology section is incorrectly formatted. Section data will not be converted") # Data values line. Split, then write to CSV file elif chron_vals_on: values = line.split() try: cw.writerow(values) except NameError: logger_noaa_lpd.debug( "parse: chronology_on: NameError: csv writer ref before assignment, {}".format( self.filename_txt)) print("Chronology section is incorrectly formatted. Section data will not be converted") else: try: # Chron variable headers line if line and line[0] != "#": chron_filename = self.dsn + '.chron1.measurementTable1.csv' # Organize the var header into a dictionary variables = self.__reorganize_chron_header(line) # Create a dictionary of info for each column chron_col_list = self.__create_chron_cols(variables) chron_dict['filename'] = chron_filename chron_dict['chronTableName'] = 'Chronology' chron_dict['columns'] = chron_col_list # Open CSV for writing csv_path = os.path.join(self.dir_bag, chron_filename) chron_csv = open(csv_path, 'w+', newline='') logger_noaa_lpd.info("opened csv file: {}".format(chron_filename)) cw = csv.writer(chron_csv) # Turn the marker on to start processing the values columns chron_vals_on = True except IndexError: logger_noaa_lpd.debug("parse: chronology: IndexError when attempting chron var header") # VARIABLES elif variables_on: """ HOW IT WORKS: Variable lines are the only lines that have a "##" in front of them. Ignore all lines that don't match the "##" regex. Once there's a match, start parsing the variable lines, and create a column entry for each line. """ process_line = False # End of the section. Turn marker off if "------" in line: variables_on = False logger_noaa_lpd.info("end section: Variables") for item in NOAA_VAR_LINES: if item.lower() in line.lower(): process_line = False for item in NOAA_EMPTY: if item == line: process_line = False m = re.match(re_var, line) if m: process_line = True # If the line isn't in the ignore list, then it's a variable line if process_line: # Split the line items, and cleanup cleaned_line = self.__separate_data_vars(line) # Add the items into a column dictionary data_col_dict = self.__create_paleo_col(cleaned_line, data_col_ct) # Keep a list of all variable names try: # Use this list later to cross check with the variable line in the Data section data_var_names.append(data_col_dict['variableName']) except KeyError: data_var_names.append('') logger_noaa_lpd.warn("parse: variables: " "KeyError: {} not found in {}".format("variableName", "data_col_dict")) # Add the column dictionary into a final dictionary data_col_list.append(data_col_dict) data_col_ct += 1 # DATA # Missing Value, Create data columns, and output Data CSV elif data_on: """ HOW IT WORKS: Capture the "Missing Value" entry, if it exists. Data lines should not have a "#" in front of them. The first line without a "#" should be the variable header line All lines that follow should have column data. """ # Do not process blank or template lines process_line = True for item in NOAA_DATA_LINES: if item in line: process_line = False for item in NOAA_EMPTY: if item == line: process_line = False for item in ALTS_MV: # Missing value found. Store entry if item in line.lower(): process_line = False line = self.__str_cleanup(line) key, missing_str = self.__slice_key_val(line) if process_line: # Split the line at each space (There SHOULD one space between each variable. Not always true) values = line.split() # Write all data values to CSV if data_vals_on: try: dw.writerow(values) except NameError: logger_noaa_lpd.debug( "parse: data_on: NameError: csv writer ref before assignment, {}".format( self.filename_txt)) # Check for the line of variables else: var = self.__str_cleanup(values[0].lstrip()) # Check if a variable name is in the current line if var.lower() in line.lower(): data_vals_on = True logger_noaa_lpd.info("start section: Data_Values") # Open CSV for writing data_filename = "{}.paleoData1.measurementTable1.csv".format(self.dsn) csv_path = os.path.join(self.dir_bag, data_filename) data_csv = open(csv_path, 'w+', newline='') logger_noaa_lpd.info("opened csv file: {}".format(data_filename)) dw = csv.writer(data_csv) # METADATA else: # Line Continuation: Sometimes there are items that span a few lines. # If this happens, we want to combine them all properly into one entry. if '#' not in line and line not in NOAA_EMPTY and old_val: if old_key in ('funding', 'agency'): try: temp_funding[old_key] = old_val + line except KeyError as e: logger_noaa_lpd.debug( "parse: metadata: line continuation: {} not found in {}, {}".format(old_key, "temp_funding", e)) else: try: final_dict[old_key] = old_val + line except KeyError as e: logger_noaa_lpd.debug( "parse: metadata: line continuation: {} not found in {}, {}".format(old_key, "temp_funding", e)) # No Line Continuation: This is the start or a new entry else: line = self.__str_cleanup(line) # Grab the key and value from the current line try: # Split the line into key, value pieces key, value = self.__slice_key_val(line) l_key = key.lower() cc_key= self.__camel_case(key) # If there is no value, then we are at a section header. # Data often has a blank value, so that is a special check. if not value or l_key == 'data': # Turn on markers if we run into section headers if l_key == 'description_and_notes': description_on = True logger_noaa_lpd.info("reading section: Description_and_Notes") elif l_key == 'publication': publication_on = True logger_noaa_lpd.info("reading section: Publication") elif l_key == 'site_information': site_info_on = True logger_noaa_lpd.info("reading section: Site_Information") elif l_key == 'chronology': chronology_on = True logger_noaa_lpd.info("reading section: Chronology") chron_start_line = line_num elif l_key == 'variables': variables_on = True logger_noaa_lpd.info("reading section: Variables") elif l_key == 'data': data_on = True logger_noaa_lpd.info("reading section: Data") # For all else: # Ignore any entries that are specified in the skip list _ignore = [item.lower() for item in NOAA_KEYS_BY_SECTION["Ignore"]] if l_key not in _ignore: # There can be multiple funding agencies and grants. Keep a list of dict entries _funding = [item.lower() for item in NOAA_KEYS_BY_SECTION["Funding_Agency"]] if l_key in _funding: if l_key == 'funding_agency_name': funding_id += 1 key = 'agency' elif l_key == 'grant': grant_id += 1 key = 'grant' temp_funding[key] = value # If both counters are matching, we are ready to add content to the funding list if grant_id == funding_id: funding.append(temp_funding.copy()) temp_funding.clear() else: # There's likely two "Online_Resource"s, and we need both, so check and concat if cc_key == "onlineResource": # If it exists, append. If not, add entry as a list if cc_key in final_dict: final_dict[cc_key].append(value) else: final_dict[cc_key] = [value] else: final_dict[cc_key] = value # Keep track of old key in case we have a line continuation old_key = key old_val = value.strip() except TypeError as e: logger_noaa_lpd.warn( "parse: TypeError: none type received from slice_key_val, {}".format(e)) # Wait to close the data CSV until we reached the end of the text file try: data_csv.close() logger_noaa_lpd.info("end section: Data_Values") logger_noaa_lpd.info("end section: Data") except NameError as e: print("Error: NOAA text file is contains format errors. Unable to process.") logger_noaa_lpd.debug( "parse: NameError: failed to close csv, invalid formatting in NOAA txt file, {}".format(e)) # Piece together measurements block logger_noaa_lpd.info("compiling final paleoData") data_dict_upper['filename'] = data_filename data_dict_upper['paleoDataTableName'] = 'Data' data_dict_upper['missingValue'] = missing_str data_dict_upper['columns'] = data_col_list data_tables.append(data_dict_upper) # Piece together geo block logger_noaa_lpd.info("compiling final geo") geo = self.__create_coordinates(lat, lon, elev) geo['properties'] = geo_properties # Piece together final dictionary logger_noaa_lpd.info("compiling final master") final_dict['pub'] = pub final_dict['funding'] = funding final_dict['geo'] = geo final_dict['coreLength'] = core_len final_dict['chronData'] = [{"chronMeasurementTable": chron_dict}] final_dict['paleoData'] = data_tables self.metadata = final_dict logger_noaa_lpd.info("final dictionary compiled") # Start cleaning up the metadata logger_noaa_lpd.info("removing empty fields") self.metadata = rm_empty_fields(self.metadata) logger_noaa_lpd.info("removing empty doi") self.metadata = rm_empty_doi(self.metadata) logger_noaa_lpd.info("removing irrelevant keys") self.__remove_irr_fields() except Exception as e: logger_noaa_lpd.debug("parse: {}".format(e)) logger_noaa_lpd.info("exit parse") return
[ "def", "__parse", "(", "self", ")", ":", "logger_noaa_lpd", ".", "info", "(", "\"enter parse\"", ")", "# Strings", "missing_str", "=", "''", "data_filename", "=", "''", "# Counters", "grant_id", "=", "0", "funding_id", "=", "0", "data_col_ct", "=", "1", "lin...
Parse Accept the text file. We'll open it, read it, and return a compiled dictionary to write to a json file May write a chronology CSV and a data CSV if those sections are available :return:
[ "Parse", "Accept", "the", "text", "file", ".", "We", "ll", "open", "it", "read", "it", "and", "return", "a", "compiled", "dictionary", "to", "write", "to", "a", "json", "file", "May", "write", "a", "chronology", "CSV", "and", "a", "data", "CSV", "if", ...
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L50-L492
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__create_paleo_col
def __create_paleo_col(l, col_count): """ Receive split list from separate_data_vars, and turn it into a dictionary for that column :param list l: :param int col_count: :return dict: """ # Format: what, material, error, units, seasonality, archive, detail, method, # C or N for Character or Numeric data, direction of relation to climate (positive or negative) d = OrderedDict() d['number'] = col_count for idx, var_name in enumerate(NOAA_KEYS_BY_SECTION["Variables"]): try: value = l[idx] # These two cases are nested in the column, so treat them special if var_name == "seasonality": d["climateInterpretation"] = {var_name: value} elif var_name == "uncertainty": d["calibration"] = {var_name: value} # All other cases are root items in the column, so add normally else: d[var_name] = value except IndexError as e: logger_noaa_lpd.debug("create_var_col: IndexError: var: {}, {}".format(var_name, e)) return d
python
def __create_paleo_col(l, col_count): """ Receive split list from separate_data_vars, and turn it into a dictionary for that column :param list l: :param int col_count: :return dict: """ # Format: what, material, error, units, seasonality, archive, detail, method, # C or N for Character or Numeric data, direction of relation to climate (positive or negative) d = OrderedDict() d['number'] = col_count for idx, var_name in enumerate(NOAA_KEYS_BY_SECTION["Variables"]): try: value = l[idx] # These two cases are nested in the column, so treat them special if var_name == "seasonality": d["climateInterpretation"] = {var_name: value} elif var_name == "uncertainty": d["calibration"] = {var_name: value} # All other cases are root items in the column, so add normally else: d[var_name] = value except IndexError as e: logger_noaa_lpd.debug("create_var_col: IndexError: var: {}, {}".format(var_name, e)) return d
[ "def", "__create_paleo_col", "(", "l", ",", "col_count", ")", ":", "# Format: what, material, error, units, seasonality, archive, detail, method,", "# C or N for Character or Numeric data, direction of relation to climate (positive or negative)", "d", "=", "OrderedDict", "(", ")", "d",...
Receive split list from separate_data_vars, and turn it into a dictionary for that column :param list l: :param int col_count: :return dict:
[ "Receive", "split", "list", "from", "separate_data_vars", "and", "turn", "it", "into", "a", "dictionary", "for", "that", "column", ":", "param", "list", "l", ":", ":", "param", "int", "col_count", ":", ":", "return", "dict", ":" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L495-L519
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__separate_data_vars
def __separate_data_vars(line): """ For the variables section, clean up the line and return a list of each of the 10 items :param str line: :return str: """ combine = [] if '#' in line: line = line.replace("#", "") line = line.lstrip() if line not in NOAA_EMPTY and line not in EMPTY: m = re.match(re_var_split, line) if m: combine.append(m.group(1)) attr = m.group(2).split(',') combine += attr for index, string in enumerate(combine): combine[index] = string.strip() # for i, s in enumerate(combine): # if not s or s in NOAA_EMPTY: # del combine[i] return combine
python
def __separate_data_vars(line): """ For the variables section, clean up the line and return a list of each of the 10 items :param str line: :return str: """ combine = [] if '#' in line: line = line.replace("#", "") line = line.lstrip() if line not in NOAA_EMPTY and line not in EMPTY: m = re.match(re_var_split, line) if m: combine.append(m.group(1)) attr = m.group(2).split(',') combine += attr for index, string in enumerate(combine): combine[index] = string.strip() # for i, s in enumerate(combine): # if not s or s in NOAA_EMPTY: # del combine[i] return combine
[ "def", "__separate_data_vars", "(", "line", ")", ":", "combine", "=", "[", "]", "if", "'#'", "in", "line", ":", "line", "=", "line", ".", "replace", "(", "\"#\"", ",", "\"\"", ")", "line", "=", "line", ".", "lstrip", "(", ")", "if", "line", "not", ...
For the variables section, clean up the line and return a list of each of the 10 items :param str line: :return str:
[ "For", "the", "variables", "section", "clean", "up", "the", "line", "and", "return", "a", "list", "of", "each", "of", "the", "10", "items", ":", "param", "str", "line", ":", ":", "return", "str", ":" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L522-L543
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__convert_num
def __convert_num(number): """ All path items are automatically strings. If you think it's an int or float, this attempts to convert it. :param str number: :return float or str: """ try: return float(number) except ValueError as e: logger_noaa_lpd.warn("convert_num: ValueError: {}".format(e)) return number
python
def __convert_num(number): """ All path items are automatically strings. If you think it's an int or float, this attempts to convert it. :param str number: :return float or str: """ try: return float(number) except ValueError as e: logger_noaa_lpd.warn("convert_num: ValueError: {}".format(e)) return number
[ "def", "__convert_num", "(", "number", ")", ":", "try", ":", "return", "float", "(", "number", ")", "except", "ValueError", "as", "e", ":", "logger_noaa_lpd", ".", "warn", "(", "\"convert_num: ValueError: {}\"", ".", "format", "(", "e", ")", ")", "return", ...
All path items are automatically strings. If you think it's an int or float, this attempts to convert it. :param str number: :return float or str:
[ "All", "path", "items", "are", "automatically", "strings", ".", "If", "you", "think", "it", "s", "an", "int", "or", "float", "this", "attempts", "to", "convert", "it", ".", ":", "param", "str", "number", ":", ":", "return", "float", "or", "str", ":" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L546-L556
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__camel_case
def __camel_case(word): """ Convert underscore naming into camel case naming :param str word: :return str: """ word = word.lower() if '_' in word: split_word = word.split('_') else: split_word = word.split() if len(split_word) > 0: for i, word in enumerate(split_word): if i > 0: split_word[i] = word.title() strings = ''.join(split_word) return strings
python
def __camel_case(word): """ Convert underscore naming into camel case naming :param str word: :return str: """ word = word.lower() if '_' in word: split_word = word.split('_') else: split_word = word.split() if len(split_word) > 0: for i, word in enumerate(split_word): if i > 0: split_word[i] = word.title() strings = ''.join(split_word) return strings
[ "def", "__camel_case", "(", "word", ")", ":", "word", "=", "word", ".", "lower", "(", ")", "if", "'_'", "in", "word", ":", "split_word", "=", "word", ".", "split", "(", "'_'", ")", "else", ":", "split_word", "=", "word", ".", "split", "(", ")", "...
Convert underscore naming into camel case naming :param str word: :return str:
[ "Convert", "underscore", "naming", "into", "camel", "case", "naming", ":", "param", "str", "word", ":", ":", "return", "str", ":" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L559-L575
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__name_unit_regex
def __name_unit_regex(word): """ Split a name and unit that are bunched together (i.e. '250m') :param str word: :return str str: """ value = "" unit = "" r = re.findall(re_name_unit, word) try: value = r[0][0] except IndexError as e: logger_noaa_lpd.warn("name_unit_regex: IndexError: value: {}, {}, {}".format(word, r, e)) try: unit = r[0][1] # Replace unit with correct synonym. if unit.lower() in UNITS: unit = UNITS[unit] except IndexError as e: logger_noaa_lpd.warn("name_unit_regex: IndexError: unit: {}, {}, {}".format(word, r, e)) if value: try: value = float(value) except ValueError as e: logger_noaa_lpd.warn("name_unit_regex: ValueError: val: {}, {}".format(value, e)) return value, unit
python
def __name_unit_regex(word): """ Split a name and unit that are bunched together (i.e. '250m') :param str word: :return str str: """ value = "" unit = "" r = re.findall(re_name_unit, word) try: value = r[0][0] except IndexError as e: logger_noaa_lpd.warn("name_unit_regex: IndexError: value: {}, {}, {}".format(word, r, e)) try: unit = r[0][1] # Replace unit with correct synonym. if unit.lower() in UNITS: unit = UNITS[unit] except IndexError as e: logger_noaa_lpd.warn("name_unit_regex: IndexError: unit: {}, {}, {}".format(word, r, e)) if value: try: value = float(value) except ValueError as e: logger_noaa_lpd.warn("name_unit_regex: ValueError: val: {}, {}".format(value, e)) return value, unit
[ "def", "__name_unit_regex", "(", "word", ")", ":", "value", "=", "\"\"", "unit", "=", "\"\"", "r", "=", "re", ".", "findall", "(", "re_name_unit", ",", "word", ")", "try", ":", "value", "=", "r", "[", "0", "]", "[", "0", "]", "except", "IndexError"...
Split a name and unit that are bunched together (i.e. '250m') :param str word: :return str str:
[ "Split", "a", "name", "and", "unit", "that", "are", "bunched", "together", "(", "i", ".", "e", ".", "250m", ")", ":", "param", "str", "word", ":", ":", "return", "str", "str", ":" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L578-L603
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__split_name_unit
def __split_name_unit(self, line): """ Split a string that has value and unit as one. :param str line: :return str str: """ vals = [] unit = '' if line != '' or line != ' ': # If there are parenthesis, remove them line = line.replace('(', '').replace(')', '') # When value and units are a range (i.e. '100 m - 200 m'). if re.match(re_name_unit_range, line): m = re.findall(re_name_unit_range, line) if m: for group in m: for item in group: try: val = float(item) vals.append(val) except ValueError: if item: unit = item # Piece the number range back together. if len(vals) == 1: value = vals[0] else: value = str(vals[0]) + ' to ' + str(vals[1]) else: value, unit = self.__name_unit_regex(line) return value, unit
python
def __split_name_unit(self, line): """ Split a string that has value and unit as one. :param str line: :return str str: """ vals = [] unit = '' if line != '' or line != ' ': # If there are parenthesis, remove them line = line.replace('(', '').replace(')', '') # When value and units are a range (i.e. '100 m - 200 m'). if re.match(re_name_unit_range, line): m = re.findall(re_name_unit_range, line) if m: for group in m: for item in group: try: val = float(item) vals.append(val) except ValueError: if item: unit = item # Piece the number range back together. if len(vals) == 1: value = vals[0] else: value = str(vals[0]) + ' to ' + str(vals[1]) else: value, unit = self.__name_unit_regex(line) return value, unit
[ "def", "__split_name_unit", "(", "self", ",", "line", ")", ":", "vals", "=", "[", "]", "unit", "=", "''", "if", "line", "!=", "''", "or", "line", "!=", "' '", ":", "# If there are parenthesis, remove them", "line", "=", "line", ".", "replace", "(", "'('"...
Split a string that has value and unit as one. :param str line: :return str str:
[ "Split", "a", "string", "that", "has", "value", "and", "unit", "as", "one", ".", ":", "param", "str", "line", ":", ":", "return", "str", "str", ":" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L614-L644
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__str_cleanup
def __str_cleanup(line): """ Remove the unnecessary characters in the line that we don't want :param str line: :return str: """ if '#' in line: line = line.replace("#", "") line = line.strip() if '-----------' in line: line = '' return line
python
def __str_cleanup(line): """ Remove the unnecessary characters in the line that we don't want :param str line: :return str: """ if '#' in line: line = line.replace("#", "") line = line.strip() if '-----------' in line: line = '' return line
[ "def", "__str_cleanup", "(", "line", ")", ":", "if", "'#'", "in", "line", ":", "line", "=", "line", ".", "replace", "(", "\"#\"", ",", "\"\"", ")", "line", "=", "line", ".", "strip", "(", ")", "if", "'-----------'", "in", "line", ":", "line", "=", ...
Remove the unnecessary characters in the line that we don't want :param str line: :return str:
[ "Remove", "the", "unnecessary", "characters", "in", "the", "line", "that", "we", "don", "t", "want", ":", "param", "str", "line", ":", ":", "return", "str", ":" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L647-L658
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__slice_key_val
def __slice_key_val(line): """ Get the key and value items from a line by looking for and lines that have a ":" :param str line: :return str str: Key, Value """ position = line.find(":") # If value is -1, that means the item was not found in the string. if position != -1: key = line[:position] value = line[position + 1:] value = value.lstrip() return key, value else: key = line value = None return key, value
python
def __slice_key_val(line): """ Get the key and value items from a line by looking for and lines that have a ":" :param str line: :return str str: Key, Value """ position = line.find(":") # If value is -1, that means the item was not found in the string. if position != -1: key = line[:position] value = line[position + 1:] value = value.lstrip() return key, value else: key = line value = None return key, value
[ "def", "__slice_key_val", "(", "line", ")", ":", "position", "=", "line", ".", "find", "(", "\":\"", ")", "# If value is -1, that means the item was not found in the string.", "if", "position", "!=", "-", "1", ":", "key", "=", "line", "[", ":", "position", "]", ...
Get the key and value items from a line by looking for and lines that have a ":" :param str line: :return str str: Key, Value
[ "Get", "the", "key", "and", "value", "items", "from", "a", "line", "by", "looking", "for", "and", "lines", "that", "have", "a", ":", ":", "param", "str", "line", ":", ":", "return", "str", "str", ":", "Key", "Value" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L661-L677
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__create_coordinates
def __create_coordinates(self, lat, lon, elev): """ GeoJSON standard: Use to determine 2-point or 4-point coordinates :param list lat: :param list lon: :return dict: """ # Sort lat an lon in numerical order lat.sort() lon.sort() geo_dict = {} # 4 coordinate values if len(lat) == 2 and len(lon) == 2: # Matching coordinate pairs. Not 4 unique values. if lat[0] == lat[1] and lon[0] == lon[1]: logger_noaa_lpd.info("coordinates found: {}".format("2")) lat.pop() lon.pop() geo_dict = self.__geo_point(lat, lon, elev) # 4 unique coordinates else: logger_noaa_lpd.info("coordinates found: {}".format("4")) geo_dict = self.__geo_multipoint(lat, lon, elev) # 2 coordinate values elif len(lat) == 1 and len(lon) == 1: logger_noaa_lpd.info("coordinates found: {}".format("2")) geo_dict = self.__geo_point(lat, lon, elev) # 0 coordinate values elif not lat and not lon: logger_noaa_lpd.info("coordinates found: {}".format("0")) else: geo_dict = {} logger_noaa_lpd.info("coordinates found: {}".format("too many")) return geo_dict
python
def __create_coordinates(self, lat, lon, elev): """ GeoJSON standard: Use to determine 2-point or 4-point coordinates :param list lat: :param list lon: :return dict: """ # Sort lat an lon in numerical order lat.sort() lon.sort() geo_dict = {} # 4 coordinate values if len(lat) == 2 and len(lon) == 2: # Matching coordinate pairs. Not 4 unique values. if lat[0] == lat[1] and lon[0] == lon[1]: logger_noaa_lpd.info("coordinates found: {}".format("2")) lat.pop() lon.pop() geo_dict = self.__geo_point(lat, lon, elev) # 4 unique coordinates else: logger_noaa_lpd.info("coordinates found: {}".format("4")) geo_dict = self.__geo_multipoint(lat, lon, elev) # 2 coordinate values elif len(lat) == 1 and len(lon) == 1: logger_noaa_lpd.info("coordinates found: {}".format("2")) geo_dict = self.__geo_point(lat, lon, elev) # 0 coordinate values elif not lat and not lon: logger_noaa_lpd.info("coordinates found: {}".format("0")) else: geo_dict = {} logger_noaa_lpd.info("coordinates found: {}".format("too many")) return geo_dict
[ "def", "__create_coordinates", "(", "self", ",", "lat", ",", "lon", ",", "elev", ")", ":", "# Sort lat an lon in numerical order", "lat", ".", "sort", "(", ")", "lon", ".", "sort", "(", ")", "geo_dict", "=", "{", "}", "# 4 coordinate values", "if", "len", ...
GeoJSON standard: Use to determine 2-point or 4-point coordinates :param list lat: :param list lon: :return dict:
[ "GeoJSON", "standard", ":", "Use", "to", "determine", "2", "-", "point", "or", "4", "-", "point", "coordinates", ":", "param", "list", "lat", ":", ":", "param", "list", "lon", ":", ":", "return", "dict", ":" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L679-L713
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__geo_multipoint
def __geo_multipoint(lat, lon, elev): """ GeoJSON standard: Create a geoJson MultiPoint-type dictionary :param list lat: :param list lon: :return dict: """ logger_noaa_lpd.info("enter geo_multipoint") geo_dict = OrderedDict() geometry_dict = OrderedDict() coordinates = [] # bbox = [] temp = [None, None] # 4 unique values # # Creates bounding box # for index, point in enumerate(lat): # bbox.append(lat[index]) # bbox.append(lon[index]) # Creates coordinates list for i in lat: temp[0] = i for j in lon: temp[1] = j coordinates.append(copy.copy(temp)) if elev: coordinates = coordinates + elev # Create geometry block geometry_dict['type'] = 'Polygon' geometry_dict['coordinates'] = coordinates # Create geo block geo_dict['type'] = 'Feature' # geo_dict['bbox'] = bbox geo_dict['geometry'] = geometry_dict return geo_dict
python
def __geo_multipoint(lat, lon, elev): """ GeoJSON standard: Create a geoJson MultiPoint-type dictionary :param list lat: :param list lon: :return dict: """ logger_noaa_lpd.info("enter geo_multipoint") geo_dict = OrderedDict() geometry_dict = OrderedDict() coordinates = [] # bbox = [] temp = [None, None] # 4 unique values # # Creates bounding box # for index, point in enumerate(lat): # bbox.append(lat[index]) # bbox.append(lon[index]) # Creates coordinates list for i in lat: temp[0] = i for j in lon: temp[1] = j coordinates.append(copy.copy(temp)) if elev: coordinates = coordinates + elev # Create geometry block geometry_dict['type'] = 'Polygon' geometry_dict['coordinates'] = coordinates # Create geo block geo_dict['type'] = 'Feature' # geo_dict['bbox'] = bbox geo_dict['geometry'] = geometry_dict return geo_dict
[ "def", "__geo_multipoint", "(", "lat", ",", "lon", ",", "elev", ")", ":", "logger_noaa_lpd", ".", "info", "(", "\"enter geo_multipoint\"", ")", "geo_dict", "=", "OrderedDict", "(", ")", "geometry_dict", "=", "OrderedDict", "(", ")", "coordinates", "=", "[", ...
GeoJSON standard: Create a geoJson MultiPoint-type dictionary :param list lat: :param list lon: :return dict:
[ "GeoJSON", "standard", ":", "Create", "a", "geoJson", "MultiPoint", "-", "type", "dictionary", ":", "param", "list", "lat", ":", ":", "param", "list", "lon", ":", ":", "return", "dict", ":" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L716-L752
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__geo_point
def __geo_point(lat, lon, elev): """ GeoJSON standard: Create a geoJson Point-type dictionary :param list lat: :param list lon: :return dict: """ logger_noaa_lpd.info("enter geo_point") coordinates = [] geo_dict = OrderedDict() geometry_dict = OrderedDict() for index, point in enumerate(lat): coordinates.append(lat[index]) coordinates.append(lon[index]) if elev: coordinates = coordinates + elev geometry_dict['type'] = 'Point' geometry_dict['coordinates'] = coordinates geo_dict['type'] = 'Feature' geo_dict['geometry'] = geometry_dict return geo_dict
python
def __geo_point(lat, lon, elev): """ GeoJSON standard: Create a geoJson Point-type dictionary :param list lat: :param list lon: :return dict: """ logger_noaa_lpd.info("enter geo_point") coordinates = [] geo_dict = OrderedDict() geometry_dict = OrderedDict() for index, point in enumerate(lat): coordinates.append(lat[index]) coordinates.append(lon[index]) if elev: coordinates = coordinates + elev geometry_dict['type'] = 'Point' geometry_dict['coordinates'] = coordinates geo_dict['type'] = 'Feature' geo_dict['geometry'] = geometry_dict return geo_dict
[ "def", "__geo_point", "(", "lat", ",", "lon", ",", "elev", ")", ":", "logger_noaa_lpd", ".", "info", "(", "\"enter geo_point\"", ")", "coordinates", "=", "[", "]", "geo_dict", "=", "OrderedDict", "(", ")", "geometry_dict", "=", "OrderedDict", "(", ")", "fo...
GeoJSON standard: Create a geoJson Point-type dictionary :param list lat: :param list lon: :return dict:
[ "GeoJSON", "standard", ":", "Create", "a", "geoJson", "Point", "-", "type", "dictionary", ":", "param", "list", "lat", ":", ":", "param", "list", "lon", ":", ":", "return", "dict", ":" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L755-L776
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__reorganize_doi
def __reorganize_doi(temp_pub): """ Create a valid bib json entry for the DOI information. "DOI" is technically the only valid DOI key, but there are sometimes a doi_id and doi_url entry. Check for all three and compare them, then keep whichever seems best. :param dict temp_pub: :return dict: """ doi_out = "" doi_list = [] rm = ["doiId", "doi", "doiUrl"] # Check if both entries exist if "doi" and "doiId" in temp_pub: if temp_pub["doi"] == temp_pub["doiId"]: # If the entries are the same, then pick one to use doi_out = temp_pub["doiId"] else: # If entries are not the same, check if it matches the regex pattern. if re_doi.findall(temp_pub["doiId"]): doi_out = temp_pub["doiId"] # If it doesnt match the regex, just use the "doi" entry as-is else: doi_out = temp_pub["doi"] # Check if "doiId" entry exists. Most common entry. elif "doiId" in temp_pub: doi_out = temp_pub["doiId"] # Check if "doi" entry exists. Fallback. elif "doi" in temp_pub: doi_out = temp_pub["doi"] # It's unlikely that ONLY the doi_url exists, but check if no doi found so far elif "doiUrl" in temp_pub: doi_out = temp_pub["doiUrl"] # Log if the DOI is invalid or not. if not re_doi.match(doi_out): logger_noaa_lpd.info("reorganize_doi: invalid doi input from NOAA file") # Get a list of all DOIs found in the string matches = re.findall(re_doi, doi_out) logger_noaa_lpd.info("reorganize_dois: found {} dois: {}".format(len(matches), doi_out)) # Add identifier block to publication dictionary for doi in matches: doi_list.append({"type": "doi", "id": doi}) # Remove the other DOI entries for k in rm: try: del temp_pub[k] except KeyError: # If there's a KeyError, don't worry about it. It's likely that only one of these keys will be present. pass temp_pub["identifier"] = doi_list return temp_pub
python
def __reorganize_doi(temp_pub): """ Create a valid bib json entry for the DOI information. "DOI" is technically the only valid DOI key, but there are sometimes a doi_id and doi_url entry. Check for all three and compare them, then keep whichever seems best. :param dict temp_pub: :return dict: """ doi_out = "" doi_list = [] rm = ["doiId", "doi", "doiUrl"] # Check if both entries exist if "doi" and "doiId" in temp_pub: if temp_pub["doi"] == temp_pub["doiId"]: # If the entries are the same, then pick one to use doi_out = temp_pub["doiId"] else: # If entries are not the same, check if it matches the regex pattern. if re_doi.findall(temp_pub["doiId"]): doi_out = temp_pub["doiId"] # If it doesnt match the regex, just use the "doi" entry as-is else: doi_out = temp_pub["doi"] # Check if "doiId" entry exists. Most common entry. elif "doiId" in temp_pub: doi_out = temp_pub["doiId"] # Check if "doi" entry exists. Fallback. elif "doi" in temp_pub: doi_out = temp_pub["doi"] # It's unlikely that ONLY the doi_url exists, but check if no doi found so far elif "doiUrl" in temp_pub: doi_out = temp_pub["doiUrl"] # Log if the DOI is invalid or not. if not re_doi.match(doi_out): logger_noaa_lpd.info("reorganize_doi: invalid doi input from NOAA file") # Get a list of all DOIs found in the string matches = re.findall(re_doi, doi_out) logger_noaa_lpd.info("reorganize_dois: found {} dois: {}".format(len(matches), doi_out)) # Add identifier block to publication dictionary for doi in matches: doi_list.append({"type": "doi", "id": doi}) # Remove the other DOI entries for k in rm: try: del temp_pub[k] except KeyError: # If there's a KeyError, don't worry about it. It's likely that only one of these keys will be present. pass temp_pub["identifier"] = doi_list return temp_pub
[ "def", "__reorganize_doi", "(", "temp_pub", ")", ":", "doi_out", "=", "\"\"", "doi_list", "=", "[", "]", "rm", "=", "[", "\"doiId\"", ",", "\"doi\"", ",", "\"doiUrl\"", "]", "# Check if both entries exist", "if", "\"doi\"", "and", "\"doiId\"", "in", "temp_pub"...
Create a valid bib json entry for the DOI information. "DOI" is technically the only valid DOI key, but there are sometimes a doi_id and doi_url entry. Check for all three and compare them, then keep whichever seems best. :param dict temp_pub: :return dict:
[ "Create", "a", "valid", "bib", "json", "entry", "for", "the", "DOI", "information", ".", "DOI", "is", "technically", "the", "only", "valid", "DOI", "key", "but", "there", "are", "sometimes", "a", "doi_id", "and", "doi_url", "entry", ".", "Check", "for", ...
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L779-L833
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__create_chron_cols
def __create_chron_cols(metadata): """ Use to collected chron metadata to create the chron columns :param dict metadata: key: variable, val: unit (optional) :return list: list of one dict per column """ chron_col_list = [] chron_col_ct = 1 for variableName, unit in metadata.items(): temp_dict = OrderedDict() temp_dict['column'] = chron_col_ct temp_dict['variableName'] = variableName temp_dict['unit'] = unit chron_col_list.append(copy.deepcopy(temp_dict)) chron_col_ct += 1 return chron_col_list
python
def __create_chron_cols(metadata): """ Use to collected chron metadata to create the chron columns :param dict metadata: key: variable, val: unit (optional) :return list: list of one dict per column """ chron_col_list = [] chron_col_ct = 1 for variableName, unit in metadata.items(): temp_dict = OrderedDict() temp_dict['column'] = chron_col_ct temp_dict['variableName'] = variableName temp_dict['unit'] = unit chron_col_list.append(copy.deepcopy(temp_dict)) chron_col_ct += 1 return chron_col_list
[ "def", "__create_chron_cols", "(", "metadata", ")", ":", "chron_col_list", "=", "[", "]", "chron_col_ct", "=", "1", "for", "variableName", ",", "unit", "in", "metadata", ".", "items", "(", ")", ":", "temp_dict", "=", "OrderedDict", "(", ")", "temp_dict", "...
Use to collected chron metadata to create the chron columns :param dict metadata: key: variable, val: unit (optional) :return list: list of one dict per column
[ "Use", "to", "collected", "chron", "metadata", "to", "create", "the", "chron", "columns", ":", "param", "dict", "metadata", ":", "key", ":", "variable", "val", ":", "unit", "(", "optional", ")", ":", "return", "list", ":", "list", "of", "one", "dict", ...
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L851-L867
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__reorganize_chron_header
def __reorganize_chron_header(line): """ Reorganize the list of variables. If there are units given, log them. :param str line: :return dict: key: variable, val: units (optional) """ d = {} # Header variables should be tab-delimited. Use regex to split by tabs m = re.split(re_tab_split, line) # If there was an output match from the line, then keep going if m: # Loop once for each variable in the line for s in m: # Match the variable to the 'variable (units)' regex to look for units m2 = re.match(re_var_w_units, s) # If there was a match if m2: # If no units were found, set to blank if m2.group(2) is None: d[m2.group(1)] = "" # Units were found else: # Set both values d[m2.group(1)] = m2.group(2) return d
python
def __reorganize_chron_header(line): """ Reorganize the list of variables. If there are units given, log them. :param str line: :return dict: key: variable, val: units (optional) """ d = {} # Header variables should be tab-delimited. Use regex to split by tabs m = re.split(re_tab_split, line) # If there was an output match from the line, then keep going if m: # Loop once for each variable in the line for s in m: # Match the variable to the 'variable (units)' regex to look for units m2 = re.match(re_var_w_units, s) # If there was a match if m2: # If no units were found, set to blank if m2.group(2) is None: d[m2.group(1)] = "" # Units were found else: # Set both values d[m2.group(1)] = m2.group(2) return d
[ "def", "__reorganize_chron_header", "(", "line", ")", ":", "d", "=", "{", "}", "# Header variables should be tab-delimited. Use regex to split by tabs", "m", "=", "re", ".", "split", "(", "re_tab_split", ",", "line", ")", "# If there was an output match from the line, then ...
Reorganize the list of variables. If there are units given, log them. :param str line: :return dict: key: variable, val: units (optional)
[ "Reorganize", "the", "list", "of", "variables", ".", "If", "there", "are", "units", "given", "log", "them", ".", ":", "param", "str", "line", ":", ":", "return", "dict", ":", "key", ":", "variable", "val", ":", "units", "(", "optional", ")" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L870-L894
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
NOAA_LPD.__reorganize_authors
def __reorganize_authors(authors): """ Separate the string of authors and put it into a BibJSON compliant list :param str authors: :return list: List of dictionaries of author names. """ # String SHOULD be semi-colon separated names. l = [] s = authors.split(";") for author in s: try: l.append({"name": author.strip()}) except AttributeError: logger_noaa_lpd.warning("reorganize_authors: AttributeError: authors incorrectly formatted") return l
python
def __reorganize_authors(authors): """ Separate the string of authors and put it into a BibJSON compliant list :param str authors: :return list: List of dictionaries of author names. """ # String SHOULD be semi-colon separated names. l = [] s = authors.split(";") for author in s: try: l.append({"name": author.strip()}) except AttributeError: logger_noaa_lpd.warning("reorganize_authors: AttributeError: authors incorrectly formatted") return l
[ "def", "__reorganize_authors", "(", "authors", ")", ":", "# String SHOULD be semi-colon separated names.", "l", "=", "[", "]", "s", "=", "authors", ".", "split", "(", "\";\"", ")", "for", "author", "in", "s", ":", "try", ":", "l", ".", "append", "(", "{", ...
Separate the string of authors and put it into a BibJSON compliant list :param str authors: :return list: List of dictionaries of author names.
[ "Separate", "the", "string", "of", "authors", "and", "put", "it", "into", "a", "BibJSON", "compliant", "list", ":", "param", "str", "authors", ":", ":", "return", "list", ":", "List", "of", "dictionaries", "of", "author", "names", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L897-L911
nickmckay/LiPD-utilities
Python/lipd/versions.py
fix_doi
def fix_doi(L): """ DOIs are commonly stored in the BibJson format under "identifier". We want to move these to the root of the publication under "doi". Make the reassignments necessary and also remove duplicate and unwanted Doi/DOI keys. :param dict L: Metadata :return dict L: Metdata """ # Keys that we don't want. Reassign data to 'doi' key _dois = ["DOI", "Doi"] # Loop for each publication entry for pub in L["pub"]: try: # Is there an identifier in this publication? if "identifier" in pub: # Attempt to grab the doi from the id location. If it doesn't work, we'll catch the error. _identifier = pub["identifier"][0]["id"] # Got identifier. Is there a valid string here? if _identifier: # Reassign the doi to the publication root 'doi' key pub["doi"] = _identifier # Delete the identifier key and data del pub["identifier"] except Exception: # Catch the KeyError, and continue on normally. pass # Check for each doi key that we don't want for _key in _dois: # Is it in the publication entry? if _key in pub: # Is there valid string data? if pub[_key]: # Reassign the doi to the 'doi' key pub["doi"] = pub[_key] # Delete the bad doi key del pub[_key] return L
python
def fix_doi(L): """ DOIs are commonly stored in the BibJson format under "identifier". We want to move these to the root of the publication under "doi". Make the reassignments necessary and also remove duplicate and unwanted Doi/DOI keys. :param dict L: Metadata :return dict L: Metdata """ # Keys that we don't want. Reassign data to 'doi' key _dois = ["DOI", "Doi"] # Loop for each publication entry for pub in L["pub"]: try: # Is there an identifier in this publication? if "identifier" in pub: # Attempt to grab the doi from the id location. If it doesn't work, we'll catch the error. _identifier = pub["identifier"][0]["id"] # Got identifier. Is there a valid string here? if _identifier: # Reassign the doi to the publication root 'doi' key pub["doi"] = _identifier # Delete the identifier key and data del pub["identifier"] except Exception: # Catch the KeyError, and continue on normally. pass # Check for each doi key that we don't want for _key in _dois: # Is it in the publication entry? if _key in pub: # Is there valid string data? if pub[_key]: # Reassign the doi to the 'doi' key pub["doi"] = pub[_key] # Delete the bad doi key del pub[_key] return L
[ "def", "fix_doi", "(", "L", ")", ":", "# Keys that we don't want. Reassign data to 'doi' key", "_dois", "=", "[", "\"DOI\"", ",", "\"Doi\"", "]", "# Loop for each publication entry", "for", "pub", "in", "L", "[", "\"pub\"", "]", ":", "try", ":", "# Is there an ident...
DOIs are commonly stored in the BibJson format under "identifier". We want to move these to the root of the publication under "doi". Make the reassignments necessary and also remove duplicate and unwanted Doi/DOI keys. :param dict L: Metadata :return dict L: Metdata
[ "DOIs", "are", "commonly", "stored", "in", "the", "BibJson", "format", "under", "identifier", ".", "We", "want", "to", "move", "these", "to", "the", "root", "of", "the", "publication", "under", "doi", ".", "Make", "the", "reassignments", "necessary", "and", ...
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/versions.py#L7-L45
nickmckay/LiPD-utilities
Python/lipd/versions.py
get_lipd_version
def get_lipd_version(L): """ Check what version of LiPD this file is using. If none is found, assume it's using version 1.0 :param dict L: Metadata :return float: """ version = 1.0 _keys = ["LipdVersion", "LiPDVersion", "lipdVersion", "liPDVersion"] for _key in _keys: if _key in L: version = L[_key] # Cast the version number to a float try: version = float(version) except AttributeError: # If the casting failed, then something is wrong with the key so assume version is 1.0 version = 1.0 L.pop(_key) return L, version
python
def get_lipd_version(L): """ Check what version of LiPD this file is using. If none is found, assume it's using version 1.0 :param dict L: Metadata :return float: """ version = 1.0 _keys = ["LipdVersion", "LiPDVersion", "lipdVersion", "liPDVersion"] for _key in _keys: if _key in L: version = L[_key] # Cast the version number to a float try: version = float(version) except AttributeError: # If the casting failed, then something is wrong with the key so assume version is 1.0 version = 1.0 L.pop(_key) return L, version
[ "def", "get_lipd_version", "(", "L", ")", ":", "version", "=", "1.0", "_keys", "=", "[", "\"LipdVersion\"", ",", "\"LiPDVersion\"", ",", "\"lipdVersion\"", ",", "\"liPDVersion\"", "]", "for", "_key", "in", "_keys", ":", "if", "_key", "in", "L", ":", "versi...
Check what version of LiPD this file is using. If none is found, assume it's using version 1.0 :param dict L: Metadata :return float:
[ "Check", "what", "version", "of", "LiPD", "this", "file", "is", "using", ".", "If", "none", "is", "found", "assume", "it", "s", "using", "version", "1", ".", "0", ":", "param", "dict", "L", ":", "Metadata", ":", "return", "float", ":" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/versions.py#L47-L65
nickmckay/LiPD-utilities
Python/lipd/versions.py
update_lipd_version
def update_lipd_version(L): """ Metadata is indexed by number at this step. Use the current version number to determine where to start updating from. Use "chain versioning" to make it modular. If a file is a few versions behind, convert to EACH version until reaching current. If a file is one version behind, it will only convert once to the newest. :param dict L: Metadata :return dict d: Metadata """ # Get the lipd version number. L, version = get_lipd_version(L) # Update from (N/A or 1.0) to 1.1 if version in [1.0, "1.0"]: L = update_lipd_v1_1(L) version = 1.1 # Update from 1.1 to 1.2 if version in [1.1, "1.1"]: L = update_lipd_v1_2(L) version = 1.2 if version in [1.2, "1.2"]: L = update_lipd_v1_3(L) version = 1.3 L = fix_doi(L) L["lipdVersion"] = 1.3 return L
python
def update_lipd_version(L): """ Metadata is indexed by number at this step. Use the current version number to determine where to start updating from. Use "chain versioning" to make it modular. If a file is a few versions behind, convert to EACH version until reaching current. If a file is one version behind, it will only convert once to the newest. :param dict L: Metadata :return dict d: Metadata """ # Get the lipd version number. L, version = get_lipd_version(L) # Update from (N/A or 1.0) to 1.1 if version in [1.0, "1.0"]: L = update_lipd_v1_1(L) version = 1.1 # Update from 1.1 to 1.2 if version in [1.1, "1.1"]: L = update_lipd_v1_2(L) version = 1.2 if version in [1.2, "1.2"]: L = update_lipd_v1_3(L) version = 1.3 L = fix_doi(L) L["lipdVersion"] = 1.3 return L
[ "def", "update_lipd_version", "(", "L", ")", ":", "# Get the lipd version number.", "L", ",", "version", "=", "get_lipd_version", "(", "L", ")", "# Update from (N/A or 1.0) to 1.1", "if", "version", "in", "[", "1.0", ",", "\"1.0\"", "]", ":", "L", "=", "update_l...
Metadata is indexed by number at this step. Use the current version number to determine where to start updating from. Use "chain versioning" to make it modular. If a file is a few versions behind, convert to EACH version until reaching current. If a file is one version behind, it will only convert once to the newest. :param dict L: Metadata :return dict d: Metadata
[ "Metadata", "is", "indexed", "by", "number", "at", "this", "step", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/versions.py#L101-L129
nickmckay/LiPD-utilities
Python/lipd/versions.py
update_lipd_v1_1
def update_lipd_v1_1(d): """ Update LiPD v1.0 to v1.1 - chronData entry is a list that allows multiple tables - paleoData entry is a list that allows multiple tables - chronData now allows measurement, model, summary, modelTable, ensemble, calibratedAges tables - Added 'lipdVersion' key :param dict d: Metadata v1.0 :return dict d: Metadata v1.1 """ logger_versions.info("enter update_lipd_v1_1") tmp_all = [] try: # ChronData is the only structure update if "chronData" in d: # As of v1.1, ChronData should have an extra level of abstraction. # No longer shares the same structure of paleoData # If no measurement table, then make a measurement table list with the table as the entry for table in d["chronData"]: if "chronMeasurementTable" not in table: tmp_all.append({"chronMeasurementTable": [table]}) # If the table exists, but it is a dictionary, then turn it into a list with one entry elif "chronMeasurementTable" in table: if isinstance(table["chronMeasurementTable"], dict): tmp_all.append({"chronMeasurementTable": [table["chronMeasurementTable"]]}) if tmp_all: d["chronData"] = tmp_all # Log that this is now a v1.1 structured file d["lipdVersion"] = 1.1 except Exception as e: logger_versions.error("update_lipd_v1_1: Exception: {}".format(e)) logger_versions.info("exit update_lipd_v1_1") return d
python
def update_lipd_v1_1(d): """ Update LiPD v1.0 to v1.1 - chronData entry is a list that allows multiple tables - paleoData entry is a list that allows multiple tables - chronData now allows measurement, model, summary, modelTable, ensemble, calibratedAges tables - Added 'lipdVersion' key :param dict d: Metadata v1.0 :return dict d: Metadata v1.1 """ logger_versions.info("enter update_lipd_v1_1") tmp_all = [] try: # ChronData is the only structure update if "chronData" in d: # As of v1.1, ChronData should have an extra level of abstraction. # No longer shares the same structure of paleoData # If no measurement table, then make a measurement table list with the table as the entry for table in d["chronData"]: if "chronMeasurementTable" not in table: tmp_all.append({"chronMeasurementTable": [table]}) # If the table exists, but it is a dictionary, then turn it into a list with one entry elif "chronMeasurementTable" in table: if isinstance(table["chronMeasurementTable"], dict): tmp_all.append({"chronMeasurementTable": [table["chronMeasurementTable"]]}) if tmp_all: d["chronData"] = tmp_all # Log that this is now a v1.1 structured file d["lipdVersion"] = 1.1 except Exception as e: logger_versions.error("update_lipd_v1_1: Exception: {}".format(e)) logger_versions.info("exit update_lipd_v1_1") return d
[ "def", "update_lipd_v1_1", "(", "d", ")", ":", "logger_versions", ".", "info", "(", "\"enter update_lipd_v1_1\"", ")", "tmp_all", "=", "[", "]", "try", ":", "# ChronData is the only structure update", "if", "\"chronData\"", "in", "d", ":", "# As of v1.1, ChronData sho...
Update LiPD v1.0 to v1.1 - chronData entry is a list that allows multiple tables - paleoData entry is a list that allows multiple tables - chronData now allows measurement, model, summary, modelTable, ensemble, calibratedAges tables - Added 'lipdVersion' key :param dict d: Metadata v1.0 :return dict d: Metadata v1.1
[ "Update", "LiPD", "v1", ".", "0", "to", "v1", ".", "1", "-", "chronData", "entry", "is", "a", "list", "that", "allows", "multiple", "tables", "-", "paleoData", "entry", "is", "a", "list", "that", "allows", "multiple", "tables", "-", "chronData", "now", ...
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/versions.py#L132-L169
nickmckay/LiPD-utilities
Python/lipd/versions.py
update_lipd_v1_3
def update_lipd_v1_3(d): """ Update LiPD v1.2 to v1.3 - Added 'createdBy' key - Top-level folder inside LiPD archives are named "bag". (No longer <datasetname>) - .jsonld file is now generically named 'metadata.jsonld' (No longer <datasetname>.lpd ) - All "paleo" and "chron" prefixes are removed from "paleoMeasurementTable", "paleoModel", etc. - Merge isotopeInterpretation and climateInterpretation into "interpretation" block - ensemble table entry is a list that allows multiple tables - summary table entry is a list that allows multiple tables :param dict d: Metadata v1.2 :return dict d: Metadata v1.3 """ # sub routine (recursive): changes all the key names and merges interpretation d = update_lipd_v1_3_names(d) # sub routine: changes ensemble and summary table structure d = update_lipd_v1_3_structure(d) d["lipdVersion"] = 1.3 if "LiPDVersion" in d: del d["LiPDVersion"] return d
python
def update_lipd_v1_3(d): """ Update LiPD v1.2 to v1.3 - Added 'createdBy' key - Top-level folder inside LiPD archives are named "bag". (No longer <datasetname>) - .jsonld file is now generically named 'metadata.jsonld' (No longer <datasetname>.lpd ) - All "paleo" and "chron" prefixes are removed from "paleoMeasurementTable", "paleoModel", etc. - Merge isotopeInterpretation and climateInterpretation into "interpretation" block - ensemble table entry is a list that allows multiple tables - summary table entry is a list that allows multiple tables :param dict d: Metadata v1.2 :return dict d: Metadata v1.3 """ # sub routine (recursive): changes all the key names and merges interpretation d = update_lipd_v1_3_names(d) # sub routine: changes ensemble and summary table structure d = update_lipd_v1_3_structure(d) d["lipdVersion"] = 1.3 if "LiPDVersion" in d: del d["LiPDVersion"] return d
[ "def", "update_lipd_v1_3", "(", "d", ")", ":", "# sub routine (recursive): changes all the key names and merges interpretation", "d", "=", "update_lipd_v1_3_names", "(", "d", ")", "# sub routine: changes ensemble and summary table structure", "d", "=", "update_lipd_v1_3_structure", ...
Update LiPD v1.2 to v1.3 - Added 'createdBy' key - Top-level folder inside LiPD archives are named "bag". (No longer <datasetname>) - .jsonld file is now generically named 'metadata.jsonld' (No longer <datasetname>.lpd ) - All "paleo" and "chron" prefixes are removed from "paleoMeasurementTable", "paleoModel", etc. - Merge isotopeInterpretation and climateInterpretation into "interpretation" block - ensemble table entry is a list that allows multiple tables - summary table entry is a list that allows multiple tables :param dict d: Metadata v1.2 :return dict d: Metadata v1.3
[ "Update", "LiPD", "v1", ".", "2", "to", "v1", ".", "3", "-", "Added", "createdBy", "key", "-", "Top", "-", "level", "folder", "inside", "LiPD", "archives", "are", "named", "bag", ".", "(", "No", "longer", "<datasetname", ">", ")", "-", ".", "jsonld",...
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/versions.py#L209-L229
nickmckay/LiPD-utilities
Python/lipd/versions.py
update_lipd_v1_3_names
def update_lipd_v1_3_names(d): """ Update the key names and merge interpretation data :param dict d: Metadata :return dict d: Metadata """ try: if isinstance(d, dict): for k, v in d.items(): # dive down for dictionaries d[k] = update_lipd_v1_3_names(v) # see if the key is in the remove list if k in VER_1_3["swap"]: # replace the key in the dictionary _key_swap = VER_1_3["swap"][k] d[_key_swap] = d.pop(k) elif k in VER_1_3["tables"]: d[k] = "" for _key in ["climateInterpretation", "isotopeInterpretation"]: if _key in d: d = _merge_interpretations(d) elif isinstance(d, list): # dive down for lists for idx, i in enumerate(d): d[idx] = update_lipd_v1_3_names(i) except Exception as e: print("Error: Unable to update file to LiPD v1.3: {}".format(e)) logger_versions.error("update_lipd_v1_3_names: Exception: {}".format(e)) return d
python
def update_lipd_v1_3_names(d): """ Update the key names and merge interpretation data :param dict d: Metadata :return dict d: Metadata """ try: if isinstance(d, dict): for k, v in d.items(): # dive down for dictionaries d[k] = update_lipd_v1_3_names(v) # see if the key is in the remove list if k in VER_1_3["swap"]: # replace the key in the dictionary _key_swap = VER_1_3["swap"][k] d[_key_swap] = d.pop(k) elif k in VER_1_3["tables"]: d[k] = "" for _key in ["climateInterpretation", "isotopeInterpretation"]: if _key in d: d = _merge_interpretations(d) elif isinstance(d, list): # dive down for lists for idx, i in enumerate(d): d[idx] = update_lipd_v1_3_names(i) except Exception as e: print("Error: Unable to update file to LiPD v1.3: {}".format(e)) logger_versions.error("update_lipd_v1_3_names: Exception: {}".format(e)) return d
[ "def", "update_lipd_v1_3_names", "(", "d", ")", ":", "try", ":", "if", "isinstance", "(", "d", ",", "dict", ")", ":", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "# dive down for dictionaries", "d", "[", "k", "]", "=", "update_lipd_...
Update the key names and merge interpretation data :param dict d: Metadata :return dict d: Metadata
[ "Update", "the", "key", "names", "and", "merge", "interpretation", "data", ":", "param", "dict", "d", ":", "Metadata", ":", "return", "dict", "d", ":", "Metadata" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/versions.py#L232-L262
nickmckay/LiPD-utilities
Python/lipd/versions.py
update_lipd_v1_3_structure
def update_lipd_v1_3_structure(d): """ Update the structure for summary and ensemble tables :param dict d: Metadata :return dict d: Metadata """ for key in ["paleoData", "chronData"]: if key in d: for entry1 in d[key]: if "model" in entry1: for entry2 in entry1["model"]: for key_table in ["summaryTable", "ensembleTable"]: if key_table in entry2: if isinstance(entry2[key_table], dict): try: _tmp = entry2[key_table] entry2[key_table] = [] entry2[key_table].append(_tmp) except Exception as e: logger_versions.error("update_lipd_v1_3_structure: Exception: {}".format(e)) return d
python
def update_lipd_v1_3_structure(d): """ Update the structure for summary and ensemble tables :param dict d: Metadata :return dict d: Metadata """ for key in ["paleoData", "chronData"]: if key in d: for entry1 in d[key]: if "model" in entry1: for entry2 in entry1["model"]: for key_table in ["summaryTable", "ensembleTable"]: if key_table in entry2: if isinstance(entry2[key_table], dict): try: _tmp = entry2[key_table] entry2[key_table] = [] entry2[key_table].append(_tmp) except Exception as e: logger_versions.error("update_lipd_v1_3_structure: Exception: {}".format(e)) return d
[ "def", "update_lipd_v1_3_structure", "(", "d", ")", ":", "for", "key", "in", "[", "\"paleoData\"", ",", "\"chronData\"", "]", ":", "if", "key", "in", "d", ":", "for", "entry1", "in", "d", "[", "key", "]", ":", "if", "\"model\"", "in", "entry1", ":", ...
Update the structure for summary and ensemble tables :param dict d: Metadata :return dict d: Metadata
[ "Update", "the", "structure", "for", "summary", "and", "ensemble", "tables", ":", "param", "dict", "d", ":", "Metadata", ":", "return", "dict", "d", ":", "Metadata" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/versions.py#L265-L285
nickmckay/LiPD-utilities
Python/lipd/csvs.py
merge_csv_metadata
def merge_csv_metadata(d, csvs): """ Using the given metadata dictionary, retrieve CSV data from CSV files, and insert the CSV values into their respective metadata columns. Checks for both paleoData and chronData tables. :param dict d: Metadata :return dict: Modified metadata dictionary """ logger_csvs.info("enter merge_csv_metadata") # Add CSV to paleoData if "paleoData" in d: d["paleoData"] = _merge_csv_section(d["paleoData"], "paleo", csvs) # Add CSV to chronData if "chronData" in d: d["chronData"] = _merge_csv_section(d["chronData"], "chron", csvs) logger_csvs.info("exit merge_csv_metadata") return d
python
def merge_csv_metadata(d, csvs): """ Using the given metadata dictionary, retrieve CSV data from CSV files, and insert the CSV values into their respective metadata columns. Checks for both paleoData and chronData tables. :param dict d: Metadata :return dict: Modified metadata dictionary """ logger_csvs.info("enter merge_csv_metadata") # Add CSV to paleoData if "paleoData" in d: d["paleoData"] = _merge_csv_section(d["paleoData"], "paleo", csvs) # Add CSV to chronData if "chronData" in d: d["chronData"] = _merge_csv_section(d["chronData"], "chron", csvs) logger_csvs.info("exit merge_csv_metadata") return d
[ "def", "merge_csv_metadata", "(", "d", ",", "csvs", ")", ":", "logger_csvs", ".", "info", "(", "\"enter merge_csv_metadata\"", ")", "# Add CSV to paleoData", "if", "\"paleoData\"", "in", "d", ":", "d", "[", "\"paleoData\"", "]", "=", "_merge_csv_section", "(", "...
Using the given metadata dictionary, retrieve CSV data from CSV files, and insert the CSV values into their respective metadata columns. Checks for both paleoData and chronData tables. :param dict d: Metadata :return dict: Modified metadata dictionary
[ "Using", "the", "given", "metadata", "dictionary", "retrieve", "CSV", "data", "from", "CSV", "files", "and", "insert", "the", "CSV", "values", "into", "their", "respective", "metadata", "columns", ".", "Checks", "for", "both", "paleoData", "and", "chronData", ...
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L18-L37
nickmckay/LiPD-utilities
Python/lipd/csvs.py
_merge_csv_section
def _merge_csv_section(sections, pc, csvs): """ Add csv data to all paleo data tables :param dict sections: Metadata :return dict sections: Metadata """ logger_csvs.info("enter merge_csv_section") try: # Loop through each table_data in paleoData for _name, _section in sections.items(): if "measurementTable" in _section: sections[_name]["measurementTable"] = _merge_csv_table(_section["measurementTable"], pc, csvs) if "model" in _section: sections[_name]["model"] = _merge_csv_model(_section["model"], pc, csvs) except Exception as e: print("Error: There was an error merging CSV data into the metadata ") logger_csvs.error("merge_csv_section: {}".format(e)) logger_csvs.info("exit merge_csv_section") return sections
python
def _merge_csv_section(sections, pc, csvs): """ Add csv data to all paleo data tables :param dict sections: Metadata :return dict sections: Metadata """ logger_csvs.info("enter merge_csv_section") try: # Loop through each table_data in paleoData for _name, _section in sections.items(): if "measurementTable" in _section: sections[_name]["measurementTable"] = _merge_csv_table(_section["measurementTable"], pc, csvs) if "model" in _section: sections[_name]["model"] = _merge_csv_model(_section["model"], pc, csvs) except Exception as e: print("Error: There was an error merging CSV data into the metadata ") logger_csvs.error("merge_csv_section: {}".format(e)) logger_csvs.info("exit merge_csv_section") return sections
[ "def", "_merge_csv_section", "(", "sections", ",", "pc", ",", "csvs", ")", ":", "logger_csvs", ".", "info", "(", "\"enter merge_csv_section\"", ")", "try", ":", "# Loop through each table_data in paleoData", "for", "_name", ",", "_section", "in", "sections", ".", ...
Add csv data to all paleo data tables :param dict sections: Metadata :return dict sections: Metadata
[ "Add", "csv", "data", "to", "all", "paleo", "data", "tables" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L40-L64
nickmckay/LiPD-utilities
Python/lipd/csvs.py
_merge_csv_model
def _merge_csv_model(models, pc, csvs): """ Add csv data to each column in chron model :param dict models: Metadata :return dict models: Metadata """ logger_csvs.info("enter merge_csv_model") try: for _name, _model in models.items(): if "summaryTable" in _model: models[_name]["summaryTable"] = _merge_csv_table(_model["summaryTable"], pc, csvs) if "ensembleTable" in _model: models[_name]["ensembleTable"] = _merge_csv_table(_model["ensembleTable"], pc, csvs) if "distributionTable" in _model: models[_name]["distributionTable"] = _merge_csv_table(_model["distributionTable"], pc, csvs) except Exception as e: logger_csvs.error("merge_csv_model: {}",format(e)) logger_csvs.info("exit merge_csv_model") return models
python
def _merge_csv_model(models, pc, csvs): """ Add csv data to each column in chron model :param dict models: Metadata :return dict models: Metadata """ logger_csvs.info("enter merge_csv_model") try: for _name, _model in models.items(): if "summaryTable" in _model: models[_name]["summaryTable"] = _merge_csv_table(_model["summaryTable"], pc, csvs) if "ensembleTable" in _model: models[_name]["ensembleTable"] = _merge_csv_table(_model["ensembleTable"], pc, csvs) if "distributionTable" in _model: models[_name]["distributionTable"] = _merge_csv_table(_model["distributionTable"], pc, csvs) except Exception as e: logger_csvs.error("merge_csv_model: {}",format(e)) logger_csvs.info("exit merge_csv_model") return models
[ "def", "_merge_csv_model", "(", "models", ",", "pc", ",", "csvs", ")", ":", "logger_csvs", ".", "info", "(", "\"enter merge_csv_model\"", ")", "try", ":", "for", "_name", ",", "_model", "in", "models", ".", "items", "(", ")", ":", "if", "\"summaryTable\"",...
Add csv data to each column in chron model :param dict models: Metadata :return dict models: Metadata
[ "Add", "csv", "data", "to", "each", "column", "in", "chron", "model" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L67-L92
nickmckay/LiPD-utilities
Python/lipd/csvs.py
_merge_csv_column
def _merge_csv_column(table, csvs): """ Add csv data to each column in a list of columns :param dict table: Table metadata :param str crumbs: Hierarchy crumbs :param str pc: Paleo or Chron table type :return dict: Table metadata with csv "values" entry :return bool ensemble: Ensemble data or not ensemble data """ # Start putting CSV data into corresponding column "values" key try: ensemble = is_ensemble(table["columns"]) if ensemble: # realization columns if len(table["columns"]) == 1: for _name, _column in table["columns"].items(): _column["values"] = csvs # depth column + realization columns elif len(table["columns"]) == 2: _multi_column = False for _name, _column in table["columns"].items(): if isinstance(_column["number"], (int, float)): col_num = cast_int(_column["number"]) _column['values'] = csvs[col_num - 1] elif isinstance(_column["number"], list): if _multi_column: raise Exception("Error: merge_csv_column: This jsonld metadata looks wrong!\n" "\tAn ensemble table depth should not reference multiple columns of CSV data.\n" "\tPlease manually fix the ensemble columns in 'metadata.jsonld' inside of your LiPD file.") else: _multi_column = True _column["values"] = csvs[2:] else: for _name, _column in table['columns'].items(): col_num = cast_int(_column["number"]) _column['values'] = csvs[col_num - 1] except IndexError: logger_csvs.warning("merge_csv_column: IndexError: index out of range of csv_data list") except KeyError: logger_csvs.error("merge_csv_column: KeyError: missing columns key") except Exception as e: logger_csvs.error("merge_csv_column: Unknown Error: {}".format(e)) print("Quitting...") exit(1) # We want to keep one missing value ONLY at the table level. Remove MVs if they're still in column-level return table, ensemble
python
def _merge_csv_column(table, csvs): """ Add csv data to each column in a list of columns :param dict table: Table metadata :param str crumbs: Hierarchy crumbs :param str pc: Paleo or Chron table type :return dict: Table metadata with csv "values" entry :return bool ensemble: Ensemble data or not ensemble data """ # Start putting CSV data into corresponding column "values" key try: ensemble = is_ensemble(table["columns"]) if ensemble: # realization columns if len(table["columns"]) == 1: for _name, _column in table["columns"].items(): _column["values"] = csvs # depth column + realization columns elif len(table["columns"]) == 2: _multi_column = False for _name, _column in table["columns"].items(): if isinstance(_column["number"], (int, float)): col_num = cast_int(_column["number"]) _column['values'] = csvs[col_num - 1] elif isinstance(_column["number"], list): if _multi_column: raise Exception("Error: merge_csv_column: This jsonld metadata looks wrong!\n" "\tAn ensemble table depth should not reference multiple columns of CSV data.\n" "\tPlease manually fix the ensemble columns in 'metadata.jsonld' inside of your LiPD file.") else: _multi_column = True _column["values"] = csvs[2:] else: for _name, _column in table['columns'].items(): col_num = cast_int(_column["number"]) _column['values'] = csvs[col_num - 1] except IndexError: logger_csvs.warning("merge_csv_column: IndexError: index out of range of csv_data list") except KeyError: logger_csvs.error("merge_csv_column: KeyError: missing columns key") except Exception as e: logger_csvs.error("merge_csv_column: Unknown Error: {}".format(e)) print("Quitting...") exit(1) # We want to keep one missing value ONLY at the table level. Remove MVs if they're still in column-level return table, ensemble
[ "def", "_merge_csv_column", "(", "table", ",", "csvs", ")", ":", "# Start putting CSV data into corresponding column \"values\" key", "try", ":", "ensemble", "=", "is_ensemble", "(", "table", "[", "\"columns\"", "]", ")", "if", "ensemble", ":", "# realization columns", ...
Add csv data to each column in a list of columns :param dict table: Table metadata :param str crumbs: Hierarchy crumbs :param str pc: Paleo or Chron table type :return dict: Table metadata with csv "values" entry :return bool ensemble: Ensemble data or not ensemble data
[ "Add", "csv", "data", "to", "each", "column", "in", "a", "list", "of", "columns" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L140-L188
nickmckay/LiPD-utilities
Python/lipd/csvs.py
read_csv_from_file
def read_csv_from_file(filename): """ Opens the target CSV file and creates a dictionary with one list for each CSV column. :param str filename: :return list of lists: column values """ logger_csvs.info("enter read_csv_from_file") d = {} l = [] try: logger_csvs.info("open file: {}".format(filename)) with open(filename, 'r') as f: r = csv.reader(f, delimiter=',') # Create a dict with X lists corresponding to X columns for idx, col in enumerate(next(r)): d[idx] = [] d = cast_values_csvs(d, idx, col) # Start iter through CSV data for row in r: for idx, col in enumerate(row): # Append the cell to the correct column list d = cast_values_csvs(d, idx, col) # Make a list of lists out of the dictionary instead for idx, col in d.items(): l.append(col) except FileNotFoundError as e: print('CSV FileNotFound: ' + filename) logger_csvs.warn("read_csv_to_columns: FileNotFound: {}, {}".format(filename, e)) logger_csvs.info("exit read_csv_from_file") return l
python
def read_csv_from_file(filename): """ Opens the target CSV file and creates a dictionary with one list for each CSV column. :param str filename: :return list of lists: column values """ logger_csvs.info("enter read_csv_from_file") d = {} l = [] try: logger_csvs.info("open file: {}".format(filename)) with open(filename, 'r') as f: r = csv.reader(f, delimiter=',') # Create a dict with X lists corresponding to X columns for idx, col in enumerate(next(r)): d[idx] = [] d = cast_values_csvs(d, idx, col) # Start iter through CSV data for row in r: for idx, col in enumerate(row): # Append the cell to the correct column list d = cast_values_csvs(d, idx, col) # Make a list of lists out of the dictionary instead for idx, col in d.items(): l.append(col) except FileNotFoundError as e: print('CSV FileNotFound: ' + filename) logger_csvs.warn("read_csv_to_columns: FileNotFound: {}, {}".format(filename, e)) logger_csvs.info("exit read_csv_from_file") return l
[ "def", "read_csv_from_file", "(", "filename", ")", ":", "logger_csvs", ".", "info", "(", "\"enter read_csv_from_file\"", ")", "d", "=", "{", "}", "l", "=", "[", "]", "try", ":", "logger_csvs", ".", "info", "(", "\"open file: {}\"", ".", "format", "(", "fil...
Opens the target CSV file and creates a dictionary with one list for each CSV column. :param str filename: :return list of lists: column values
[ "Opens", "the", "target", "CSV", "file", "and", "creates", "a", "dictionary", "with", "one", "list", "for", "each", "CSV", "column", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L208-L241
nickmckay/LiPD-utilities
Python/lipd/csvs.py
write_csv_to_file
def write_csv_to_file(d): """ Writes columns of data to a target CSV file. :param dict d: A dictionary containing one list for every data column. Keys: int, Values: list :return None: """ logger_csvs.info("enter write_csv_to_file") try: for filename, data in d.items(): try: l_columns = _reorder_csv(data, filename) rows = zip(*l_columns) with open(filename, 'w+') as f: w = csv.writer(f) for row in rows: row2 = decimal_precision(row) w.writerow(row2) except TypeError as e: print("Error: Unable to write values to CSV file, {}:\n" "(1) The data table may have 2 or more identical variables. Please correct the LiPD file manually\n" "(2) There may have been an error trying to prep the values for file write. The 'number' field in the data columns may be a 'string' instead of an 'integer' data type".format(filename)) print(e) except Exception as e: print("Error: CSV file not written, {}, {}:\n" "The data table may have 2 or more identical variables. Please correct the LiPD file manually".format(filename, e)) except AttributeError as e: logger_csvs.error("write_csv_to_file: Unable to write CSV File: {}".format(e, exc_info=True)) logger_csvs.info("exit write_csv_to_file") return
python
def write_csv_to_file(d): """ Writes columns of data to a target CSV file. :param dict d: A dictionary containing one list for every data column. Keys: int, Values: list :return None: """ logger_csvs.info("enter write_csv_to_file") try: for filename, data in d.items(): try: l_columns = _reorder_csv(data, filename) rows = zip(*l_columns) with open(filename, 'w+') as f: w = csv.writer(f) for row in rows: row2 = decimal_precision(row) w.writerow(row2) except TypeError as e: print("Error: Unable to write values to CSV file, {}:\n" "(1) The data table may have 2 or more identical variables. Please correct the LiPD file manually\n" "(2) There may have been an error trying to prep the values for file write. The 'number' field in the data columns may be a 'string' instead of an 'integer' data type".format(filename)) print(e) except Exception as e: print("Error: CSV file not written, {}, {}:\n" "The data table may have 2 or more identical variables. Please correct the LiPD file manually".format(filename, e)) except AttributeError as e: logger_csvs.error("write_csv_to_file: Unable to write CSV File: {}".format(e, exc_info=True)) logger_csvs.info("exit write_csv_to_file") return
[ "def", "write_csv_to_file", "(", "d", ")", ":", "logger_csvs", ".", "info", "(", "\"enter write_csv_to_file\"", ")", "try", ":", "for", "filename", ",", "data", "in", "d", ".", "items", "(", ")", ":", "try", ":", "l_columns", "=", "_reorder_csv", "(", "d...
Writes columns of data to a target CSV file. :param dict d: A dictionary containing one list for every data column. Keys: int, Values: list :return None:
[ "Writes", "columns", "of", "data", "to", "a", "target", "CSV", "file", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L247-L277
nickmckay/LiPD-utilities
Python/lipd/csvs.py
get_csv_from_metadata
def get_csv_from_metadata(dsn, d): """ Two goals. Get all csv from metadata, and return new metadata with generated filenames to match files. :param str dsn: Dataset name :param dict d: Metadata :return dict _csvs: Csv """ logger_csvs.info("enter get_csv_from_metadata") _csvs = OrderedDict() _d = copy.deepcopy(d) try: if "paleoData" in _d: # Process paleoData section _d["paleoData"], _csvs = _get_csv_from_section(_d["paleoData"], "{}.paleo".format(dsn), _csvs) if "chronData" in _d: _d["chronData"], _csvs = _get_csv_from_section(_d["chronData"], "{}.chron".format(dsn), _csvs) except Exception as e: print("Error: get_csv_from_metadata: {}, {}".format(dsn, e)) logger_csvs.error("get_csv_from_metadata: {}, {}".format(dsn, e)) logger_csvs.info("exit get_csv_from_metadata") return _d, _csvs
python
def get_csv_from_metadata(dsn, d): """ Two goals. Get all csv from metadata, and return new metadata with generated filenames to match files. :param str dsn: Dataset name :param dict d: Metadata :return dict _csvs: Csv """ logger_csvs.info("enter get_csv_from_metadata") _csvs = OrderedDict() _d = copy.deepcopy(d) try: if "paleoData" in _d: # Process paleoData section _d["paleoData"], _csvs = _get_csv_from_section(_d["paleoData"], "{}.paleo".format(dsn), _csvs) if "chronData" in _d: _d["chronData"], _csvs = _get_csv_from_section(_d["chronData"], "{}.chron".format(dsn), _csvs) except Exception as e: print("Error: get_csv_from_metadata: {}, {}".format(dsn, e)) logger_csvs.error("get_csv_from_metadata: {}, {}".format(dsn, e)) logger_csvs.info("exit get_csv_from_metadata") return _d, _csvs
[ "def", "get_csv_from_metadata", "(", "dsn", ",", "d", ")", ":", "logger_csvs", ".", "info", "(", "\"enter get_csv_from_metadata\"", ")", "_csvs", "=", "OrderedDict", "(", ")", "_d", "=", "copy", ".", "deepcopy", "(", "d", ")", "try", ":", "if", "\"paleoDat...
Two goals. Get all csv from metadata, and return new metadata with generated filenames to match files. :param str dsn: Dataset name :param dict d: Metadata :return dict _csvs: Csv
[ "Two", "goals", ".", "Get", "all", "csv", "from", "metadata", "and", "return", "new", "metadata", "with", "generated", "filenames", "to", "match", "files", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L283-L308
nickmckay/LiPD-utilities
Python/lipd/csvs.py
_get_csv_from_section
def _get_csv_from_section(sections, crumbs, csvs): """ Get table name, variable name, and column values from paleo metadata :param dict sections: Metadata :param str crumbs: Crumbs :param dict csvs: Csv :return dict sections: Metadata :return dict csvs: Csv """ logger_csvs.info("enter get_csv_from_section: {}".format(crumbs)) _idx = 0 try: # Process the tables in section for _name, _section in sections.items(): # Process each entry sub-table below if they exist if "measurementTable" in _section: sections[_name]["measurementTable"], csvs = _get_csv_from_table(_section["measurementTable"],"{}{}{}".format(crumbs, _idx, "measurement") , csvs) if "model" in _section: sections[_name]["model"], csvs = _get_csv_from_model(_section["model"], "{}{}{}".format(crumbs, _idx, "model") , csvs) _idx += 1 except Exception as e: logger_csvs.error("get_csv_from_section: {}, {}".format(crumbs, e)) print("Error: get_csv_from_section: {}, {}".format(crumbs, e)) logger_csvs.info("exit get_csv_from_section: {}".format(crumbs)) return sections, csvs
python
def _get_csv_from_section(sections, crumbs, csvs): """ Get table name, variable name, and column values from paleo metadata :param dict sections: Metadata :param str crumbs: Crumbs :param dict csvs: Csv :return dict sections: Metadata :return dict csvs: Csv """ logger_csvs.info("enter get_csv_from_section: {}".format(crumbs)) _idx = 0 try: # Process the tables in section for _name, _section in sections.items(): # Process each entry sub-table below if they exist if "measurementTable" in _section: sections[_name]["measurementTable"], csvs = _get_csv_from_table(_section["measurementTable"],"{}{}{}".format(crumbs, _idx, "measurement") , csvs) if "model" in _section: sections[_name]["model"], csvs = _get_csv_from_model(_section["model"], "{}{}{}".format(crumbs, _idx, "model") , csvs) _idx += 1 except Exception as e: logger_csvs.error("get_csv_from_section: {}, {}".format(crumbs, e)) print("Error: get_csv_from_section: {}, {}".format(crumbs, e)) logger_csvs.info("exit get_csv_from_section: {}".format(crumbs)) return sections, csvs
[ "def", "_get_csv_from_section", "(", "sections", ",", "crumbs", ",", "csvs", ")", ":", "logger_csvs", ".", "info", "(", "\"enter get_csv_from_section: {}\"", ".", "format", "(", "crumbs", ")", ")", "_idx", "=", "0", "try", ":", "# Process the tables in section", ...
Get table name, variable name, and column values from paleo metadata :param dict sections: Metadata :param str crumbs: Crumbs :param dict csvs: Csv :return dict sections: Metadata :return dict csvs: Csv
[ "Get", "table", "name", "variable", "name", "and", "column", "values", "from", "paleo", "metadata" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L311-L339
nickmckay/LiPD-utilities
Python/lipd/csvs.py
_get_csv_from_model
def _get_csv_from_model(models, crumbs, csvs): """ Get csv from model data :param dict models: Metadata :param str crumbs: Crumbs :param dict csvs: Csv :return dict models: Metadata :return dict csvs: Csv """ logger_csvs.info("enter get_csv_from_model: {}".format(crumbs)) _idx = 0 try: for _name, _model in models.items(): if "distributionTable" in _model: models[_name]["distributionTable"], csvs = _get_csv_from_table(_model["distributionTable"], "{}{}{}".format(crumbs, _idx, "distribution"), csvs) if "summaryTable" in _model: models[_name]["summaryTable"], csvs = _get_csv_from_table(_model["summaryTable"], "{}{}{}".format(crumbs, _idx, "summary"), csvs) if "ensembleTable" in _model: models[_name]["ensembleTable"], csvs = _get_csv_from_table(_model["ensembleTable"], "{}{}{}".format(crumbs, _idx, "ensemble"), csvs) _idx += 1 except Exception as e: print("Error: get_csv_from_model: {}, {}".format(crumbs, e)) logger_csvs.error("Error: get_csv_from_model: {}, {}".format(crumbs, e)) return models, csvs
python
def _get_csv_from_model(models, crumbs, csvs): """ Get csv from model data :param dict models: Metadata :param str crumbs: Crumbs :param dict csvs: Csv :return dict models: Metadata :return dict csvs: Csv """ logger_csvs.info("enter get_csv_from_model: {}".format(crumbs)) _idx = 0 try: for _name, _model in models.items(): if "distributionTable" in _model: models[_name]["distributionTable"], csvs = _get_csv_from_table(_model["distributionTable"], "{}{}{}".format(crumbs, _idx, "distribution"), csvs) if "summaryTable" in _model: models[_name]["summaryTable"], csvs = _get_csv_from_table(_model["summaryTable"], "{}{}{}".format(crumbs, _idx, "summary"), csvs) if "ensembleTable" in _model: models[_name]["ensembleTable"], csvs = _get_csv_from_table(_model["ensembleTable"], "{}{}{}".format(crumbs, _idx, "ensemble"), csvs) _idx += 1 except Exception as e: print("Error: get_csv_from_model: {}, {}".format(crumbs, e)) logger_csvs.error("Error: get_csv_from_model: {}, {}".format(crumbs, e)) return models, csvs
[ "def", "_get_csv_from_model", "(", "models", ",", "crumbs", ",", "csvs", ")", ":", "logger_csvs", ".", "info", "(", "\"enter get_csv_from_model: {}\"", ".", "format", "(", "crumbs", ")", ")", "_idx", "=", "0", "try", ":", "for", "_name", ",", "_model", "in...
Get csv from model data :param dict models: Metadata :param str crumbs: Crumbs :param dict csvs: Csv :return dict models: Metadata :return dict csvs: Csv
[ "Get", "csv", "from", "model", "data" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L342-L368
nickmckay/LiPD-utilities
Python/lipd/csvs.py
_get_csv_from_columns
def _get_csv_from_columns(table, filename, csvs): """ Search a data tables for column values. Return a dict of column values :param dict d: Table data :return dict: Column values. ref by var name """ csvs[filename] = OrderedDict() try: if "columns" in table: try: for _name, _column in table["columns"].items(): csvs[filename][_name] = {"number": _column["number"], "values": _column["values"]} except KeyError as ke: print("Error: get_csv_from_columns: {}, {}".format(filename, ke)) except Exception as e: print("Error: get_csv_from_columns: inner: {}, {}".format(filename, e)) logger_csvs.error("get_csv_from_columns: inner: {}, {}".format(filename, e)) except Exception as e: print("Error: get_csv_from_columns: {}, {}".format(filename, e)) logger_csvs.error("get_csv_from_columns: {}, {}".format(filename, e)) return csvs
python
def _get_csv_from_columns(table, filename, csvs): """ Search a data tables for column values. Return a dict of column values :param dict d: Table data :return dict: Column values. ref by var name """ csvs[filename] = OrderedDict() try: if "columns" in table: try: for _name, _column in table["columns"].items(): csvs[filename][_name] = {"number": _column["number"], "values": _column["values"]} except KeyError as ke: print("Error: get_csv_from_columns: {}, {}".format(filename, ke)) except Exception as e: print("Error: get_csv_from_columns: inner: {}, {}".format(filename, e)) logger_csvs.error("get_csv_from_columns: inner: {}, {}".format(filename, e)) except Exception as e: print("Error: get_csv_from_columns: {}, {}".format(filename, e)) logger_csvs.error("get_csv_from_columns: {}, {}".format(filename, e)) return csvs
[ "def", "_get_csv_from_columns", "(", "table", ",", "filename", ",", "csvs", ")", ":", "csvs", "[", "filename", "]", "=", "OrderedDict", "(", ")", "try", ":", "if", "\"columns\"", "in", "table", ":", "try", ":", "for", "_name", ",", "_column", "in", "ta...
Search a data tables for column values. Return a dict of column values :param dict d: Table data :return dict: Column values. ref by var name
[ "Search", "a", "data", "tables", "for", "column", "values", ".", "Return", "a", "dict", "of", "column", "values", ":", "param", "dict", "d", ":", "Table", "data", ":", "return", "dict", ":", "Column", "values", ".", "ref", "by", "var", "name" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L389-L410
nickmckay/LiPD-utilities
Python/lipd/csvs.py
_get_filename
def _get_filename(table): """ Get the filename from a data table. If it doesn't exist, create a new one based on table hierarchy in metadata file. format: <dataSetName>.<section><idx><table><idx>.csv example: ODP1098B.Chron1.ChronMeasurementTable.csv :param dict table: Table data :param str crumbs: Crumbs :return str filename: Filename """ try: filename = table["filename"] except KeyError: logger_csvs.info("get_filename: KeyError: missing filename for a table") print("Error: Missing filename for a table") filename = "" except Exception as e: logger_csvs.error("get_filename: {}".format(e)) filename = "" return filename
python
def _get_filename(table): """ Get the filename from a data table. If it doesn't exist, create a new one based on table hierarchy in metadata file. format: <dataSetName>.<section><idx><table><idx>.csv example: ODP1098B.Chron1.ChronMeasurementTable.csv :param dict table: Table data :param str crumbs: Crumbs :return str filename: Filename """ try: filename = table["filename"] except KeyError: logger_csvs.info("get_filename: KeyError: missing filename for a table") print("Error: Missing filename for a table") filename = "" except Exception as e: logger_csvs.error("get_filename: {}".format(e)) filename = "" return filename
[ "def", "_get_filename", "(", "table", ")", ":", "try", ":", "filename", "=", "table", "[", "\"filename\"", "]", "except", "KeyError", ":", "logger_csvs", ".", "info", "(", "\"get_filename: KeyError: missing filename for a table\"", ")", "print", "(", "\"Error: Missi...
Get the filename from a data table. If it doesn't exist, create a new one based on table hierarchy in metadata file. format: <dataSetName>.<section><idx><table><idx>.csv example: ODP1098B.Chron1.ChronMeasurementTable.csv :param dict table: Table data :param str crumbs: Crumbs :return str filename: Filename
[ "Get", "the", "filename", "from", "a", "data", "table", ".", "If", "it", "doesn", "t", "exist", "create", "a", "new", "one", "based", "on", "table", "hierarchy", "in", "metadata", "file", ".", "format", ":", "<dataSetName", ">", ".", "<section", ">", "...
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L413-L432
nickmckay/LiPD-utilities
Python/lipd/csvs.py
_reorder_csv
def _reorder_csv(d, filename=""): """ Preserve the csv column ordering before writing back out to CSV file. Keep column data consistent with JSONLD column number alignment. { "var1" : {"number": 1, "values": [] }, "var2": {"number": 1, "values": [] } } :param dict d: csv data :param str filename: Filename :return dict: csv data """ _ensemble = is_ensemble(d) _d2 = [] try: if _ensemble: # 1 column ensemble: realizations if len(d) == 1: for var, data in d.items(): if "values" in data: _d2 = data["values"] # 2 column ensemble: depth and realizations else: _count = 0 # count up how many columns total, and how many placeholders to make in our list for var, data in d.items(): if isinstance(data["number"], list): _curr_count = len(data["number"]) _count += _curr_count elif isinstance(data["number"], (int, float, str)): _count += 1 # make a list with X number of placeholders _d2 = [None for i in range(0, _count)] # Loop again and start combining all columns into one list of lists for var, data in d.items(): # realizations: insert at (hopefully) index 1,2...1001 if isinstance(data["number"], list): for idx, number in enumerate(data["number"]): # we can't trust the number entries. sometimes they start at "number 1", # which isn't true, because DEPTH is number 1. Use enumerate index instead. _insert_at = int(idx) + 1 # Insert at one above the index. Grab values at exact index _d2[_insert_at] = data["values"][idx-1] # depth column: insert at (hopefully) index 0 else: # we can trust to use the number entry as an index placement _insert_at = int(data["number"]) - 1 # insert at one below number, to compensate for 0-index _d2[_insert_at] = data["values"] else: _count = len(d) _d2 = [None for i in range(0, _count)] for key, data in d.items(): _insert_at = int(data["number"]) - 1 _d2[_insert_at] = data["values"] except Exception as e: print("Error: Unable to write CSV: There was an error trying to prep the values for file write: {}".format(e)) logger_csvs.error("reorder_csvs: Unable to write CSV file: {}, {}".format(filename, e)) return _d2
python
def _reorder_csv(d, filename=""): """ Preserve the csv column ordering before writing back out to CSV file. Keep column data consistent with JSONLD column number alignment. { "var1" : {"number": 1, "values": [] }, "var2": {"number": 1, "values": [] } } :param dict d: csv data :param str filename: Filename :return dict: csv data """ _ensemble = is_ensemble(d) _d2 = [] try: if _ensemble: # 1 column ensemble: realizations if len(d) == 1: for var, data in d.items(): if "values" in data: _d2 = data["values"] # 2 column ensemble: depth and realizations else: _count = 0 # count up how many columns total, and how many placeholders to make in our list for var, data in d.items(): if isinstance(data["number"], list): _curr_count = len(data["number"]) _count += _curr_count elif isinstance(data["number"], (int, float, str)): _count += 1 # make a list with X number of placeholders _d2 = [None for i in range(0, _count)] # Loop again and start combining all columns into one list of lists for var, data in d.items(): # realizations: insert at (hopefully) index 1,2...1001 if isinstance(data["number"], list): for idx, number in enumerate(data["number"]): # we can't trust the number entries. sometimes they start at "number 1", # which isn't true, because DEPTH is number 1. Use enumerate index instead. _insert_at = int(idx) + 1 # Insert at one above the index. Grab values at exact index _d2[_insert_at] = data["values"][idx-1] # depth column: insert at (hopefully) index 0 else: # we can trust to use the number entry as an index placement _insert_at = int(data["number"]) - 1 # insert at one below number, to compensate for 0-index _d2[_insert_at] = data["values"] else: _count = len(d) _d2 = [None for i in range(0, _count)] for key, data in d.items(): _insert_at = int(data["number"]) - 1 _d2[_insert_at] = data["values"] except Exception as e: print("Error: Unable to write CSV: There was an error trying to prep the values for file write: {}".format(e)) logger_csvs.error("reorder_csvs: Unable to write CSV file: {}, {}".format(filename, e)) return _d2
[ "def", "_reorder_csv", "(", "d", ",", "filename", "=", "\"\"", ")", ":", "_ensemble", "=", "is_ensemble", "(", "d", ")", "_d2", "=", "[", "]", "try", ":", "if", "_ensemble", ":", "# 1 column ensemble: realizations", "if", "len", "(", "d", ")", "==", "1...
Preserve the csv column ordering before writing back out to CSV file. Keep column data consistent with JSONLD column number alignment. { "var1" : {"number": 1, "values": [] }, "var2": {"number": 1, "values": [] } } :param dict d: csv data :param str filename: Filename :return dict: csv data
[ "Preserve", "the", "csv", "column", "ordering", "before", "writing", "back", "out", "to", "CSV", "file", ".", "Keep", "column", "data", "consistent", "with", "JSONLD", "column", "number", "alignment", ".", "{", "var1", ":", "{", "number", ":", "1", "values...
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L470-L528
nickmckay/LiPD-utilities
Python/lipd/csvs.py
_is_numeric_data
def _is_numeric_data(ll): """ List of lists of csv values data :param list ll: :return bool: True, all lists are numeric lists, False, data contains at least one numeric list. """ for l in ll: try: if any(math.isnan(float(i)) or isinstance(i, str) for i in l): return False # if not all(isinstance(i, (int, float)) or math.isnan(float(i)) for i in l): # # There is an entry that is a non-numeric entry in this list # return False except ValueError: # Trying to case a str as a float didnt work, and we got an error return False # All arrays are 100% numeric or "nan" entries. return True
python
def _is_numeric_data(ll): """ List of lists of csv values data :param list ll: :return bool: True, all lists are numeric lists, False, data contains at least one numeric list. """ for l in ll: try: if any(math.isnan(float(i)) or isinstance(i, str) for i in l): return False # if not all(isinstance(i, (int, float)) or math.isnan(float(i)) for i in l): # # There is an entry that is a non-numeric entry in this list # return False except ValueError: # Trying to case a str as a float didnt work, and we got an error return False # All arrays are 100% numeric or "nan" entries. return True
[ "def", "_is_numeric_data", "(", "ll", ")", ":", "for", "l", "in", "ll", ":", "try", ":", "if", "any", "(", "math", ".", "isnan", "(", "float", "(", "i", ")", ")", "or", "isinstance", "(", "i", ",", "str", ")", "for", "i", "in", "l", ")", ":",...
List of lists of csv values data :param list ll: :return bool: True, all lists are numeric lists, False, data contains at least one numeric list.
[ "List", "of", "lists", "of", "csv", "values", "data", ":", "param", "list", "ll", ":", ":", "return", "bool", ":", "True", "all", "lists", "are", "numeric", "lists", "False", "data", "contains", "at", "least", "one", "numeric", "list", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L531-L548
nickmckay/LiPD-utilities
Python/lipd/csvs.py
_merge_ensemble
def _merge_ensemble(ensemble, col_nums, col_vals): """ The second column is not typical. "number" is a list of column numbers and "values" is an array of column values. Before we can write this to csv, it has to match the format the writer expects. :param dict ensemble: First column data :param list col_nums: Second column numbers. list of ints :param list col_vals: Second column values. list of lists :return dict: """ try: # Loop for each column available for num in col_nums: # first column number in col_nums is usually "2", so backtrack once since ensemble already has one entry # col_vals is 0-indexed, so backtrack 2 entries ensemble[num-1] = col_vals[num - 2] except IndexError: logger_csvs.error("merge_ensemble: IndexError: index out of range") return ensemble
python
def _merge_ensemble(ensemble, col_nums, col_vals): """ The second column is not typical. "number" is a list of column numbers and "values" is an array of column values. Before we can write this to csv, it has to match the format the writer expects. :param dict ensemble: First column data :param list col_nums: Second column numbers. list of ints :param list col_vals: Second column values. list of lists :return dict: """ try: # Loop for each column available for num in col_nums: # first column number in col_nums is usually "2", so backtrack once since ensemble already has one entry # col_vals is 0-indexed, so backtrack 2 entries ensemble[num-1] = col_vals[num - 2] except IndexError: logger_csvs.error("merge_ensemble: IndexError: index out of range") return ensemble
[ "def", "_merge_ensemble", "(", "ensemble", ",", "col_nums", ",", "col_vals", ")", ":", "try", ":", "# Loop for each column available", "for", "num", "in", "col_nums", ":", "# first column number in col_nums is usually \"2\", so backtrack once since ensemble already has one entry"...
The second column is not typical. "number" is a list of column numbers and "values" is an array of column values. Before we can write this to csv, it has to match the format the writer expects. :param dict ensemble: First column data :param list col_nums: Second column numbers. list of ints :param list col_vals: Second column values. list of lists :return dict:
[ "The", "second", "column", "is", "not", "typical", ".", "number", "is", "a", "list", "of", "column", "numbers", "and", "values", "is", "an", "array", "of", "column", "values", ".", "Before", "we", "can", "write", "this", "to", "csv", "it", "has", "to",...
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/csvs.py#L551-L571
nickmckay/LiPD-utilities
Python/lipd/validator_api.py
get_validator_format
def get_validator_format(L): """ Format the LIPD data in the layout that the Lipd.net validator accepts. '_format' example: [ {"type": "csv", "filenameFull": /path/to/filename.csv, "data": "", ...}, {"type": "json", "filenameFull": /path/to/metadata.jsonld, "data": "", ...}, ... ] :param dict L: Metadata :return list _api_data: Data formatted for validator API """ _api_data = [] _j, _csvs = get_csv_from_metadata(L["dataSetName"], L) _j = rm_values_fields(copy.deepcopy(L)) _j = idx_name_to_num(_j) # All the filenames being processed _filenames = ["metadata.jsonld", "bagit.txt", "bag-info.txt", "manifest-md5.txt", "tagmanifest-md5.txt"]\ + [k for k,v in _csvs.items()] # Loop for each filename for filename in _filenames: # Create a blank template _file = {"type": "", "filenameFull": "", "filenameShort": "", "data": "", "pretty": ""} # filename, no path prefix # _short = os.path.basename(filename) _short = filename # Bagit files if filename.endswith(".txt"): _file = {"type": "bagit", "filenameFull": filename, "filenameShort": _short} # JSONLD files elif filename.endswith(".jsonld"): _file = {"type": "json", "filenameFull": filename, "filenameShort": _short, "data": _j} # CSV files elif filename.endswith(".csv"): _cols_rows = {"cols": 0, "rows": 0} ensemble = is_ensemble(_csvs[_short]) # special case for calculating ensemble rows and columns if ensemble: _cols_rows = get_ensemble_counts(_csvs[_short]) # all other non-ensemble csv files. else: _cols_rows["cols"] = len(_csvs[_short]) for k, v in _csvs[_short].items(): _cols_rows["rows"] = len(v["values"]) break # take what we've gathered for this file, and add it to the list. _file = {"type": "csv", "filenameFull": filename, "filenameShort": _short, "data": _cols_rows} _api_data.append(_file) return _api_data
python
def get_validator_format(L): """ Format the LIPD data in the layout that the Lipd.net validator accepts. '_format' example: [ {"type": "csv", "filenameFull": /path/to/filename.csv, "data": "", ...}, {"type": "json", "filenameFull": /path/to/metadata.jsonld, "data": "", ...}, ... ] :param dict L: Metadata :return list _api_data: Data formatted for validator API """ _api_data = [] _j, _csvs = get_csv_from_metadata(L["dataSetName"], L) _j = rm_values_fields(copy.deepcopy(L)) _j = idx_name_to_num(_j) # All the filenames being processed _filenames = ["metadata.jsonld", "bagit.txt", "bag-info.txt", "manifest-md5.txt", "tagmanifest-md5.txt"]\ + [k for k,v in _csvs.items()] # Loop for each filename for filename in _filenames: # Create a blank template _file = {"type": "", "filenameFull": "", "filenameShort": "", "data": "", "pretty": ""} # filename, no path prefix # _short = os.path.basename(filename) _short = filename # Bagit files if filename.endswith(".txt"): _file = {"type": "bagit", "filenameFull": filename, "filenameShort": _short} # JSONLD files elif filename.endswith(".jsonld"): _file = {"type": "json", "filenameFull": filename, "filenameShort": _short, "data": _j} # CSV files elif filename.endswith(".csv"): _cols_rows = {"cols": 0, "rows": 0} ensemble = is_ensemble(_csvs[_short]) # special case for calculating ensemble rows and columns if ensemble: _cols_rows = get_ensemble_counts(_csvs[_short]) # all other non-ensemble csv files. else: _cols_rows["cols"] = len(_csvs[_short]) for k, v in _csvs[_short].items(): _cols_rows["rows"] = len(v["values"]) break # take what we've gathered for this file, and add it to the list. _file = {"type": "csv", "filenameFull": filename, "filenameShort": _short, "data": _cols_rows} _api_data.append(_file) return _api_data
[ "def", "get_validator_format", "(", "L", ")", ":", "_api_data", "=", "[", "]", "_j", ",", "_csvs", "=", "get_csv_from_metadata", "(", "L", "[", "\"dataSetName\"", "]", ",", "L", ")", "_j", "=", "rm_values_fields", "(", "copy", ".", "deepcopy", "(", "L", ...
Format the LIPD data in the layout that the Lipd.net validator accepts. '_format' example: [ {"type": "csv", "filenameFull": /path/to/filename.csv, "data": "", ...}, {"type": "json", "filenameFull": /path/to/metadata.jsonld, "data": "", ...}, ... ] :param dict L: Metadata :return list _api_data: Data formatted for validator API
[ "Format", "the", "LIPD", "data", "in", "the", "layout", "that", "the", "Lipd", ".", "net", "validator", "accepts", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/validator_api.py#L14-L73
nickmckay/LiPD-utilities
Python/lipd/validator_api.py
create_detailed_results
def create_detailed_results(result): """ Use the result from the API call to create an organized single string output for printing to the console. :param dict result: Results from API call for one file :return str string: Organized results for printing """ string = "" # Validation Response output # string += "VALIDATION RESPONSE\n" string += "STATUS: {}\n".format(result["status"]) if result["feedback"]: string += "WARNINGS: {}\n".format(len(result["feedback"]["wrnMsgs"])) # Loop through and output the Warnings for msg in result["feedback"]["wrnMsgs"]: string += "- {}\n".format(msg) string += "ERRORS: {}\n".format(len(result["feedback"]["errMsgs"])) # Loop through and output the Errors for msg in result["feedback"]["errMsgs"]: string += "- {}\n".format(msg) return string
python
def create_detailed_results(result): """ Use the result from the API call to create an organized single string output for printing to the console. :param dict result: Results from API call for one file :return str string: Organized results for printing """ string = "" # Validation Response output # string += "VALIDATION RESPONSE\n" string += "STATUS: {}\n".format(result["status"]) if result["feedback"]: string += "WARNINGS: {}\n".format(len(result["feedback"]["wrnMsgs"])) # Loop through and output the Warnings for msg in result["feedback"]["wrnMsgs"]: string += "- {}\n".format(msg) string += "ERRORS: {}\n".format(len(result["feedback"]["errMsgs"])) # Loop through and output the Errors for msg in result["feedback"]["errMsgs"]: string += "- {}\n".format(msg) return string
[ "def", "create_detailed_results", "(", "result", ")", ":", "string", "=", "\"\"", "# Validation Response output", "# string += \"VALIDATION RESPONSE\\n\"", "string", "+=", "\"STATUS: {}\\n\"", ".", "format", "(", "result", "[", "\"status\"", "]", ")", "if", "result", ...
Use the result from the API call to create an organized single string output for printing to the console. :param dict result: Results from API call for one file :return str string: Organized results for printing
[ "Use", "the", "result", "from", "the", "API", "call", "to", "create", "an", "organized", "single", "string", "output", "for", "printing", "to", "the", "console", "." ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/validator_api.py#L76-L96
nickmckay/LiPD-utilities
Python/lipd/validator_api.py
display_results
def display_results(results, detailed=False): """ Display the results from the validator in a brief or detailed output :param list results: API results, sorted by dataset name (multiple) :param bool detailed: Detailed results on or off :return none: """ # print("\nVALIDATOR RESULTS") # print("======================\n") if not detailed: print('FILENAME......................................... STATUS..........') for entry in results: try: if detailed: print("\n{}".format(entry["filename"])) print(create_detailed_results(entry)) else: print("{:<50}{}".format(entry["filename"], entry["status"])) except Exception as e: logger_validator_api.debug("display_results: Exception: {}".format(e)) print("Error: display_results: {}".format(e)) return
python
def display_results(results, detailed=False): """ Display the results from the validator in a brief or detailed output :param list results: API results, sorted by dataset name (multiple) :param bool detailed: Detailed results on or off :return none: """ # print("\nVALIDATOR RESULTS") # print("======================\n") if not detailed: print('FILENAME......................................... STATUS..........') for entry in results: try: if detailed: print("\n{}".format(entry["filename"])) print(create_detailed_results(entry)) else: print("{:<50}{}".format(entry["filename"], entry["status"])) except Exception as e: logger_validator_api.debug("display_results: Exception: {}".format(e)) print("Error: display_results: {}".format(e)) return
[ "def", "display_results", "(", "results", ",", "detailed", "=", "False", ")", ":", "# print(\"\\nVALIDATOR RESULTS\")", "# print(\"======================\\n\")", "if", "not", "detailed", ":", "print", "(", "'FILENAME......................................... STATUS..........'", ...
Display the results from the validator in a brief or detailed output :param list results: API results, sorted by dataset name (multiple) :param bool detailed: Detailed results on or off :return none:
[ "Display", "the", "results", "from", "the", "validator", "in", "a", "brief", "or", "detailed", "output" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/validator_api.py#L99-L124
nickmckay/LiPD-utilities
Python/lipd/validator_api.py
call_validator_api
def call_validator_api(dsn, api_data): """ Single call to the lipd.net validator API 'api_data' format: [ {"type": "csv", "filenameFull": /path/to/filename.csv, "data": "", ...}, {"type": "json", "filenameFull": /path/to/metadata.jsonld, "data": "", ...}, ... ] Result format: {"dat": <dict>, "feedback": <dict>, "filename": "", "status": ""} :param str dsn: Dataset name :param list api_data: Prepared payload for one LiPD dataset. All the sorted files (txt, jsonld, csv), API formatted :return list result: Validator result for one file """ _filename = dsn + ".lpd" try: # Contact server and send LiPD metadata as the payload # print("Sending request to LiPD.net validator...\n") api_data = json.dumps(api_data) # The payload that is going to be sent with the JSON request payload = {'json_payload': api_data, 'apikey': 'lipd_linked'} # Development Link # response = requests.post('http://localhost:3000/api/validator', data=payload) # Production Link response = requests.post('http://www.lipd.net/api/validator', data=payload) if response.status_code == 413: result = {"dat": {}, "feedback": {}, "filename": _filename, "status": "HTTP 413: Request Entity Too Large"} elif response.status_code == 404: result = {"dat": {}, "feedback": {}, "filename": _filename, "status": "HTTP 404: Not Found"} elif response.status_code == 400: result = {"dat": {}, "feedback": {}, "filename": _filename, "status": response.text} # For an example of the JSON Response, reference the "sample_data_response" below # Convert JSON string into a Python dictionary # print("Converting response to json...\n") else: result = json.loads(response.text) except TypeError as e: logger_validator_api.warning("get_validator_results: TypeError: {}".format(e)) result = {"dat": {}, "feedback": {}, "filename": _filename, "status": "JSON DECODE ERROR"} except requests.exceptions.ConnectionError as e: logger_validator_api.warning("get_validator_results: ConnectionError: {}".format(e)) result = {"dat": {}, "feedback": {}, "filename": _filename, "status": "UNABLE TO REACH SERVER"} except Exception as e: logger_validator_api.debug("get_validator_results: Exception: {}".format(e)) result = {"dat": {}, "feedback": {}, "filename": _filename, "status": "ERROR BEFORE VALIDATION, {}".format(e)} if not result: result = {"dat": {}, "feedback": {}, "filename": _filename, "status": "EMPTY RESPONSE"} result["filename"] = _filename return result
python
def call_validator_api(dsn, api_data): """ Single call to the lipd.net validator API 'api_data' format: [ {"type": "csv", "filenameFull": /path/to/filename.csv, "data": "", ...}, {"type": "json", "filenameFull": /path/to/metadata.jsonld, "data": "", ...}, ... ] Result format: {"dat": <dict>, "feedback": <dict>, "filename": "", "status": ""} :param str dsn: Dataset name :param list api_data: Prepared payload for one LiPD dataset. All the sorted files (txt, jsonld, csv), API formatted :return list result: Validator result for one file """ _filename = dsn + ".lpd" try: # Contact server and send LiPD metadata as the payload # print("Sending request to LiPD.net validator...\n") api_data = json.dumps(api_data) # The payload that is going to be sent with the JSON request payload = {'json_payload': api_data, 'apikey': 'lipd_linked'} # Development Link # response = requests.post('http://localhost:3000/api/validator', data=payload) # Production Link response = requests.post('http://www.lipd.net/api/validator', data=payload) if response.status_code == 413: result = {"dat": {}, "feedback": {}, "filename": _filename, "status": "HTTP 413: Request Entity Too Large"} elif response.status_code == 404: result = {"dat": {}, "feedback": {}, "filename": _filename, "status": "HTTP 404: Not Found"} elif response.status_code == 400: result = {"dat": {}, "feedback": {}, "filename": _filename, "status": response.text} # For an example of the JSON Response, reference the "sample_data_response" below # Convert JSON string into a Python dictionary # print("Converting response to json...\n") else: result = json.loads(response.text) except TypeError as e: logger_validator_api.warning("get_validator_results: TypeError: {}".format(e)) result = {"dat": {}, "feedback": {}, "filename": _filename, "status": "JSON DECODE ERROR"} except requests.exceptions.ConnectionError as e: logger_validator_api.warning("get_validator_results: ConnectionError: {}".format(e)) result = {"dat": {}, "feedback": {}, "filename": _filename, "status": "UNABLE TO REACH SERVER"} except Exception as e: logger_validator_api.debug("get_validator_results: Exception: {}".format(e)) result = {"dat": {}, "feedback": {}, "filename": _filename, "status": "ERROR BEFORE VALIDATION, {}".format(e)} if not result: result = {"dat": {}, "feedback": {}, "filename": _filename, "status": "EMPTY RESPONSE"} result["filename"] = _filename return result
[ "def", "call_validator_api", "(", "dsn", ",", "api_data", ")", ":", "_filename", "=", "dsn", "+", "\".lpd\"", "try", ":", "# Contact server and send LiPD metadata as the payload", "# print(\"Sending request to LiPD.net validator...\\n\")", "api_data", "=", "json", ".", "dum...
Single call to the lipd.net validator API 'api_data' format: [ {"type": "csv", "filenameFull": /path/to/filename.csv, "data": "", ...}, {"type": "json", "filenameFull": /path/to/metadata.jsonld, "data": "", ...}, ... ] Result format: {"dat": <dict>, "feedback": <dict>, "filename": "", "status": ""} :param str dsn: Dataset name :param list api_data: Prepared payload for one LiPD dataset. All the sorted files (txt, jsonld, csv), API formatted :return list result: Validator result for one file
[ "Single", "call", "to", "the", "lipd", ".", "net", "validator", "API" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/validator_api.py#L127-L190
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/filter_common.py
procces_filters
def procces_filters(all_needs, current_needlist): """ Filters all needs with given configuration :param current_needlist: needlist object, which stores all filters :param all_needs: List of all needs inside document :return: list of needs, which passed the filters """ if current_needlist["sort_by"] is not None: if current_needlist["sort_by"] == "id": all_needs = sorted(all_needs, key=lambda node: node["id"]) elif current_needlist["sort_by"] == "status": all_needs = sorted(all_needs, key=status_sorter) found_needs_by_options = [] # Add all need_parts of given needs to the search list all_needs_incl_parts = prepare_need_list(all_needs) for need_info in all_needs_incl_parts: status_filter_passed = False if current_needlist["status"] is None or len(current_needlist["status"]) == 0: # Filtering for status was not requested status_filter_passed = True elif need_info["status"] is not None and need_info["status"] in current_needlist["status"]: # Match was found status_filter_passed = True tags_filter_passed = False if len(set(need_info["tags"]) & set(current_needlist["tags"])) > 0 or len(current_needlist["tags"]) == 0: tags_filter_passed = True type_filter_passed = False if need_info["type"] in current_needlist["types"] \ or need_info["type_name"] in current_needlist["types"] \ or len(current_needlist["types"]) == 0: type_filter_passed = True if status_filter_passed and tags_filter_passed and type_filter_passed: found_needs_by_options.append(need_info) found_needs_by_string = filter_needs(all_needs_incl_parts, current_needlist["filter"]) # found_needs = [x for x in found_needs_by_string if x in found_needs_by_options] found_needs = check_need_list(found_needs_by_options, found_needs_by_string) return found_needs
python
def procces_filters(all_needs, current_needlist): """ Filters all needs with given configuration :param current_needlist: needlist object, which stores all filters :param all_needs: List of all needs inside document :return: list of needs, which passed the filters """ if current_needlist["sort_by"] is not None: if current_needlist["sort_by"] == "id": all_needs = sorted(all_needs, key=lambda node: node["id"]) elif current_needlist["sort_by"] == "status": all_needs = sorted(all_needs, key=status_sorter) found_needs_by_options = [] # Add all need_parts of given needs to the search list all_needs_incl_parts = prepare_need_list(all_needs) for need_info in all_needs_incl_parts: status_filter_passed = False if current_needlist["status"] is None or len(current_needlist["status"]) == 0: # Filtering for status was not requested status_filter_passed = True elif need_info["status"] is not None and need_info["status"] in current_needlist["status"]: # Match was found status_filter_passed = True tags_filter_passed = False if len(set(need_info["tags"]) & set(current_needlist["tags"])) > 0 or len(current_needlist["tags"]) == 0: tags_filter_passed = True type_filter_passed = False if need_info["type"] in current_needlist["types"] \ or need_info["type_name"] in current_needlist["types"] \ or len(current_needlist["types"]) == 0: type_filter_passed = True if status_filter_passed and tags_filter_passed and type_filter_passed: found_needs_by_options.append(need_info) found_needs_by_string = filter_needs(all_needs_incl_parts, current_needlist["filter"]) # found_needs = [x for x in found_needs_by_string if x in found_needs_by_options] found_needs = check_need_list(found_needs_by_options, found_needs_by_string) return found_needs
[ "def", "procces_filters", "(", "all_needs", ",", "current_needlist", ")", ":", "if", "current_needlist", "[", "\"sort_by\"", "]", "is", "not", "None", ":", "if", "current_needlist", "[", "\"sort_by\"", "]", "==", "\"id\"", ":", "all_needs", "=", "sorted", "(",...
Filters all needs with given configuration :param current_needlist: needlist object, which stores all filters :param all_needs: List of all needs inside document :return: list of needs, which passed the filters
[ "Filters", "all", "needs", "with", "given", "configuration" ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/filter_common.py#L68-L117
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/filter_common.py
filter_needs
def filter_needs(needs, filter_string="", filter_parts=True, merge_part_with_parent=True): """ Filters given needs based on a given filter string. Returns all needs, which pass the given filter. :param merge_part_with_parent: If True, need_parts inherit options from their parent need :param filter_parts: If True, need_parts get also filtered :param filter_string: strings, which gets evaluated against each need :param needs: list of needs, which shall be filtered :return: """ if filter_string is None or filter_string == "": return needs found_needs = [] for filter_need in needs: try: if filter_single_need(filter_need, filter_string): found_needs.append(filter_need) except Exception as e: logger.warning("Filter {0} not valid: Error: {1}".format(filter_string, e)) return found_needs
python
def filter_needs(needs, filter_string="", filter_parts=True, merge_part_with_parent=True): """ Filters given needs based on a given filter string. Returns all needs, which pass the given filter. :param merge_part_with_parent: If True, need_parts inherit options from their parent need :param filter_parts: If True, need_parts get also filtered :param filter_string: strings, which gets evaluated against each need :param needs: list of needs, which shall be filtered :return: """ if filter_string is None or filter_string == "": return needs found_needs = [] for filter_need in needs: try: if filter_single_need(filter_need, filter_string): found_needs.append(filter_need) except Exception as e: logger.warning("Filter {0} not valid: Error: {1}".format(filter_string, e)) return found_needs
[ "def", "filter_needs", "(", "needs", ",", "filter_string", "=", "\"\"", ",", "filter_parts", "=", "True", ",", "merge_part_with_parent", "=", "True", ")", ":", "if", "filter_string", "is", "None", "or", "filter_string", "==", "\"\"", ":", "return", "needs", ...
Filters given needs based on a given filter string. Returns all needs, which pass the given filter. :param merge_part_with_parent: If True, need_parts inherit options from their parent need :param filter_parts: If True, need_parts get also filtered :param filter_string: strings, which gets evaluated against each need :param needs: list of needs, which shall be filtered :return:
[ "Filters", "given", "needs", "based", "on", "a", "given", "filter", "string", ".", "Returns", "all", "needs", "which", "pass", "the", "given", "filter", "." ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/filter_common.py#L160-L184
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/filter_common.py
filter_single_need
def filter_single_need(need, filter_string=""): """ Checks if a single need/need_part passes a filter_string :param need: need or need_part :param filter_string: string, which is used as input for eval() :return: True, if need as passed the filter_string, else False """ filter_context = need.copy() filter_context["search"] = re.search result = False try: result = bool(eval(filter_string, None, filter_context)) except Exception as e: raise NeedInvalidFilter("Filter {0} not valid: Error: {1}".format(filter_string, e)) return result
python
def filter_single_need(need, filter_string=""): """ Checks if a single need/need_part passes a filter_string :param need: need or need_part :param filter_string: string, which is used as input for eval() :return: True, if need as passed the filter_string, else False """ filter_context = need.copy() filter_context["search"] = re.search result = False try: result = bool(eval(filter_string, None, filter_context)) except Exception as e: raise NeedInvalidFilter("Filter {0} not valid: Error: {1}".format(filter_string, e)) return result
[ "def", "filter_single_need", "(", "need", ",", "filter_string", "=", "\"\"", ")", ":", "filter_context", "=", "need", ".", "copy", "(", ")", "filter_context", "[", "\"search\"", "]", "=", "re", ".", "search", "result", "=", "False", "try", ":", "result", ...
Checks if a single need/need_part passes a filter_string :param need: need or need_part :param filter_string: string, which is used as input for eval() :return: True, if need as passed the filter_string, else False
[ "Checks", "if", "a", "single", "need", "/", "need_part", "passes", "a", "filter_string" ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/filter_common.py#L187-L202
sijis/sumologic-python
src/sumologic/utils.py
get_logging_level
def get_logging_level(debug): """Returns logging level based on boolean""" level = logging.INFO if debug: level = logging.DEBUG return level
python
def get_logging_level(debug): """Returns logging level based on boolean""" level = logging.INFO if debug: level = logging.DEBUG return level
[ "def", "get_logging_level", "(", "debug", ")", ":", "level", "=", "logging", ".", "INFO", "if", "debug", ":", "level", "=", "logging", ".", "DEBUG", "return", "level" ]
Returns logging level based on boolean
[ "Returns", "logging", "level", "based", "on", "boolean" ]
train
https://github.com/sijis/sumologic-python/blob/b50200907837f0d452d14ead5e647b8e24e2e9e5/src/sumologic/utils.py#L4-L9
mfussenegger/cr8
cr8/run_spec.py
run_spec
def run_spec(spec, benchmark_hosts, result_hosts=None, output_fmt=None, logfile_info=None, logfile_result=None, action=None, fail_if=None, sample_mode='reservoir'): """Run a spec file, executing the statements on the benchmark_hosts. Short example of a spec file: [setup] statement_files = ["sql/create_table.sql"] [[setup.data_files]] target = "t" source = "data/t.json" [[queries]] statement = "select count(*) from t" iterations = 2000 concurrency = 10 [teardown] statements = ["drop table t"] See https://github.com/mfussenegger/cr8/tree/master/specs for more examples. Args: spec: path to a spec file benchmark_hosts: hostname[:port] pairs of Crate nodes result_hosts: optional hostname[:port] Crate node pairs into which the runtime statistics should be inserted. output_fmt: output format action: Optional action to execute. Default is to execute all actions - setup, queries and teardown. If present only the specified action will be executed. The argument can be provided multiple times to execute more than one action. fail-if: An expression that causes cr8 to exit with a failure if it evaluates to true. The expression can contain formatting expressions for: - runtime_stats - statement - meta - concurrency - bulk_size For example: --fail-if "{runtime_stats.mean} > 1.34" """ with Logger(output_fmt=output_fmt, logfile_info=logfile_info, logfile_result=logfile_result) as log: do_run_spec( spec=spec, benchmark_hosts=benchmark_hosts, log=log, result_hosts=result_hosts, action=action, fail_if=fail_if, sample_mode=sample_mode )
python
def run_spec(spec, benchmark_hosts, result_hosts=None, output_fmt=None, logfile_info=None, logfile_result=None, action=None, fail_if=None, sample_mode='reservoir'): """Run a spec file, executing the statements on the benchmark_hosts. Short example of a spec file: [setup] statement_files = ["sql/create_table.sql"] [[setup.data_files]] target = "t" source = "data/t.json" [[queries]] statement = "select count(*) from t" iterations = 2000 concurrency = 10 [teardown] statements = ["drop table t"] See https://github.com/mfussenegger/cr8/tree/master/specs for more examples. Args: spec: path to a spec file benchmark_hosts: hostname[:port] pairs of Crate nodes result_hosts: optional hostname[:port] Crate node pairs into which the runtime statistics should be inserted. output_fmt: output format action: Optional action to execute. Default is to execute all actions - setup, queries and teardown. If present only the specified action will be executed. The argument can be provided multiple times to execute more than one action. fail-if: An expression that causes cr8 to exit with a failure if it evaluates to true. The expression can contain formatting expressions for: - runtime_stats - statement - meta - concurrency - bulk_size For example: --fail-if "{runtime_stats.mean} > 1.34" """ with Logger(output_fmt=output_fmt, logfile_info=logfile_info, logfile_result=logfile_result) as log: do_run_spec( spec=spec, benchmark_hosts=benchmark_hosts, log=log, result_hosts=result_hosts, action=action, fail_if=fail_if, sample_mode=sample_mode )
[ "def", "run_spec", "(", "spec", ",", "benchmark_hosts", ",", "result_hosts", "=", "None", ",", "output_fmt", "=", "None", ",", "logfile_info", "=", "None", ",", "logfile_result", "=", "None", ",", "action", "=", "None", ",", "fail_if", "=", "None", ",", ...
Run a spec file, executing the statements on the benchmark_hosts. Short example of a spec file: [setup] statement_files = ["sql/create_table.sql"] [[setup.data_files]] target = "t" source = "data/t.json" [[queries]] statement = "select count(*) from t" iterations = 2000 concurrency = 10 [teardown] statements = ["drop table t"] See https://github.com/mfussenegger/cr8/tree/master/specs for more examples. Args: spec: path to a spec file benchmark_hosts: hostname[:port] pairs of Crate nodes result_hosts: optional hostname[:port] Crate node pairs into which the runtime statistics should be inserted. output_fmt: output format action: Optional action to execute. Default is to execute all actions - setup, queries and teardown. If present only the specified action will be executed. The argument can be provided multiple times to execute more than one action. fail-if: An expression that causes cr8 to exit with a failure if it evaluates to true. The expression can contain formatting expressions for: - runtime_stats - statement - meta - concurrency - bulk_size For example: --fail-if "{runtime_stats.mean} > 1.34"
[ "Run", "a", "spec", "file", "executing", "the", "statements", "on", "the", "benchmark_hosts", "." ]
train
https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/run_spec.py#L240-L304
nickmckay/LiPD-utilities
Python/lipd/download_lipd.py
download_from_url
def download_from_url(src_url, dst_dir): """ Use the given URL and destination to download and save a file :param str src_url: Direct URL to lipd file download :param str dst_path: Local path to download file to, including filename and ext. ex. /path/to/filename.lpd :return none: """ dst_path = "" if "MD982181" not in src_url: dsn = input("Please enter the dataset name for this file (Name.Location.Year) : ") dst_path = os.path.join(dst_dir, dsn + ".lpd") try: print("downloading file from url...") urllib.request.urlretrieve(src_url, dst_path) except Exception as e: print("Error: unable to download from url: {}".format(e)) else: print("Error: That file cannot be download due to server restrictions") return dst_path
python
def download_from_url(src_url, dst_dir): """ Use the given URL and destination to download and save a file :param str src_url: Direct URL to lipd file download :param str dst_path: Local path to download file to, including filename and ext. ex. /path/to/filename.lpd :return none: """ dst_path = "" if "MD982181" not in src_url: dsn = input("Please enter the dataset name for this file (Name.Location.Year) : ") dst_path = os.path.join(dst_dir, dsn + ".lpd") try: print("downloading file from url...") urllib.request.urlretrieve(src_url, dst_path) except Exception as e: print("Error: unable to download from url: {}".format(e)) else: print("Error: That file cannot be download due to server restrictions") return dst_path
[ "def", "download_from_url", "(", "src_url", ",", "dst_dir", ")", ":", "dst_path", "=", "\"\"", "if", "\"MD982181\"", "not", "in", "src_url", ":", "dsn", "=", "input", "(", "\"Please enter the dataset name for this file (Name.Location.Year) : \"", ")", "dst_path", "=",...
Use the given URL and destination to download and save a file :param str src_url: Direct URL to lipd file download :param str dst_path: Local path to download file to, including filename and ext. ex. /path/to/filename.lpd :return none:
[ "Use", "the", "given", "URL", "and", "destination", "to", "download", "and", "save", "a", "file", ":", "param", "str", "src_url", ":", "Direct", "URL", "to", "lipd", "file", "download", ":", "param", "str", "dst_path", ":", "Local", "path", "to", "downlo...
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/download_lipd.py#L20-L38
nickmckay/LiPD-utilities
Python/lipd/doi_main.py
doi_main
def doi_main(files): """ Main function that controls the script. Take in directory containing the .lpd file(s). Loop for each file. :return None: """ logger_doi_main.info("enter doi_main") print("Found {0} {1} file(s)".format(str(len(files[".lpd"])), 'LiPD')) force = prompt_force() for file in files[".lpd"]: # Unzip file and get tmp directory path dir_tmp = create_tmp_dir() unzipper(file["filename_ext"], dir_tmp) # Force DOI update? if force: # Update file. Forcing updates for all files. print('processing: {}'.format(file["filename_ext"])) logger_doi_main.info("processing: {}".format(file["filename_ext"])) # dir: dir_root -> dir_tmp process_lpd(file["filename_no_ext"], dir_tmp) # dir: dir_tmp -> dir_root os.chdir(file["dir"]) # Zip the directory containing the updated files. Created in dir_root directory zipper(path_name_ext=file["filename_ext"], root_dir=dir_tmp, name=file["filename_no_ext"]) # Cleanup and remove tmp directory shutil.rmtree(dir_tmp) if not force: # Don't Update File. Flag found and we're not forcing updates. if resolved_flag(open_bag(os.path.join(dir_tmp, file["filename_no_ext"]))): print('skipping: {}'.format(file["filename_ext"])) logger_doi_main.info("skipping: {}".format(file["filename_ext"])) shutil.rmtree(dir_tmp) # Update File. No flag found and hasn't been processed before. else: print('processing: {}'.format(file["filename_ext"])) logger_doi_main.info("processing: {}".format(file["filename_ext"])) # dir: dir_root -> dir_tmp process_lpd(file["filename_no_ext"], dir_tmp) # dir: dir_tmp -> dir_root os.chdir(file["dir"]) # Zip the directory containing the updated files. Created in dir_root directory zipper(path_name_ext=file["filename_ext"], root_dir=dir_tmp, name=file["filename_no_ext"]) # Cleanup and remove tmp directory shutil.rmtree(dir_tmp) logger_doi_main.info("exit doi_main") print("Process Complete") return
python
def doi_main(files): """ Main function that controls the script. Take in directory containing the .lpd file(s). Loop for each file. :return None: """ logger_doi_main.info("enter doi_main") print("Found {0} {1} file(s)".format(str(len(files[".lpd"])), 'LiPD')) force = prompt_force() for file in files[".lpd"]: # Unzip file and get tmp directory path dir_tmp = create_tmp_dir() unzipper(file["filename_ext"], dir_tmp) # Force DOI update? if force: # Update file. Forcing updates for all files. print('processing: {}'.format(file["filename_ext"])) logger_doi_main.info("processing: {}".format(file["filename_ext"])) # dir: dir_root -> dir_tmp process_lpd(file["filename_no_ext"], dir_tmp) # dir: dir_tmp -> dir_root os.chdir(file["dir"]) # Zip the directory containing the updated files. Created in dir_root directory zipper(path_name_ext=file["filename_ext"], root_dir=dir_tmp, name=file["filename_no_ext"]) # Cleanup and remove tmp directory shutil.rmtree(dir_tmp) if not force: # Don't Update File. Flag found and we're not forcing updates. if resolved_flag(open_bag(os.path.join(dir_tmp, file["filename_no_ext"]))): print('skipping: {}'.format(file["filename_ext"])) logger_doi_main.info("skipping: {}".format(file["filename_ext"])) shutil.rmtree(dir_tmp) # Update File. No flag found and hasn't been processed before. else: print('processing: {}'.format(file["filename_ext"])) logger_doi_main.info("processing: {}".format(file["filename_ext"])) # dir: dir_root -> dir_tmp process_lpd(file["filename_no_ext"], dir_tmp) # dir: dir_tmp -> dir_root os.chdir(file["dir"]) # Zip the directory containing the updated files. Created in dir_root directory zipper(path_name_ext=file["filename_ext"], root_dir=dir_tmp, name=file["filename_no_ext"]) # Cleanup and remove tmp directory shutil.rmtree(dir_tmp) logger_doi_main.info("exit doi_main") print("Process Complete") return
[ "def", "doi_main", "(", "files", ")", ":", "logger_doi_main", ".", "info", "(", "\"enter doi_main\"", ")", "print", "(", "\"Found {0} {1} file(s)\"", ".", "format", "(", "str", "(", "len", "(", "files", "[", "\".lpd\"", "]", ")", ")", ",", "'LiPD'", ")", ...
Main function that controls the script. Take in directory containing the .lpd file(s). Loop for each file. :return None:
[ "Main", "function", "that", "controls", "the", "script", ".", "Take", "in", "directory", "containing", "the", ".", "lpd", "file", "(", "s", ")", ".", "Loop", "for", "each", "file", ".", ":", "return", "None", ":" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/doi_main.py#L11-L61
nickmckay/LiPD-utilities
Python/lipd/doi_main.py
process_lpd
def process_lpd(name, dir_tmp): """ Opens up json file, invokes doi_resolver, closes file, updates changelog, cleans directory, and makes new bag. :param str name: Name of current .lpd file :param str dir_tmp: Path to tmp directory :return none: """ logger_doi_main.info("enter process_lpd") dir_root = os.getcwd() dir_bag = os.path.join(dir_tmp, name) dir_data = os.path.join(dir_bag, 'data') # Navigate down to jLD file # dir : dir_root -> dir_data os.chdir(dir_data) # Open jld file and read in the contents. Execute DOI Resolver. jld_data = read_json_from_file(os.path.join(dir_data, name + '.jsonld')) # Overwrite data with new data jld_data = DOIResolver(dir_root, name, jld_data).main() # Open the jld file and overwrite the contents with the new data. write_json_to_file(jld_data) # Open changelog. timestamp it. Prompt user for short description of changes. Close and save # update_changelog() # Delete old bag files, and move files to bag root for re-bagging # dir : dir_data -> dir_bag dir_cleanup(dir_bag, dir_data) finish_bag(dir_bag) logger_doi_main.info("exit process_lpd") return
python
def process_lpd(name, dir_tmp): """ Opens up json file, invokes doi_resolver, closes file, updates changelog, cleans directory, and makes new bag. :param str name: Name of current .lpd file :param str dir_tmp: Path to tmp directory :return none: """ logger_doi_main.info("enter process_lpd") dir_root = os.getcwd() dir_bag = os.path.join(dir_tmp, name) dir_data = os.path.join(dir_bag, 'data') # Navigate down to jLD file # dir : dir_root -> dir_data os.chdir(dir_data) # Open jld file and read in the contents. Execute DOI Resolver. jld_data = read_json_from_file(os.path.join(dir_data, name + '.jsonld')) # Overwrite data with new data jld_data = DOIResolver(dir_root, name, jld_data).main() # Open the jld file and overwrite the contents with the new data. write_json_to_file(jld_data) # Open changelog. timestamp it. Prompt user for short description of changes. Close and save # update_changelog() # Delete old bag files, and move files to bag root for re-bagging # dir : dir_data -> dir_bag dir_cleanup(dir_bag, dir_data) finish_bag(dir_bag) logger_doi_main.info("exit process_lpd") return
[ "def", "process_lpd", "(", "name", ",", "dir_tmp", ")", ":", "logger_doi_main", ".", "info", "(", "\"enter process_lpd\"", ")", "dir_root", "=", "os", ".", "getcwd", "(", ")", "dir_bag", "=", "os", ".", "path", ".", "join", "(", "dir_tmp", ",", "name", ...
Opens up json file, invokes doi_resolver, closes file, updates changelog, cleans directory, and makes new bag. :param str name: Name of current .lpd file :param str dir_tmp: Path to tmp directory :return none:
[ "Opens", "up", "json", "file", "invokes", "doi_resolver", "closes", "file", "updates", "changelog", "cleans", "directory", "and", "makes", "new", "bag", ".", ":", "param", "str", "name", ":", "Name", "of", "current", ".", "lpd", "file", ":", "param", "str"...
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/doi_main.py#L64-L97
nickmckay/LiPD-utilities
Python/lipd/doi_main.py
prompt_force
def prompt_force(): """ Ask the user if they want to force update files that were previously resolved :return bool: response """ logger_doi_main.info("enter prompt_force") count = 0 print("Do you want to force updates for previously resolved files? (y/n)") while True: force = input("> ") try: if count == 2: return True elif force.lower() in ('y', 'yes'): return True elif force.lower() in ('n', 'no'): return False else: print("invalid response") except AttributeError as e: print("invalid response") logger_doi_main.warn("invalid response: {}, {}".format(force, e)) count += 1 logger_doi_main.info("force update: {}".format(force)) logger_doi_main.info("exit prompt_force") return True
python
def prompt_force(): """ Ask the user if they want to force update files that were previously resolved :return bool: response """ logger_doi_main.info("enter prompt_force") count = 0 print("Do you want to force updates for previously resolved files? (y/n)") while True: force = input("> ") try: if count == 2: return True elif force.lower() in ('y', 'yes'): return True elif force.lower() in ('n', 'no'): return False else: print("invalid response") except AttributeError as e: print("invalid response") logger_doi_main.warn("invalid response: {}, {}".format(force, e)) count += 1 logger_doi_main.info("force update: {}".format(force)) logger_doi_main.info("exit prompt_force") return True
[ "def", "prompt_force", "(", ")", ":", "logger_doi_main", ".", "info", "(", "\"enter prompt_force\"", ")", "count", "=", "0", "print", "(", "\"Do you want to force updates for previously resolved files? (y/n)\"", ")", "while", "True", ":", "force", "=", "input", "(", ...
Ask the user if they want to force update files that were previously resolved :return bool: response
[ "Ask", "the", "user", "if", "they", "want", "to", "force", "update", "files", "that", "were", "previously", "resolved", ":", "return", "bool", ":", "response" ]
train
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/doi_main.py#L100-L125
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/directives/needtable.py
process_needtables
def process_needtables(app, doctree, fromdocname): """ Replace all needtables nodes with a tale of filtered noded. :param app: :param doctree: :param fromdocname: :return: """ env = app.builder.env for node in doctree.traverse(Needtable): if not app.config.needs_include_needs: # Ok, this is really dirty. # If we replace a node, docutils checks, if it will not lose any attributes. # But this is here the case, because we are using the attribute "ids" of a node. # However, I do not understand, why losing an attribute is such a big deal, so we delete everything # before docutils claims about it. for att in ('ids', 'names', 'classes', 'dupnames'): node[att] = [] node.replace_self([]) continue id = node.attributes["ids"][0] current_needtable = env.need_all_needtables[id] all_needs = env.needs_all_needs if current_needtable["style"] == "" or current_needtable["style"].upper() not in ["TABLE", "DATATABLES"]: if app.config.needs_table_style == "": style = "DATATABLES" else: style = app.config.needs_table_style.upper() else: style = current_needtable["style"].upper() # Prepare table classes = ["NEEDS_{style}".format(style=style)] content = nodes.table(classes=classes) tgroup = nodes.tgroup() # Define Table column width # ToDo: Find a way to chosen to perfect width automatically. for col in current_needtable["columns"]: if col == "TITLE": tgroup += nodes.colspec(colwidth=15) else: tgroup += nodes.colspec(colwidth=5) node_columns = [] for col in current_needtable["columns"]: header_name = col.title() if col != "ID" else col header_name = header_name.replace("_", " ") node_columns.append(nodes.entry('', nodes.paragraph('', header_name))) tgroup += nodes.thead('', nodes.row( '', *node_columns)) tbody = nodes.tbody() tgroup += tbody content += tgroup all_needs = list(all_needs.values()) if current_needtable["sort_by"] is not None: if current_needtable["sort_by"] == "id": all_needs = sorted(all_needs, key=lambda node: node["id"]) elif current_needtable["sort_by"] == "status": all_needs = sorted(all_needs, key=status_sorter) # Perform filtering of needs found_needs = procces_filters(all_needs, current_needtable) for need_info in found_needs: temp_need = need_info.copy() if temp_need['is_need']: row = nodes.row(classes=['need']) prefix = '' else: row = nodes.row(classes=['need_part']) temp_need['id'] = temp_need['id_complete'] prefix = app.config.needs_part_prefix temp_need['title'] = temp_need['content'] for col in current_needtable["columns"]: if col == "ID": row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_need, "id", make_ref=True, prefix=prefix) elif col == "TITLE": row += row_col_maker( app, fromdocname, env.needs_all_needs, temp_need, "title", prefix=app.config.needs_part_prefix) elif col == "INCOMING": row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_need, "links_back", ref_lookup=True) elif col == "OUTGOING": row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_need, "links", ref_lookup=True) else: row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_need, col.lower()) tbody += row # Need part rows if current_needtable["show_parts"] and need_info['is_need']: for key, part in need_info["parts"].items(): row = nodes.row(classes=['need_part']) temp_part = part.copy() # The dict needs to be manipulated, so that row_col_maker() can be used temp_part['docname'] = need_info['docname'] for col in current_needtable["columns"]: if col == "ID": temp_part['id'] = '.'.join([need_info['id'], part['id']]) row += row_col_maker( app, fromdocname, env.needs_all_needs, temp_part, "id", make_ref=True, prefix=app.config.needs_part_prefix) elif col == "TITLE": row += row_col_maker( app, fromdocname, env.needs_all_needs, temp_part, "content", prefix=app.config.needs_part_prefix) elif col == "INCOMING": row += row_col_maker( app, fromdocname, env.needs_all_needs, temp_part, "links_back", ref_lookup=True) else: row += row_col_maker( app, fromdocname, env.needs_all_needs, temp_part, col.lower()) tbody += row if len(found_needs) == 0: content.append(no_needs_found_paragraph()) # add filter information to output if current_needtable["show_filters"]: content.append(used_filter_paragraph(current_needtable)) node.replace_self(content)
python
def process_needtables(app, doctree, fromdocname): """ Replace all needtables nodes with a tale of filtered noded. :param app: :param doctree: :param fromdocname: :return: """ env = app.builder.env for node in doctree.traverse(Needtable): if not app.config.needs_include_needs: # Ok, this is really dirty. # If we replace a node, docutils checks, if it will not lose any attributes. # But this is here the case, because we are using the attribute "ids" of a node. # However, I do not understand, why losing an attribute is such a big deal, so we delete everything # before docutils claims about it. for att in ('ids', 'names', 'classes', 'dupnames'): node[att] = [] node.replace_self([]) continue id = node.attributes["ids"][0] current_needtable = env.need_all_needtables[id] all_needs = env.needs_all_needs if current_needtable["style"] == "" or current_needtable["style"].upper() not in ["TABLE", "DATATABLES"]: if app.config.needs_table_style == "": style = "DATATABLES" else: style = app.config.needs_table_style.upper() else: style = current_needtable["style"].upper() # Prepare table classes = ["NEEDS_{style}".format(style=style)] content = nodes.table(classes=classes) tgroup = nodes.tgroup() # Define Table column width # ToDo: Find a way to chosen to perfect width automatically. for col in current_needtable["columns"]: if col == "TITLE": tgroup += nodes.colspec(colwidth=15) else: tgroup += nodes.colspec(colwidth=5) node_columns = [] for col in current_needtable["columns"]: header_name = col.title() if col != "ID" else col header_name = header_name.replace("_", " ") node_columns.append(nodes.entry('', nodes.paragraph('', header_name))) tgroup += nodes.thead('', nodes.row( '', *node_columns)) tbody = nodes.tbody() tgroup += tbody content += tgroup all_needs = list(all_needs.values()) if current_needtable["sort_by"] is not None: if current_needtable["sort_by"] == "id": all_needs = sorted(all_needs, key=lambda node: node["id"]) elif current_needtable["sort_by"] == "status": all_needs = sorted(all_needs, key=status_sorter) # Perform filtering of needs found_needs = procces_filters(all_needs, current_needtable) for need_info in found_needs: temp_need = need_info.copy() if temp_need['is_need']: row = nodes.row(classes=['need']) prefix = '' else: row = nodes.row(classes=['need_part']) temp_need['id'] = temp_need['id_complete'] prefix = app.config.needs_part_prefix temp_need['title'] = temp_need['content'] for col in current_needtable["columns"]: if col == "ID": row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_need, "id", make_ref=True, prefix=prefix) elif col == "TITLE": row += row_col_maker( app, fromdocname, env.needs_all_needs, temp_need, "title", prefix=app.config.needs_part_prefix) elif col == "INCOMING": row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_need, "links_back", ref_lookup=True) elif col == "OUTGOING": row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_need, "links", ref_lookup=True) else: row += row_col_maker(app, fromdocname, env.needs_all_needs, temp_need, col.lower()) tbody += row # Need part rows if current_needtable["show_parts"] and need_info['is_need']: for key, part in need_info["parts"].items(): row = nodes.row(classes=['need_part']) temp_part = part.copy() # The dict needs to be manipulated, so that row_col_maker() can be used temp_part['docname'] = need_info['docname'] for col in current_needtable["columns"]: if col == "ID": temp_part['id'] = '.'.join([need_info['id'], part['id']]) row += row_col_maker( app, fromdocname, env.needs_all_needs, temp_part, "id", make_ref=True, prefix=app.config.needs_part_prefix) elif col == "TITLE": row += row_col_maker( app, fromdocname, env.needs_all_needs, temp_part, "content", prefix=app.config.needs_part_prefix) elif col == "INCOMING": row += row_col_maker( app, fromdocname, env.needs_all_needs, temp_part, "links_back", ref_lookup=True) else: row += row_col_maker( app, fromdocname, env.needs_all_needs, temp_part, col.lower()) tbody += row if len(found_needs) == 0: content.append(no_needs_found_paragraph()) # add filter information to output if current_needtable["show_filters"]: content.append(used_filter_paragraph(current_needtable)) node.replace_self(content)
[ "def", "process_needtables", "(", "app", ",", "doctree", ",", "fromdocname", ")", ":", "env", "=", "app", ".", "builder", ".", "env", "for", "node", "in", "doctree", ".", "traverse", "(", "Needtable", ")", ":", "if", "not", "app", ".", "config", ".", ...
Replace all needtables nodes with a tale of filtered noded. :param app: :param doctree: :param fromdocname: :return:
[ "Replace", "all", "needtables", "nodes", "with", "a", "tale", "of", "filtered", "noded", "." ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/directives/needtable.py#L76-L208
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/directives/need.py
get_sections
def get_sections(need_info): """Gets the hierarchy of the section nodes as a list starting at the section of the current need and then its parent sections""" sections = [] current_node = need_info['target_node'] while current_node: if isinstance(current_node, nodes.section): title = current_node.children[0].astext() # If using auto-section numbering, then Sphinx inserts # multiple non-breaking space unicode characters into the title # we'll replace those with a simple space to make them easier to # use in filters title = NON_BREAKING_SPACE.sub(' ', title) sections.append(title) current_node = getattr(current_node, 'parent', None) return sections
python
def get_sections(need_info): """Gets the hierarchy of the section nodes as a list starting at the section of the current need and then its parent sections""" sections = [] current_node = need_info['target_node'] while current_node: if isinstance(current_node, nodes.section): title = current_node.children[0].astext() # If using auto-section numbering, then Sphinx inserts # multiple non-breaking space unicode characters into the title # we'll replace those with a simple space to make them easier to # use in filters title = NON_BREAKING_SPACE.sub(' ', title) sections.append(title) current_node = getattr(current_node, 'parent', None) return sections
[ "def", "get_sections", "(", "need_info", ")", ":", "sections", "=", "[", "]", "current_node", "=", "need_info", "[", "'target_node'", "]", "while", "current_node", ":", "if", "isinstance", "(", "current_node", ",", "nodes", ".", "section", ")", ":", "title",...
Gets the hierarchy of the section nodes as a list starting at the section of the current need and then its parent sections
[ "Gets", "the", "hierarchy", "of", "the", "section", "nodes", "as", "a", "list", "starting", "at", "the", "section", "of", "the", "current", "need", "and", "then", "its", "parent", "sections" ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/directives/need.py#L350-L365
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/directives/need.py
purge_needs
def purge_needs(app, env, docname): """ Gets executed, if a doc file needs to be purged/ read in again. So this code delete all found needs for the given docname. """ if not hasattr(env, 'needs_all_needs'): return env.needs_all_needs = {key: need for key, need in env.needs_all_needs.items() if need['docname'] != docname}
python
def purge_needs(app, env, docname): """ Gets executed, if a doc file needs to be purged/ read in again. So this code delete all found needs for the given docname. """ if not hasattr(env, 'needs_all_needs'): return env.needs_all_needs = {key: need for key, need in env.needs_all_needs.items() if need['docname'] != docname}
[ "def", "purge_needs", "(", "app", ",", "env", ",", "docname", ")", ":", "if", "not", "hasattr", "(", "env", ",", "'needs_all_needs'", ")", ":", "return", "env", ".", "needs_all_needs", "=", "{", "key", ":", "need", "for", "key", ",", "need", "in", "e...
Gets executed, if a doc file needs to be purged/ read in again. So this code delete all found needs for the given docname.
[ "Gets", "executed", "if", "a", "doc", "file", "needs", "to", "be", "purged", "/", "read", "in", "again", ".", "So", "this", "code", "delete", "all", "found", "needs", "for", "the", "given", "docname", "." ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/directives/need.py#L368-L375
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/directives/need.py
add_sections
def add_sections(app, doctree, fromdocname): """Add section titles to the needs as additional attributes that can be used in tables and filters""" needs = getattr(app.builder.env, 'needs_all_needs', {}) for key, need_info in needs.items(): sections = get_sections(need_info) need_info['sections'] = sections need_info['section_name'] = sections[0] if sections else ""
python
def add_sections(app, doctree, fromdocname): """Add section titles to the needs as additional attributes that can be used in tables and filters""" needs = getattr(app.builder.env, 'needs_all_needs', {}) for key, need_info in needs.items(): sections = get_sections(need_info) need_info['sections'] = sections need_info['section_name'] = sections[0] if sections else ""
[ "def", "add_sections", "(", "app", ",", "doctree", ",", "fromdocname", ")", ":", "needs", "=", "getattr", "(", "app", ".", "builder", ".", "env", ",", "'needs_all_needs'", ",", "{", "}", ")", "for", "key", ",", "need_info", "in", "needs", ".", "items",...
Add section titles to the needs as additional attributes that can be used in tables and filters
[ "Add", "section", "titles", "to", "the", "needs", "as", "additional", "attributes", "that", "can", "be", "used", "in", "tables", "and", "filters" ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/directives/need.py#L378-L385
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/directives/need.py
process_need_nodes
def process_need_nodes(app, doctree, fromdocname): """ Event handler to add title meta data (status, tags, links, ...) information to the Need node. :param app: :param doctree: :param fromdocname: :return: """ if not app.config.needs_include_needs: for node in doctree.traverse(Need): node.parent.remove(node) return env = app.builder.env # If no needs were defined, we do not need to do anything if not hasattr(env, "needs_all_needs"): return needs = env.needs_all_needs # Call dynamic functions and replace related note data with their return values resolve_dynamic_values(env) create_back_links(env) for node_need in doctree.traverse(Need): need_id = node_need.attributes["ids"][0] need_data = needs[need_id] find_and_replace_node_content(node_need, env, need_data) node_headline = construct_headline(need_data, app) node_meta = construct_meta(need_data, env) # Collapse check if need_data["collapse"] and "HTML" in app.builder.name.upper(): # HEADER node_need_toogle_container = nodes.container(classes=['toggle']) node_need_toogle_head_container = nodes.container(classes=['header']) node_need_toogle_head_container += node_headline.children node_need_toogle_container.append(node_need_toogle_head_container) # Only add node_meta(line_block), if it has lines in it # Otherwise the pdf/latex build will claim about an empty line_block. if node_meta.children: node_need_toogle_container.append(node_meta) node_need.insert(0, node_need_toogle_container) else: node_meta.insert(0, node_headline) node_need.insert(0, node_meta)
python
def process_need_nodes(app, doctree, fromdocname): """ Event handler to add title meta data (status, tags, links, ...) information to the Need node. :param app: :param doctree: :param fromdocname: :return: """ if not app.config.needs_include_needs: for node in doctree.traverse(Need): node.parent.remove(node) return env = app.builder.env # If no needs were defined, we do not need to do anything if not hasattr(env, "needs_all_needs"): return needs = env.needs_all_needs # Call dynamic functions and replace related note data with their return values resolve_dynamic_values(env) create_back_links(env) for node_need in doctree.traverse(Need): need_id = node_need.attributes["ids"][0] need_data = needs[need_id] find_and_replace_node_content(node_need, env, need_data) node_headline = construct_headline(need_data, app) node_meta = construct_meta(need_data, env) # Collapse check if need_data["collapse"] and "HTML" in app.builder.name.upper(): # HEADER node_need_toogle_container = nodes.container(classes=['toggle']) node_need_toogle_head_container = nodes.container(classes=['header']) node_need_toogle_head_container += node_headline.children node_need_toogle_container.append(node_need_toogle_head_container) # Only add node_meta(line_block), if it has lines in it # Otherwise the pdf/latex build will claim about an empty line_block. if node_meta.children: node_need_toogle_container.append(node_meta) node_need.insert(0, node_need_toogle_container) else: node_meta.insert(0, node_headline) node_need.insert(0, node_meta)
[ "def", "process_need_nodes", "(", "app", ",", "doctree", ",", "fromdocname", ")", ":", "if", "not", "app", ".", "config", ".", "needs_include_needs", ":", "for", "node", "in", "doctree", ".", "traverse", "(", "Need", ")", ":", "node", ".", "parent", ".",...
Event handler to add title meta data (status, tags, links, ...) information to the Need node. :param app: :param doctree: :param fromdocname: :return:
[ "Event", "handler", "to", "add", "title", "meta", "data", "(", "status", "tags", "links", "...", ")", "information", "to", "the", "Need", "node", "." ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/directives/need.py#L388-L441
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/directives/need.py
create_back_links
def create_back_links(env): """ Create back-links in all found needs. But do this only once, as all needs are already collected and this sorting is for all needs and not only for the ones of the current document. :param env: sphinx enviroment :return: None """ if env.needs_workflow['backlink_creation']: return needs = env.needs_all_needs for key, need in needs.items(): for link in need["links"]: link_main = link.split('.')[0] try: link_part = link.split('.')[1] except IndexError: link_part = None if link_main in needs: if key not in needs[link_main]["links_back"]: needs[link_main]["links_back"].append(key) # Handling of links to need_parts inside a need if link_part is not None: if link_part in needs[link_main]['parts']: if 'links_back' not in needs[link_main]['parts'][link_part].keys(): needs[link_main]['parts'][link_part]['links_back'] = [] needs[link_main]['parts'][link_part]['links_back'].append(key) env.needs_workflow['backlink_creation'] = True
python
def create_back_links(env): """ Create back-links in all found needs. But do this only once, as all needs are already collected and this sorting is for all needs and not only for the ones of the current document. :param env: sphinx enviroment :return: None """ if env.needs_workflow['backlink_creation']: return needs = env.needs_all_needs for key, need in needs.items(): for link in need["links"]: link_main = link.split('.')[0] try: link_part = link.split('.')[1] except IndexError: link_part = None if link_main in needs: if key not in needs[link_main]["links_back"]: needs[link_main]["links_back"].append(key) # Handling of links to need_parts inside a need if link_part is not None: if link_part in needs[link_main]['parts']: if 'links_back' not in needs[link_main]['parts'][link_part].keys(): needs[link_main]['parts'][link_part]['links_back'] = [] needs[link_main]['parts'][link_part]['links_back'].append(key) env.needs_workflow['backlink_creation'] = True
[ "def", "create_back_links", "(", "env", ")", ":", "if", "env", ".", "needs_workflow", "[", "'backlink_creation'", "]", ":", "return", "needs", "=", "env", ".", "needs_all_needs", "for", "key", ",", "need", "in", "needs", ".", "items", "(", ")", ":", "for...
Create back-links in all found needs. But do this only once, as all needs are already collected and this sorting is for all needs and not only for the ones of the current document. :param env: sphinx enviroment :return: None
[ "Create", "back", "-", "links", "in", "all", "found", "needs", ".", "But", "do", "this", "only", "once", "as", "all", "needs", "are", "already", "collected", "and", "this", "sorting", "is", "for", "all", "needs", "and", "not", "only", "for", "the", "on...
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/directives/need.py#L444-L476
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/directives/need.py
construct_headline
def construct_headline(need_data, app): """ Constructs the node-structure for the headline/title container :param need_data: need_info container :return: node """ # need title calculation title_type = '{}: '.format(need_data["type_name"]) title_headline = need_data["title"] title_id = "{}".format(need_data["id"]) title_spacer = " " # need title node_type = nodes.inline(title_type, title_type, classes=["needs-type"]) node_title = nodes.inline(title_headline, title_headline, classes=["needs-title"]) nodes_id = nodes.inline(classes=["needs-id"]) nodes_id_text = nodes.Text(title_id, title_id) id_ref = make_refnode(app.builder, fromdocname=need_data['docname'], todocname=need_data['docname'], targetid=need_data['id'], child=nodes_id_text.deepcopy(), title=title_id) nodes_id += id_ref node_spacer = nodes.inline(title_spacer, title_spacer, classes=["needs-spacer"]) headline_line = nodes.line(classes=["headline"]) headline_line.append(node_type) headline_line.append(node_spacer) headline_line.append(node_title) headline_line.append(node_spacer) headline_line.append(nodes_id) return headline_line
python
def construct_headline(need_data, app): """ Constructs the node-structure for the headline/title container :param need_data: need_info container :return: node """ # need title calculation title_type = '{}: '.format(need_data["type_name"]) title_headline = need_data["title"] title_id = "{}".format(need_data["id"]) title_spacer = " " # need title node_type = nodes.inline(title_type, title_type, classes=["needs-type"]) node_title = nodes.inline(title_headline, title_headline, classes=["needs-title"]) nodes_id = nodes.inline(classes=["needs-id"]) nodes_id_text = nodes.Text(title_id, title_id) id_ref = make_refnode(app.builder, fromdocname=need_data['docname'], todocname=need_data['docname'], targetid=need_data['id'], child=nodes_id_text.deepcopy(), title=title_id) nodes_id += id_ref node_spacer = nodes.inline(title_spacer, title_spacer, classes=["needs-spacer"]) headline_line = nodes.line(classes=["headline"]) headline_line.append(node_type) headline_line.append(node_spacer) headline_line.append(node_title) headline_line.append(node_spacer) headline_line.append(nodes_id) return headline_line
[ "def", "construct_headline", "(", "need_data", ",", "app", ")", ":", "# need title calculation", "title_type", "=", "'{}: '", ".", "format", "(", "need_data", "[", "\"type_name\"", "]", ")", "title_headline", "=", "need_data", "[", "\"title\"", "]", "title_id", ...
Constructs the node-structure for the headline/title container :param need_data: need_info container :return: node
[ "Constructs", "the", "node", "-", "structure", "for", "the", "headline", "/", "title", "container", ":", "param", "need_data", ":", "need_info", "container", ":", "return", ":", "node" ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/directives/need.py#L479-L515
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/directives/need.py
construct_meta
def construct_meta(need_data, env): """ Constructs the node-structure for the status container :param need_data: need_info container :return: node """ hide_options = env.config.needs_hide_options if not isinstance(hide_options, list): raise SphinxError('Config parameter needs_hide_options must be of type list') node_meta = nodes.line_block(classes=['needs_meta']) # need parameters param_status = "status: " param_tags = "tags: " if need_data["status"] is not None and 'status' not in hide_options: status_line = nodes.line(classes=['status']) # node_status = nodes.line(param_status, param_status, classes=['status']) node_status = nodes.inline(param_status, param_status, classes=['status']) status_line.append(node_status) status_line.append(nodes.inline(need_data["status"], need_data["status"], classes=["needs-status", str(need_data['status'])])) node_meta.append(status_line) if need_data["tags"] and 'tags' not in hide_options: tag_line = nodes.line(classes=['tags']) # node_tags = nodes.line(param_tags, param_tags, classes=['tags']) node_tags = nodes.inline(param_tags, param_tags, classes=['tags']) tag_line.append(node_tags) for tag in need_data['tags']: # node_tags.append(nodes.inline(tag, tag, classes=["needs-tag", str(tag)])) # node_tags.append(nodes.inline(' ', ' ')) tag_line.append(nodes.inline(tag, tag, classes=["needs-tag", str(tag)])) tag_line.append(nodes.inline(' ', ' ')) node_meta.append(tag_line) # Links incoming if need_data['links_back'] and 'links_back' not in hide_options: node_incoming_line = nodes.line(classes=['links', 'incoming']) prefix = "links incoming: " node_incoming_prefix = nodes.inline(prefix, prefix) node_incoming_line.append(node_incoming_prefix) node_incoming_links = Need_incoming(reftarget=need_data['id']) node_incoming_links.append(nodes.inline(need_data['id'], need_data['id'])) node_incoming_line.append(node_incoming_links) node_meta.append(node_incoming_line) # # Links outgoing if need_data['links'] and 'links' not in hide_options: node_outgoing_line = nodes.line(classes=['links', 'outgoing']) prefix = "links outgoing: " node_outgoing_prefix = nodes.inline(prefix, prefix) node_outgoing_line.append(node_outgoing_prefix) node_outgoing_links = Need_outgoing(reftarget=need_data['id']) node_outgoing_links.append(nodes.inline(need_data['id'], need_data['id'])) node_outgoing_line.append(node_outgoing_links) node_meta.append(node_outgoing_line) extra_options = getattr(env.config, 'needs_extra_options', {}) node_extra_options = [] for key, value in extra_options.items(): if key in hide_options: continue param_data = need_data[key] if param_data is None or not param_data: continue param_option = '{}: '.format(key) option_line = nodes.line(classes=['extra_option']) option_line.append(nodes.inline(param_option, param_option, classes=['extra_option'])) option_line.append(nodes.inline(param_data, param_data, classes=["needs-extra-option", str(key)])) node_extra_options.append(option_line) node_meta += node_extra_options global_options = getattr(env.config, 'needs_global_options', {}) node_global_options = [] for key, value in global_options.items(): # If a global option got locally overwritten, it must already part of extra_options. # In this skipp output, as this is done during extra_option handling if key in extra_options or key in hide_options: continue param_data = need_data[key] if param_data is None or not param_data: continue param_option = '{}: '.format(key) global_option_line = nodes.line(classes=['global_option']) global_option_line.append(nodes.inline(param_option, param_option, classes=['global_option'])) global_option_line.append(nodes.inline(param_data, param_data, classes=["needs-global-option", str(key)])) node_global_options.append(global_option_line) node_meta += node_global_options return node_meta
python
def construct_meta(need_data, env): """ Constructs the node-structure for the status container :param need_data: need_info container :return: node """ hide_options = env.config.needs_hide_options if not isinstance(hide_options, list): raise SphinxError('Config parameter needs_hide_options must be of type list') node_meta = nodes.line_block(classes=['needs_meta']) # need parameters param_status = "status: " param_tags = "tags: " if need_data["status"] is not None and 'status' not in hide_options: status_line = nodes.line(classes=['status']) # node_status = nodes.line(param_status, param_status, classes=['status']) node_status = nodes.inline(param_status, param_status, classes=['status']) status_line.append(node_status) status_line.append(nodes.inline(need_data["status"], need_data["status"], classes=["needs-status", str(need_data['status'])])) node_meta.append(status_line) if need_data["tags"] and 'tags' not in hide_options: tag_line = nodes.line(classes=['tags']) # node_tags = nodes.line(param_tags, param_tags, classes=['tags']) node_tags = nodes.inline(param_tags, param_tags, classes=['tags']) tag_line.append(node_tags) for tag in need_data['tags']: # node_tags.append(nodes.inline(tag, tag, classes=["needs-tag", str(tag)])) # node_tags.append(nodes.inline(' ', ' ')) tag_line.append(nodes.inline(tag, tag, classes=["needs-tag", str(tag)])) tag_line.append(nodes.inline(' ', ' ')) node_meta.append(tag_line) # Links incoming if need_data['links_back'] and 'links_back' not in hide_options: node_incoming_line = nodes.line(classes=['links', 'incoming']) prefix = "links incoming: " node_incoming_prefix = nodes.inline(prefix, prefix) node_incoming_line.append(node_incoming_prefix) node_incoming_links = Need_incoming(reftarget=need_data['id']) node_incoming_links.append(nodes.inline(need_data['id'], need_data['id'])) node_incoming_line.append(node_incoming_links) node_meta.append(node_incoming_line) # # Links outgoing if need_data['links'] and 'links' not in hide_options: node_outgoing_line = nodes.line(classes=['links', 'outgoing']) prefix = "links outgoing: " node_outgoing_prefix = nodes.inline(prefix, prefix) node_outgoing_line.append(node_outgoing_prefix) node_outgoing_links = Need_outgoing(reftarget=need_data['id']) node_outgoing_links.append(nodes.inline(need_data['id'], need_data['id'])) node_outgoing_line.append(node_outgoing_links) node_meta.append(node_outgoing_line) extra_options = getattr(env.config, 'needs_extra_options', {}) node_extra_options = [] for key, value in extra_options.items(): if key in hide_options: continue param_data = need_data[key] if param_data is None or not param_data: continue param_option = '{}: '.format(key) option_line = nodes.line(classes=['extra_option']) option_line.append(nodes.inline(param_option, param_option, classes=['extra_option'])) option_line.append(nodes.inline(param_data, param_data, classes=["needs-extra-option", str(key)])) node_extra_options.append(option_line) node_meta += node_extra_options global_options = getattr(env.config, 'needs_global_options', {}) node_global_options = [] for key, value in global_options.items(): # If a global option got locally overwritten, it must already part of extra_options. # In this skipp output, as this is done during extra_option handling if key in extra_options or key in hide_options: continue param_data = need_data[key] if param_data is None or not param_data: continue param_option = '{}: '.format(key) global_option_line = nodes.line(classes=['global_option']) global_option_line.append(nodes.inline(param_option, param_option, classes=['global_option'])) global_option_line.append(nodes.inline(param_data, param_data, classes=["needs-global-option", str(key)])) node_global_options.append(global_option_line) node_meta += node_global_options return node_meta
[ "def", "construct_meta", "(", "need_data", ",", "env", ")", ":", "hide_options", "=", "env", ".", "config", ".", "needs_hide_options", "if", "not", "isinstance", "(", "hide_options", ",", "list", ")", ":", "raise", "SphinxError", "(", "'Config parameter needs_hi...
Constructs the node-structure for the status container :param need_data: need_info container :return: node
[ "Constructs", "the", "node", "-", "structure", "for", "the", "status", "container", ":", "param", "need_data", ":", "need_info", "container", ":", "return", ":", "node" ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/directives/need.py#L518-L614
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/directives/need.py
_fix_list_dyn_func
def _fix_list_dyn_func(list): """ This searches a list for dynamic function fragments, which may have been cut by generic searches for ",|;". Example: `link_a, [[copy('links', need_id)]]` this will be splitted in list of 3 parts: #. link_a #. [[copy('links' #. need_id)]] This function fixes the above list to the following: #. link_a #. [[copy('links', need_id)]] :param list: list which may contain splitted function calls :return: list of fixed elements """ open_func_string = False new_list = [] for element in list: if '[[' in element: open_func_string = True new_link = [element] elif ']]' in element: new_link.append(element) open_func_string = False element = ",".join(new_link) new_list.append(element) elif open_func_string: new_link.append(element) else: new_list.append(element) return new_list
python
def _fix_list_dyn_func(list): """ This searches a list for dynamic function fragments, which may have been cut by generic searches for ",|;". Example: `link_a, [[copy('links', need_id)]]` this will be splitted in list of 3 parts: #. link_a #. [[copy('links' #. need_id)]] This function fixes the above list to the following: #. link_a #. [[copy('links', need_id)]] :param list: list which may contain splitted function calls :return: list of fixed elements """ open_func_string = False new_list = [] for element in list: if '[[' in element: open_func_string = True new_link = [element] elif ']]' in element: new_link.append(element) open_func_string = False element = ",".join(new_link) new_list.append(element) elif open_func_string: new_link.append(element) else: new_list.append(element) return new_list
[ "def", "_fix_list_dyn_func", "(", "list", ")", ":", "open_func_string", "=", "False", "new_list", "=", "[", "]", "for", "element", "in", "list", ":", "if", "'[['", "in", "element", ":", "open_func_string", "=", "True", "new_link", "=", "[", "element", "]",...
This searches a list for dynamic function fragments, which may have been cut by generic searches for ",|;". Example: `link_a, [[copy('links', need_id)]]` this will be splitted in list of 3 parts: #. link_a #. [[copy('links' #. need_id)]] This function fixes the above list to the following: #. link_a #. [[copy('links', need_id)]] :param list: list which may contain splitted function calls :return: list of fixed elements
[ "This", "searches", "a", "list", "for", "dynamic", "function", "fragments", "which", "may", "have", "been", "cut", "by", "generic", "searches", "for", "|", ";", "." ]
train
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/directives/need.py#L617-L651