Dataset columns (with observed value ranges); each record below lists these fields in order:
- repository_name: string (length 7–55)
- func_path_in_repository: string (length 4–223)
- func_name: string (length 1–134)
- whole_func_string: string (length 75–104k)
- language: string (1 class)
- func_code_string: string (length 75–104k)
- func_code_tokens: list (19–28.4k items)
- func_documentation_string: string (length 1–46.9k)
- func_documentation_tokens: list (1–1.97k items)
- split_name: string (1 class)
- func_code_url: string (length 87–315)
meraki-analytics/datapipelines-python
datapipelines/pipelines.py
DataPipeline.put_many
def put_many(self, type: Type[T], items: Iterable[T]) -> None:
    """Puts multiple objects of the same type into the data sink. The objects may be transformed into a new type for insertion if necessary.

    Args:
        type: The type of the objects being inserted.
        items: An iterable (e.g. list) of objects to be inserted into the data pipeline.
    """
    LOGGER.info("Getting SinkHandlers for \"{type}\"".format(type=type.__name__))
    try:
        handlers = self._put_types[type]
    except KeyError:
        try:
            LOGGER.info("Building new SinkHandlers for \"{type}\"".format(type=type.__name__))
            handlers = self._put_handlers(type)
        except NoConversionError:
            handlers = None
        # Cache the built handlers so later calls reuse them
        self._put_types[type] = handlers

    LOGGER.info("Creating new PipelineContext")
    context = self._new_context()

    LOGGER.info("Sending items \"{items}\" to SinkHandlers".format(items=items))
    if handlers is not None:
        items = list(items)
        for handler in handlers:
            handler.put_many(items, context)
python
def put_many(self, type: Type[T], items: Iterable[T]) -> None:
    """Puts multiple objects of the same type into the data sink. The objects may be transformed into a new type for insertion if necessary.

    Args:
        type: The type of the objects being inserted.
        items: An iterable (e.g. list) of objects to be inserted into the data pipeline.
    """
    LOGGER.info("Getting SinkHandlers for \"{type}\"".format(type=type.__name__))
    try:
        handlers = self._put_types[type]
    except KeyError:
        try:
            LOGGER.info("Building new SinkHandlers for \"{type}\"".format(type=type.__name__))
            handlers = self._put_handlers(type)
        except NoConversionError:
            handlers = None
        # Cache the built handlers so later calls reuse them
        self._put_types[type] = handlers

    LOGGER.info("Creating new PipelineContext")
    context = self._new_context()

    LOGGER.info("Sending items \"{items}\" to SinkHandlers".format(items=items))
    if handlers is not None:
        items = list(items)
        for handler in handlers:
            handler.put_many(items, context)
[ "def", "put_many", "(", "self", ",", "type", ":", "Type", "[", "T", "]", ",", "items", ":", "Iterable", "[", "T", "]", ")", "->", "None", ":", "LOGGER", ".", "info", "(", "\"Getting SinkHandlers for \\\"{type}\\\"\"", ".", "format", "(", "type", "=", "...
Puts multiple objects of the same type into the data sink. The objects may be transformed into a new type for insertion if necessary. Args: items: An iterable (e.g. list) of objects to be inserted into the data pipeline.
[ "Puts", "multiple", "objects", "of", "the", "same", "type", "into", "the", "data", "sink", ".", "The", "objects", "may", "be", "transformed", "into", "a", "new", "type", "for", "insertion", "if", "necessary", "." ]
train
https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/pipelines.py#L532-L556
jmvrbanac/Specter
specter/reporting/dots.py
DotsReporter.print_error
def print_error(self, wrapper):
    """A crude way of outputting the errors for now. This needs to be
    cleaned up into something better.
    """
    level = 0
    parent = wrapper.parent
    while parent:
        print_test_msg(parent.name, level, TestStatus.FAIL, self.use_color)
        level += 1
        parent = parent.parent

    print_test_msg(wrapper.name, level, TestStatus.FAIL, self.use_color)
    print_test_args(wrapper.execute_kwargs, level, TestStatus.FAIL,
                    self.use_color)

    if wrapper.error:
        for line in wrapper.error:
            print_test_msg(line, level + 2, TestStatus.FAIL, self.use_color)

    print_expects(wrapper, level, use_color=self.use_color)
python
def print_error(self, wrapper):
    """A crude way of outputting the errors for now. This needs to be
    cleaned up into something better.
    """
    level = 0
    parent = wrapper.parent
    while parent:
        print_test_msg(parent.name, level, TestStatus.FAIL, self.use_color)
        level += 1
        parent = parent.parent

    print_test_msg(wrapper.name, level, TestStatus.FAIL, self.use_color)
    print_test_args(wrapper.execute_kwargs, level, TestStatus.FAIL,
                    self.use_color)

    if wrapper.error:
        for line in wrapper.error:
            print_test_msg(line, level + 2, TestStatus.FAIL, self.use_color)

    print_expects(wrapper, level, use_color=self.use_color)
[ "def", "print_error", "(", "self", ",", "wrapper", ")", ":", "level", "=", "0", "parent", "=", "wrapper", ".", "parent", "while", "parent", ":", "print_test_msg", "(", "parent", ".", "name", ",", "level", ",", "TestStatus", ".", "FAIL", ",", "self", "....
A crude way of outputting the errors for now. This needs to be cleaned up into something better.
[ "A", "crude", "way", "of", "output", "the", "errors", "for", "now", ".", "This", "needs", "to", "be", "cleaned", "up", "into", "something", "better", "." ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/reporting/dots.py#L29-L53
kylef/refract.py
refract/refraction.py
refract
def refract(structure) -> Element:
    """
    Refracts the given value.

    >>> refract('string')
    String(content='string')

    >>> refract(1)
    Number(content=1)

    >>> refract(True)
    Boolean(content=True)

    >>> refract(None)
    Null()

    >>> refract([1, 2])
    Array(content=[Number(content=1), Number(content=2)])

    >>> refract({'name': 'Doe'})
    Object(content=[Member(
        key=String(content='name'),
        value=String(content='Doe')
    )])
    """
    if isinstance(structure, Element):
        return structure
    elif isinstance(structure, str):
        return String(content=structure)
    elif isinstance(structure, bool):
        return Boolean(content=structure)
    elif isinstance(structure, (int, float)):
        return Number(content=structure)
    elif isinstance(structure, (list, tuple)):
        return Array(content=list(map(refract, structure)))
    elif isinstance(structure, dict):
        return Object(content=[Member(key=refract(k), value=refract(v))
                               for (k, v) in structure.items()])
    elif structure is None:
        return Null()

    raise ValueError('Unsupported Value Type')
python
def refract(structure) -> Element:
    """
    Refracts the given value.

    >>> refract('string')
    String(content='string')

    >>> refract(1)
    Number(content=1)

    >>> refract(True)
    Boolean(content=True)

    >>> refract(None)
    Null()

    >>> refract([1, 2])
    Array(content=[Number(content=1), Number(content=2)])

    >>> refract({'name': 'Doe'})
    Object(content=[Member(
        key=String(content='name'),
        value=String(content='Doe')
    )])
    """
    if isinstance(structure, Element):
        return structure
    elif isinstance(structure, str):
        return String(content=structure)
    elif isinstance(structure, bool):
        return Boolean(content=structure)
    elif isinstance(structure, (int, float)):
        return Number(content=structure)
    elif isinstance(structure, (list, tuple)):
        return Array(content=list(map(refract, structure)))
    elif isinstance(structure, dict):
        return Object(content=[Member(key=refract(k), value=refract(v))
                               for (k, v) in structure.items()])
    elif structure is None:
        return Null()

    raise ValueError('Unsupported Value Type')
[ "def", "refract", "(", "structure", ")", "->", "Element", ":", "if", "isinstance", "(", "structure", ",", "Element", ")", ":", "return", "structure", "elif", "isinstance", "(", "structure", ",", "str", ")", ":", "return", "String", "(", "content", "=", "...
Refracts the given value. >>> refract('string') String(content='string') >>> refract(1) Number(content=1) >>> refract(True) Boolean(content=True) >>> refract(None) Null() >>> refract([1, 2]) Array(content=[Number(content=1), Number(content=2)]) >>> refract({'name': 'Doe'}) Object(content=[Member( key=String(content='name'), value=String(content='Doe') )])
[ "Refracts", "the", "given", "value", "." ]
train
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/refraction.py#L8-L50
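A round-trip sketch pairing refract with the defract property documented later in this section; the top-level `from refract import refract` import path is an assumption (otherwise use `from refract.refraction import refract`).

from refract import refract  # assumed package-root re-export

# Refract a plain dict, then recover the original value via .defract
element = refract({'name': 'Doe', 'tags': [1, 2]})
assert element.defract == {'name': 'Doe', 'tags': [1, 2]}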
kylef/refract.py
refract/json.py
JSONSerialiser.serialise
def serialise(self, element: Element, **kwargs) -> str:
    """
    Serialises the given element into JSON.

    >>> JSONSerialiser().serialise(String(content='Hello'))
    '{"element": "string", "content": "Hello"}'
    """
    return json.dumps(self.serialise_dict(element), **kwargs)
python
def serialise(self, element: Element, **kwargs) -> str:
    """
    Serialises the given element into JSON.

    >>> JSONSerialiser().serialise(String(content='Hello'))
    '{"element": "string", "content": "Hello"}'
    """
    return json.dumps(self.serialise_dict(element), **kwargs)
[ "def", "serialise", "(", "self", ",", "element", ":", "Element", ",", "*", "*", "kwargs", ")", "->", "str", ":", "return", "json", ".", "dumps", "(", "self", ".", "serialise_dict", "(", "element", ")", ",", "*", "*", "kwargs", ")" ]
Serialises the given element into JSON. >>> JSONSerialiser().serialise(String(content='Hello')) '{"element": "string", "content": "Hello"}'
[ "Serialises", "the", "given", "element", "into", "JSON", "." ]
train
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/json.py#L62-L70
kylef/refract.py
refract/json.py
JSONDeserialiser.deserialise
def deserialise(self, element_json: str) -> Element:
    """
    Deserialises the given JSON into an element.

    >>> json = '{"element": "string", "content": "Hello"}'
    >>> JSONDeserialiser().deserialise(json)
    String(content='Hello')
    """
    return self.deserialise_dict(json.loads(element_json))
python
def deserialise(self, element_json: str) -> Element:
    """
    Deserialises the given JSON into an element.

    >>> json = '{"element": "string", "content": "Hello"}'
    >>> JSONDeserialiser().deserialise(json)
    String(content='Hello')
    """
    return self.deserialise_dict(json.loads(element_json))
[ "def", "deserialise", "(", "self", ",", "element_json", ":", "str", ")", "->", "Element", ":", "return", "self", ".", "deserialise_dict", "(", "json", ".", "loads", "(", "element_json", ")", ")" ]
Deserialises the given JSON into an element. >>> json = '{"element": "string", "content": "Hello"}' >>> JSONDeserialiser().deserialise(json) String(content='Hello')
[ "Deserialises", "the", "given", "JSON", "into", "an", "element", "." ]
train
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/json.py#L140-L149
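A minimal round-trip sketch combining this deserialiser with the serialiser above, based only on the doctests shown; the `from refract import String` import path is an assumption.

from refract import String  # assumed re-export of the String element
from refract.json import JSONSerialiser, JSONDeserialiser

payload = JSONSerialiser().serialise(String(content='Hello'))
# payload == '{"element": "string", "content": "Hello"}' per the doctest
element = JSONDeserialiser().deserialise(payload)
assert element.content == 'Hello'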
kylef/refract.py
refract/json.py
CompactJSONSerialiser.serialise
def serialise(self, element: Element) -> str:
    """
    Serialises the given element into Compact JSON.

    >>> CompactJSONSerialiser().serialise(String(content='Hello'))
    '["string", null, null, "Hello"]'
    """
    return json.dumps(self.serialise_element(element))
python
def serialise(self, element: Element) -> str:
    """
    Serialises the given element into Compact JSON.

    >>> CompactJSONSerialiser().serialise(String(content='Hello'))
    '["string", null, null, "Hello"]'
    """
    return json.dumps(self.serialise_element(element))
[ "def", "serialise", "(", "self", ",", "element", ":", "Element", ")", "->", "str", ":", "return", "json", ".", "dumps", "(", "self", ".", "serialise_element", "(", "element", ")", ")" ]
Serialises the given element into Compact JSON. >>> CompactJSONSerialiser().serialise(String(content='Hello')) '["string", null, null, "Hello"]'
[ "Serialises", "the", "given", "element", "into", "Compact", "JSON", "." ]
train
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/json.py#L236-L244
kylef/refract.py
refract/json.py
CompactJSONDeserialiser.deserialise
def deserialise(self, content) -> Element:
    """
    Deserialises the given compact JSON into an element.

    >>> deserialiser = CompactJSONDeserialiser()
    >>> deserialiser.deserialise('["string", null, null, "Hi"]')
    String(content='Hi')
    """
    content = json.loads(content)

    if not isinstance(content, list):
        raise ValueError('Given content was not compact JSON refract')

    return self.deserialise_element(content)
python
def deserialise(self, content) -> Element:
    """
    Deserialises the given compact JSON into an element.

    >>> deserialiser = CompactJSONDeserialiser()
    >>> deserialiser.deserialise('["string", null, null, "Hi"]')
    String(content='Hi')
    """
    content = json.loads(content)

    if not isinstance(content, list):
        raise ValueError('Given content was not compact JSON refract')

    return self.deserialise_element(content)
[ "def", "deserialise", "(", "self", ",", "content", ")", "->", "Element", ":", "content", "=", "json", ".", "loads", "(", "content", ")", "if", "not", "isinstance", "(", "content", ",", "list", ")", ":", "raise", "ValueError", "(", "'Given content was not c...
Deserialises the given compact JSON into an element. >>> deserialiser = CompactJSONDeserialiser() >>> deserialiser.deserialise('["string", null, null, "Hi"]') String(content='Hi')
[ "Deserialises", "the", "given", "compact", "JSON", "into", "an", "element", "." ]
train
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/json.py#L306-L319
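A round-trip sketch for the compact variant, again based only on the doctests shown; the `from refract import String` import path is an assumption.

from refract import String  # assumed re-export
from refract.json import CompactJSONSerialiser, CompactJSONDeserialiser

compact = CompactJSONSerialiser().serialise(String(content='Hi'))
# compact == '["string", null, null, "Hi"]' per the doctest above
assert CompactJSONDeserialiser().deserialise(compact).content == 'Hi'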
jmvrbanac/Specter
specter/runner.py
SpecterRunner.combine_coverage_reports
def combine_coverage_reports(self, omit, parallel):
    """Method to force the combination of parallel coverage reports."""
    tmp_cov = coverage.coverage(omit=omit, data_suffix=parallel)
    tmp_cov.load()
    tmp_cov.combine()
    tmp_cov.save()
python
def combine_coverage_reports(self, omit, parallel):
    """Method to force the combination of parallel coverage reports."""
    tmp_cov = coverage.coverage(omit=omit, data_suffix=parallel)
    tmp_cov.load()
    tmp_cov.combine()
    tmp_cov.save()
[ "def", "combine_coverage_reports", "(", "self", ",", "omit", ",", "parallel", ")", ":", "tmp_cov", "=", "coverage", ".", "coverage", "(", "omit", "=", "omit", ",", "data_suffix", "=", "parallel", ")", "tmp_cov", ".", "load", "(", ")", "tmp_cov", ".", "co...
Method to force the combination of parallel coverage reports.
[ "Method", "to", "force", "the", "combination", "of", "parallel", "coverage", "reports", "." ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/runner.py#L120-L125
meraki-analytics/datapipelines-python
datapipelines/transformers.py
DataTransformer.transforms
def transforms(self) -> Mapping[Type, Iterable[Type]]:
    """The available data transformers."""
    try:
        return getattr(self.__class__, "transform")._transforms
    except AttributeError:
        return {}
python
def transforms(self) -> Mapping[Type, Iterable[Type]]:
    """The available data transformers."""
    try:
        return getattr(self.__class__, "transform")._transforms
    except AttributeError:
        return {}
[ "def", "transforms", "(", "self", ")", "->", "Mapping", "[", "Type", ",", "Iterable", "[", "Type", "]", "]", ":", "try", ":", "return", "getattr", "(", "self", ".", "__class__", ",", "\"transform\"", ")", ".", "_transforms", "except", "AttributeError", "...
The available data transformers.
[ "The", "available", "data", "transformers", "." ]
train
https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/transformers.py#L19-L24
meraki-analytics/datapipelines-python
datapipelines/transformers.py
DataTransformer.transform
def transform(self, target_type: Type[T], value: F, context: PipelineContext = None) -> T:
    """Transforms an object to a new type.

    Args:
        target_type: The type to be converted to.
        value: The object to be transformed.
        context: The context of the transformation (mutable).
    """
    pass
python
def transform(self, target_type: Type[T], value: F, context: PipelineContext = None) -> T:
    """Transforms an object to a new type.

    Args:
        target_type: The type to be converted to.
        value: The object to be transformed.
        context: The context of the transformation (mutable).
    """
    pass
[ "def", "transform", "(", "self", ",", "target_type", ":", "Type", "[", "T", "]", ",", "value", ":", "F", ",", "context", ":", "PipelineContext", "=", "None", ")", "->", "T", ":", "pass" ]
Transforms an object to a new type. Args: target_type: The type to be converted to. value: The object to be transformed. context: The context of the transformation (mutable).
[ "Transforms", "an", "object", "to", "a", "new", "type", "." ]
train
https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/transformers.py#L27-L35
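For context, a hypothetical transformer sketch. The library's real registration decorator (which populates the _transforms mapping read by transforms above) is not part of this record, so this version simply overrides transform directly.

from datapipelines import DataTransformer

class IntToStrTransformer(DataTransformer):
    # Hypothetical sketch: overrides transform() directly rather than using
    # the library's registration decorator, which this record does not show.
    def transform(self, target_type, value, context=None):
        if target_type is str and isinstance(value, int):
            return str(value)
        raise ValueError("unsupported conversion")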
jmvrbanac/Specter
specter/spec.py
CaseWrapper.serialize
def serialize(self):
    """Serializes the CaseWrapper object for collection.

    Warning, this will only grab the available information.
    It is strongly recommended that you only call this once all specs
    and tests have completed.
    """
    expects = [exp.serialize() for exp in self.expects]
    converted_dict = {
        'id': self.id,
        'name': self.pretty_name,
        'raw_name': self.name,
        'doc': self.doc,
        'error': self.error,
        'skipped': self.skipped,
        'skip_reason': self.skip_reason,
        'execute_kwargs': self.safe_execute_kwargs,
        'metadata': self.metadata,
        'start': self.start_time,
        'end': self.end_time,
        'expects': expects,
        'success': self.success
    }
    return remove_empty_entries_from_dict(converted_dict)
python
def serialize(self):
    """Serializes the CaseWrapper object for collection.

    Warning, this will only grab the available information.
    It is strongly recommended that you only call this once all specs
    and tests have completed.
    """
    expects = [exp.serialize() for exp in self.expects]
    converted_dict = {
        'id': self.id,
        'name': self.pretty_name,
        'raw_name': self.name,
        'doc': self.doc,
        'error': self.error,
        'skipped': self.skipped,
        'skip_reason': self.skip_reason,
        'execute_kwargs': self.safe_execute_kwargs,
        'metadata': self.metadata,
        'start': self.start_time,
        'end': self.end_time,
        'expects': expects,
        'success': self.success
    }
    return remove_empty_entries_from_dict(converted_dict)
[ "def", "serialize", "(", "self", ")", ":", "expects", "=", "[", "exp", ".", "serialize", "(", ")", "for", "exp", "in", "self", ".", "expects", "]", "converted_dict", "=", "{", "'id'", ":", "self", ".", "id", ",", "'name'", ":", "self", ".", "pretty...
Serializes the CaseWrapper object for collection. Warning, this will only grab the available information. It is strongly recommended that you only call this once all specs and tests have completed.
[ "Serializes", "the", "CaseWrapper", "object", "for", "collection", "." ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/spec.py#L53-L76
jmvrbanac/Specter
specter/spec.py
Describe.serialize
def serialize(self):
    """Serializes the Spec/Describe object for collection.

    Warning, this will only grab the available information.
    It is strongly recommended that you only call this once all specs
    and tests have completed.
    """
    cases = [case.serialize() for key, case in six.iteritems(self.cases)]
    specs = [spec.serialize() for spec in self.describes]
    converted_dict = {
        'id': self.id,
        'name': self.name,
        'class_path': self.real_class_path,
        'doc': self.doc,
        'cases': cases,
        'specs': specs
    }
    return converted_dict
python
def serialize(self):
    """Serializes the Spec/Describe object for collection.

    Warning, this will only grab the available information.
    It is strongly recommended that you only call this once all specs
    and tests have completed.
    """
    cases = [case.serialize() for key, case in six.iteritems(self.cases)]
    specs = [spec.serialize() for spec in self.describes]
    converted_dict = {
        'id': self.id,
        'name': self.name,
        'class_path': self.real_class_path,
        'doc': self.doc,
        'cases': cases,
        'specs': specs
    }
    return converted_dict
[ "def", "serialize", "(", "self", ")", ":", "cases", "=", "[", "case", ".", "serialize", "(", ")", "for", "key", ",", "case", "in", "six", ".", "iteritems", "(", "self", ".", "cases", ")", "]", "specs", "=", "[", "spec", ".", "serialize", "(", ")"...
Serializes the Spec/Describe object for collection. Warning, this will only grab the available information. It is strongly recommended that you only call this once all specs and tests have completed.
[ "Serializes", "the", "Spec", "/", "Describe", "object", "for", "collection", "." ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/spec.py#L280-L299
jmvrbanac/Specter
specter/spec.py
Describe._run_hooks
def _run_hooks(self):
    """Calls any registered hooks providing the current state."""
    for hook in self.hooks:
        getattr(self, hook)(self._state)
python
def _run_hooks(self):
    """Calls any registered hooks providing the current state."""
    for hook in self.hooks:
        getattr(self, hook)(self._state)
[ "def", "_run_hooks", "(", "self", ")", ":", "for", "hook", "in", "self", ".", "hooks", ":", "getattr", "(", "self", ",", "hook", ")", "(", "self", ".", "_state", ")" ]
Calls any registered hooks providing the current state.
[ "Calls", "any", "registered", "hooks", "providing", "the", "current", "state", "." ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/spec.py#L301-L304
kylef/refract.py
refract/elements/array.py
Array.append
def append(self, element):
    """
    Append an element onto the array.

    >>> array = Array()
    >>> array.append('test')
    """
    from refract.refraction import refract
    self.content.append(refract(element))
python
def append(self, element):
    """
    Append an element onto the array.

    >>> array = Array()
    >>> array.append('test')
    """
    from refract.refraction import refract
    self.content.append(refract(element))
[ "def", "append", "(", "self", ",", "element", ")", ":", "from", "refract", ".", "refraction", "import", "refract", "self", ".", "content", ".", "append", "(", "refract", "(", "element", ")", ")" ]
Append an element onto the array. >>> array = Array() >>> array.append('test')
[ "Append", "an", "element", "onto", "the", "array", "." ]
train
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/elements/array.py#L60-L69
kylef/refract.py
refract/elements/array.py
Array.insert
def insert(self, index: int, element):
    """
    Insert an element at a given position.

    >>> array = Array()
    >>> array.insert(0, Element())
    """
    from refract.refraction import refract
    self.content.insert(index, refract(element))
python
def insert(self, index: int, element):
    """
    Insert an element at a given position.

    >>> array = Array()
    >>> array.insert(0, Element())
    """
    from refract.refraction import refract
    self.content.insert(index, refract(element))
[ "def", "insert", "(", "self", ",", "index", ":", "int", ",", "element", ")", ":", "from", "refract", ".", "refraction", "import", "refract", "self", ".", "content", ".", "insert", "(", "index", ",", "refract", "(", "element", ")", ")" ]
Insert an element at a given position. >>> array = Array() >>> array.insert(0, Element())
[ "Insert", "an", "element", "at", "a", "given", "position", "." ]
train
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/elements/array.py#L71-L80
kylef/refract.py
refract/elements/array.py
Array.index
def index(self, element: Element) -> int:
    """
    Return the index in the array of the first item whose value is element.
    It is an error if there is no such item.

    >>> element = String('hello')
    >>> array = Array(content=[element])
    >>> array.index(element)
    0
    """
    from refract.refraction import refract
    return self.content.index(refract(element))
python
def index(self, element: Element) -> int:
    """
    Return the index in the array of the first item whose value is element.
    It is an error if there is no such item.

    >>> element = String('hello')
    >>> array = Array(content=[element])
    >>> array.index(element)
    0
    """
    from refract.refraction import refract
    return self.content.index(refract(element))
[ "def", "index", "(", "self", ",", "element", ":", "Element", ")", "->", "int", ":", "from", "refract", ".", "refraction", "import", "refract", "return", "self", ".", "content", ".", "index", "(", "refract", "(", "element", ")", ")" ]
Return the index in the array of the first item whose value is element. It is an error if there is no such item. >>> element = String('hello') >>> array = Array(content=[element]) >>> array.index(element) 0
[ "Return", "the", "index", "in", "the", "array", "of", "the", "first", "item", "whose", "value", "is", "element", ".", "It", "is", "an", "error", "if", "there", "is", "no", "such", "item", "." ]
train
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/elements/array.py#L82-L94
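A short usage sketch tying the three Array methods above together, mirroring their doctests; the `from refract import Array, String` path is an assumption.

from refract import Array, String  # assumed re-exports

element = String('hi')
array = Array()
array.append('hello')     # plain values are refracted automatically
array.insert(0, element)  # existing elements pass through unchanged
assert array.index(element) == 0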
meraki-analytics/datapipelines-python
datapipelines/sources.py
DataSource.provides
def provides(self):  # type: Union[Iterable[Type[T]], Type[Any]]
    """The types of objects the data store provides."""
    types = set()
    any_dispatch = False
    try:
        types.update(getattr(self.__class__, "get")._provides)
        any_dispatch = True
    except AttributeError:
        pass
    try:
        types.update(getattr(self.__class__, "get_many")._provides)
        any_dispatch = True
    except AttributeError:
        pass
    return types if any_dispatch else TYPE_WILDCARD
python
def provides(self):  # type: Union[Iterable[Type[T]], Type[Any]]
    """The types of objects the data store provides."""
    types = set()
    any_dispatch = False
    try:
        types.update(getattr(self.__class__, "get")._provides)
        any_dispatch = True
    except AttributeError:
        pass
    try:
        types.update(getattr(self.__class__, "get_many")._provides)
        any_dispatch = True
    except AttributeError:
        pass
    return types if any_dispatch else TYPE_WILDCARD
[ "def", "provides", "(", "self", ")", ":", "# type: Union[Iterable[Type[T]], Type[Any]]", "types", "=", "set", "(", ")", "any_dispatch", "=", "False", "try", ":", "types", ".", "update", "(", "getattr", "(", "self", ".", "__class__", ",", "\"get\"", ")", ".",...
The types of objects the data store provides.
[ "The", "types", "of", "objects", "the", "data", "store", "provides", "." ]
train
https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/sources.py#L19-L33
meraki-analytics/datapipelines-python
datapipelines/sources.py
DataSource.get
def get(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> T:
    """Gets a query from the data source.

    Args:
        query: The query being requested.
        context: The context for the extraction (mutable).

    Returns:
        The requested object.
    """
    pass
python
def get(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> T:
    """Gets a query from the data source.

    Args:
        query: The query being requested.
        context: The context for the extraction (mutable).

    Returns:
        The requested object.
    """
    pass
[ "def", "get", "(", "self", ",", "type", ":", "Type", "[", "T", "]", ",", "query", ":", "Mapping", "[", "str", ",", "Any", "]", ",", "context", ":", "PipelineContext", "=", "None", ")", "->", "T", ":", "pass" ]
Gets a query from the data source. Args: query: The query being requested. context: The context for the extraction (mutable). Returns: The requested object.
[ "Gets", "a", "query", "from", "the", "data", "source", "." ]
train
https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/sources.py#L36-L46
meraki-analytics/datapipelines-python
datapipelines/sources.py
DataSource.get_many
def get_many(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
    """Gets a query from the data source, which contains a request for multiple objects.

    Args:
        query: The query being requested (contains a request for multiple objects).
        context: The context for the extraction (mutable).

    Returns:
        The requested objects.
    """
    pass
python
def get_many(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
    """Gets a query from the data source, which contains a request for multiple objects.

    Args:
        query: The query being requested (contains a request for multiple objects).
        context: The context for the extraction (mutable).

    Returns:
        The requested objects.
    """
    pass
[ "def", "get_many", "(", "self", ",", "type", ":", "Type", "[", "T", "]", ",", "query", ":", "Mapping", "[", "str", ",", "Any", "]", ",", "context", ":", "PipelineContext", "=", "None", ")", "->", "Iterable", "[", "T", "]", ":", "pass" ]
Gets a query from the data source, which contains a request for multiple objects. Args: query: The query being requested (contains a request for multiple objects). context: The context for the extraction (mutable). Returns: The requested objects.
[ "Gets", "a", "query", "from", "the", "data", "source", "which", "contains", "a", "request", "for", "multiple", "objects", "." ]
train
https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/sources.py#L49-L59
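A hypothetical in-memory source illustrating the two abstract methods above; real sources register their provided types through the dispatch machinery behind provides, which this record does not show.

from datapipelines import DataSource

class DictSource(DataSource):
    # Hypothetical sketch: serves objects out of a plain dict keyed by id.
    def __init__(self, data):
        self._data = data

    def get(self, type, query, context=None):
        return self._data[query["id"]]

    def get_many(self, type, query, context=None):
        return [self._data[i] for i in query["ids"]]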
jmvrbanac/Specter
specter/reporting/__init__.py
ReporterPluginManager.subscribe_all_to_spec
def subscribe_all_to_spec(self, spec):
    """Automatically skips subscribing reporters that don't support the
    current mode (parallel or serial).
    """
    for reporter in self.reporters:
        if self.can_use_reporter(reporter, self.parallel):
            reporter.subscribe_to_spec(spec)
python
def subscribe_all_to_spec(self, spec):
    """Automatically skips subscribing reporters that don't support the
    current mode (parallel or serial).
    """
    for reporter in self.reporters:
        if self.can_use_reporter(reporter, self.parallel):
            reporter.subscribe_to_spec(spec)
[ "def", "subscribe_all_to_spec", "(", "self", ",", "spec", ")", ":", "for", "reporter", "in", "self", ".", "reporters", ":", "if", "self", ".", "can_use_reporter", "(", "reporter", ",", "self", ".", "parallel", ")", ":", "reporter", ".", "subscribe_to_spec", ...
Automatically skips subscribing reporters that don't support the current mode (parallel or serial).
[ "Will", "automatically", "not", "subscribe", "reporters", "that", "are", "not", "parallel", "or", "serial", "depending", "on", "the", "current", "mode", "." ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/reporting/__init__.py#L77-L83
jmvrbanac/Specter
specter/reporting/console.py
ConsoleReporter.output
def output(self, msg, indent, status=None):
    """Alias for print_indent_msg with color determined by status."""
    color = None
    if self.use_color:
        color = get_color_from_status(status)
    print_indent_msg(msg, indent, color)
python
def output(self, msg, indent, status=None):
    """Alias for print_indent_msg with color determined by status."""
    color = None
    if self.use_color:
        color = get_color_from_status(status)
    print_indent_msg(msg, indent, color)
[ "def", "output", "(", "self", ",", "msg", ",", "indent", ",", "status", "=", "None", ")", ":", "color", "=", "None", "if", "self", ".", "use_color", ":", "color", "=", "get_color_from_status", "(", "status", ")", "print_indent_msg", "(", "msg", ",", "i...
Alias for print_indent_msg with color determined by status.
[ "Alias", "for", "print_indent_msg", "with", "color", "determined", "by", "status", "." ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/reporting/console.py#L139-L144
jmvrbanac/Specter
specter/util.py
get_real_last_traceback
def get_real_last_traceback(exception):
    """An unfortunate evil... All because Python's traceback cannot
    determine where my executed code is coming from...
    """
    traceback_blocks = []
    _n, _n, exc_traceback = sys.exc_info()
    tb_list = get_all_tracebacks(exc_traceback)[1:]

    # Remove already captured tracebacks
    # TODO(jmv): This must be a better way of doing this. Need to revisit.
    tb_list = [tb for tb in tb_list if tb not in CAPTURED_TRACEBACKS]
    CAPTURED_TRACEBACKS.extend(tb_list)

    for traceback in tb_list:
        lines, path, line_num = get_source_from_frame(traceback.tb_frame)
        traceback_lines = get_numbered_source(lines, traceback.tb_lineno,
                                              line_num)
        traceback_lines.insert(0, ' - {0}'.format(path))
        traceback_lines.insert(1, ' ------------------')
        traceback_lines.append(' ------------------')
        traceback_blocks.append(traceback_lines)

    traced_lines = ['Error Traceback:']
    traced_lines.extend(itertools.chain.from_iterable(traceback_blocks))
    traced_lines.append(' - Error | {0}: {1}'.format(
        type(exception).__name__, exception))

    return traced_lines
python
def get_real_last_traceback(exception):
    """An unfortunate evil... All because Python's traceback cannot
    determine where my executed code is coming from...
    """
    traceback_blocks = []
    _n, _n, exc_traceback = sys.exc_info()
    tb_list = get_all_tracebacks(exc_traceback)[1:]

    # Remove already captured tracebacks
    # TODO(jmv): This must be a better way of doing this. Need to revisit.
    tb_list = [tb for tb in tb_list if tb not in CAPTURED_TRACEBACKS]
    CAPTURED_TRACEBACKS.extend(tb_list)

    for traceback in tb_list:
        lines, path, line_num = get_source_from_frame(traceback.tb_frame)
        traceback_lines = get_numbered_source(lines, traceback.tb_lineno,
                                              line_num)
        traceback_lines.insert(0, ' - {0}'.format(path))
        traceback_lines.insert(1, ' ------------------')
        traceback_lines.append(' ------------------')
        traceback_blocks.append(traceback_lines)

    traced_lines = ['Error Traceback:']
    traced_lines.extend(itertools.chain.from_iterable(traceback_blocks))
    traced_lines.append(' - Error | {0}: {1}'.format(
        type(exception).__name__, exception))

    return traced_lines
[ "def", "get_real_last_traceback", "(", "exception", ")", ":", "traceback_blocks", "=", "[", "]", "_n", ",", "_n", ",", "exc_traceback", "=", "sys", ".", "exc_info", "(", ")", "tb_list", "=", "get_all_tracebacks", "(", "exc_traceback", ")", "[", "1", ":", "...
An unfortunate evil... All because Python's traceback cannot determine where my executed code is coming from...
[ "An", "unfortunate", "evil", "...", "All", "because", "Python", "s", "traceback", "cannot", "determine", "where", "my", "executed", "code", "is", "coming", "from", "..." ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/util.py#L155-L183
kylef/refract.py
refract/contrib/apielements.py
HTTPMessage.assets
def assets(self) -> List[Asset]:
    """
    Returns the assets in the transaction.
    """
    return list(filter(is_element(Asset), self.content))
python
def assets(self) -> List[Asset]:
    """
    Returns the assets in the transaction.
    """
    return list(filter(is_element(Asset), self.content))
[ "def", "assets", "(", "self", ")", "->", "List", "[", "Asset", "]", ":", "return", "list", "(", "filter", "(", "is_element", "(", "Asset", ")", ",", "self", ".", "content", ")", ")" ]
Returns the assets in the transaction.
[ "Returns", "the", "assets", "in", "the", "transaction", "." ]
train
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/contrib/apielements.py#L79-L84
jmvrbanac/Specter
specter/vendor/ast_decompiler.py
decompile
def decompile(ast, indentation=4, line_length=100, starting_indentation=0):
    """Decompiles an AST into Python code.

    Arguments:
    - ast: code to decompile, using AST objects as generated by the standard
      library ast module
    - indentation: indentation level of lines
    - line_length: if lines become longer than this length, ast_decompiler will
      try to break them up (but it will not necessarily succeed in all cases)
    - starting_indentation: indentation level at which to start producing code
    """
    decompiler = Decompiler(
        indentation=indentation,
        line_length=line_length,
        starting_indentation=starting_indentation,
    )
    return decompiler.run(ast)
python
def decompile(ast, indentation=4, line_length=100, starting_indentation=0):
    """Decompiles an AST into Python code.

    Arguments:
    - ast: code to decompile, using AST objects as generated by the standard
      library ast module
    - indentation: indentation level of lines
    - line_length: if lines become longer than this length, ast_decompiler will
      try to break them up (but it will not necessarily succeed in all cases)
    - starting_indentation: indentation level at which to start producing code
    """
    decompiler = Decompiler(
        indentation=indentation,
        line_length=line_length,
        starting_indentation=starting_indentation,
    )
    return decompiler.run(ast)
[ "def", "decompile", "(", "ast", ",", "indentation", "=", "4", ",", "line_length", "=", "100", ",", "starting_indentation", "=", "0", ")", ":", "decompiler", "=", "Decompiler", "(", "indentation", "=", "indentation", ",", "line_length", "=", "line_length", ",...
Decompiles an AST into Python code. Arguments: - ast: code to decompile, using AST objects as generated by the standard library ast module - indentation: indentation level of lines - line_length: if lines become longer than this length, ast_decompiler will try to break them up (but it will not necessarily succeed in all cases) - starting_indentation: indentation level at which to start producing code
[ "Decompiles", "an", "AST", "into", "Python", "code", "." ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/vendor/ast_decompiler.py#L94-L110
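A quick usage sketch for the function above; the import path from the vendored module is an assumption based on this record's file path.

import ast
from specter.vendor.ast_decompiler import decompile  # assumed import path

tree = ast.parse("x = [1, 2, 3]")
print(decompile(tree))  # regenerated source, e.g. "x = [1, 2, 3]"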
jmvrbanac/Specter
specter/vendor/ast_decompiler.py
Decompiler.write_expression_list
def write_expression_list(self, nodes, separator=', ', allow_newlines=True,
                          need_parens=True, final_separator_if_multiline=True):
    """Writes a list of nodes, separated by separator.

    If allow_newlines, will write the expression over multiple lines if
    necessary to stay within max_line_length. If need_parens, will surround
    the expression with parentheses in this case. If
    final_separator_if_multiline, will write a separator at the end of the
    list if it is divided over multiple lines.
    """
    first = True
    last_line = len(self.lines)
    current_line = list(self.current_line)

    for node in nodes:
        if first:
            first = False
        else:
            self.write(separator)
        self.visit(node)
        if allow_newlines and (self.current_line_length() > self.max_line_length
                               or last_line != len(self.lines)):
            break
    else:
        return  # stayed within the limit

    # reset state
    del self.lines[last_line:]
    self.current_line = current_line

    separator = separator.rstrip()
    if need_parens:
        self.write('(')
    self.write_newline()
    with self.add_indentation():
        num_nodes = len(nodes)
        for i, node in enumerate(nodes):
            self.write_indentation()
            self.visit(node)
            if final_separator_if_multiline or i < num_nodes - 1:
                self.write(separator)
            self.write_newline()

    self.write_indentation()
    if need_parens:
        self.write(')')
python
def write_expression_list(self, nodes, separator=', ', allow_newlines=True,
                          need_parens=True, final_separator_if_multiline=True):
    """Writes a list of nodes, separated by separator.

    If allow_newlines, will write the expression over multiple lines if
    necessary to stay within max_line_length. If need_parens, will surround
    the expression with parentheses in this case. If
    final_separator_if_multiline, will write a separator at the end of the
    list if it is divided over multiple lines.
    """
    first = True
    last_line = len(self.lines)
    current_line = list(self.current_line)

    for node in nodes:
        if first:
            first = False
        else:
            self.write(separator)
        self.visit(node)
        if allow_newlines and (self.current_line_length() > self.max_line_length
                               or last_line != len(self.lines)):
            break
    else:
        return  # stayed within the limit

    # reset state
    del self.lines[last_line:]
    self.current_line = current_line

    separator = separator.rstrip()
    if need_parens:
        self.write('(')
    self.write_newline()
    with self.add_indentation():
        num_nodes = len(nodes)
        for i, node in enumerate(nodes):
            self.write_indentation()
            self.visit(node)
            if final_separator_if_multiline or i < num_nodes - 1:
                self.write(separator)
            self.write_newline()

    self.write_indentation()
    if need_parens:
        self.write(')')
[ "def", "write_expression_list", "(", "self", ",", "nodes", ",", "separator", "=", "', '", ",", "allow_newlines", "=", "True", ",", "need_parens", "=", "True", ",", "final_separator_if_multiline", "=", "True", ")", ":", "first", "=", "True", "last_line", "=", ...
Writes a list of nodes, separated by separator. If allow_newlines, will write the expression over multiple lines if necessary to stay within max_line_length. If need_parens, will surround the expression with parentheses in this case. If final_separator_if_multiline, will write a separator at the end of the list if it is divided over multiple lines.
[ "Writes", "a", "list", "of", "nodes", "separated", "by", "separator", "." ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/vendor/ast_decompiler.py#L164-L208
kylef/refract.py
refract/elements/base.py
Element.defract
def defract(self):
    """
    Returns the underlying (unrefracted) value of the element.

    >>> Element(content='Hello').defract
    'Hello'

    >>> Element(content=Element(content='Hello')).defract
    'Hello'

    >>> Element(content=[Element(content='Hello')]).defract
    ['Hello']
    """
    from refract.elements.object import Object

    def get_value(item):
        if isinstance(item, KeyValuePair):
            return (get_value(item.key), get_value(item.value))
        elif isinstance(item, list):
            return [get_value(element) for element in item]
        elif isinstance(item, Element):
            if isinstance(item, Object) or item.element == 'object':
                return dict(get_value(item.content))

            return get_value(item.content)

        return item

    return get_value(self)
python
def defract(self):
    """
    Returns the underlying (unrefracted) value of the element.

    >>> Element(content='Hello').defract
    'Hello'

    >>> Element(content=Element(content='Hello')).defract
    'Hello'

    >>> Element(content=[Element(content='Hello')]).defract
    ['Hello']
    """
    from refract.elements.object import Object

    def get_value(item):
        if isinstance(item, KeyValuePair):
            return (get_value(item.key), get_value(item.value))
        elif isinstance(item, list):
            return [get_value(element) for element in item]
        elif isinstance(item, Element):
            if isinstance(item, Object) or item.element == 'object':
                return dict(get_value(item.content))

            return get_value(item.content)

        return item

    return get_value(self)
[ "def", "defract", "(", "self", ")", ":", "from", "refract", ".", "elements", ".", "object", "import", "Object", "def", "get_value", "(", "item", ")", ":", "if", "isinstance", "(", "item", ",", "KeyValuePair", ")", ":", "return", "(", "get_value", "(", ...
Returns the underlying (unrefracted) value of the element >>> Element(content='Hello').defract 'Hello' >>> Element(content=Element(content='Hello')).defract 'Hello' >>> Element(content=[Element(content='Hello')]).defract ['Hello']
[ "Returns", "the", "underlying", "(", "unrefracted", ")", "value", "of", "element" ]
train
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/elements/base.py#L104-L132
kylef/refract.py
refract/elements/base.py
Element.children
def children(self):
    """
    Returns all of the children elements.
    """
    if isinstance(self.content, list):
        return self.content
    elif isinstance(self.content, Element):
        return [self.content]
    else:
        return []
python
def children(self):
    """
    Returns all of the children elements.
    """
    if isinstance(self.content, list):
        return self.content
    elif isinstance(self.content, Element):
        return [self.content]
    else:
        return []
[ "def", "children", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "content", ",", "list", ")", ":", "return", "self", ".", "content", "elif", "isinstance", "(", "self", ".", "content", ",", "Element", ")", ":", "return", "[", "self", "....
Returns all of the children elements.
[ "Returns", "all", "of", "the", "children", "elements", "." ]
train
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/elements/base.py#L193-L203
kylef/refract.py
refract/elements/base.py
Element.recursive_children
def recursive_children(self):
    """
    Generator returning all recursive children elements.
    """
    for child in self.children:
        yield child

        for recursive_child in child.recursive_children:
            yield recursive_child
python
def recursive_children(self):
    """
    Generator returning all recursive children elements.
    """
    for child in self.children:
        yield child

        for recursive_child in child.recursive_children:
            yield recursive_child
[ "def", "recursive_children", "(", "self", ")", ":", "for", "child", "in", "self", ".", "children", ":", "yield", "child", "for", "recursive_child", "in", "child", ".", "recursive_children", ":", "yield", "recursive_child" ]
Generator returning all recursive children elements.
[ "Generator", "returning", "all", "recursive", "children", "elements", "." ]
train
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/elements/base.py#L206-L215
jmvrbanac/Specter
specter/expect.py
expect
def expect(obj, caller_args=[]):
    """Primary method for test assertions in Specter

    :param obj: The evaluated target object
    :param caller_args: Is only used when expecting a raised Exception
    """
    line, module = get_module_and_line('__spec__')
    src_params = ExpectParams(line, module)

    expect_obj = ExpectAssert(
        obj,
        src_params=src_params,
        caller_args=caller_args
    )
    _add_expect_to_wrapper(expect_obj)
    return expect_obj
python
def expect(obj, caller_args=[]):
    """Primary method for test assertions in Specter

    :param obj: The evaluated target object
    :param caller_args: Is only used when expecting a raised Exception
    """
    line, module = get_module_and_line('__spec__')
    src_params = ExpectParams(line, module)

    expect_obj = ExpectAssert(
        obj,
        src_params=src_params,
        caller_args=caller_args
    )
    _add_expect_to_wrapper(expect_obj)
    return expect_obj
[ "def", "expect", "(", "obj", ",", "caller_args", "=", "[", "]", ")", ":", "line", ",", "module", "=", "get_module_and_line", "(", "'__spec__'", ")", "src_params", "=", "ExpectParams", "(", "line", ",", "module", ")", "expect_obj", "=", "ExpectAssert", "(",...
Primary method for test assertions in Specter :param obj: The evaluated target object :param caller_args: Is only used when expecting a raised Exception
[ "Primary", "method", "for", "test", "assertions", "in", "Specter" ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/expect.py#L202-L217
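A minimal spec sketch showing expect in context; the Spec base class and the .to.equal assertion chain follow Specter's documented style but are not part of this record.

from specter import Spec, expect

class Addition(Spec):
    def it_adds_small_numbers(self):
        expect(1 + 1).to.equal(2)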
jmvrbanac/Specter
specter/expect.py
require
def require(obj, caller_args=[]):
    """Primary method for test assertions in Specter

    :param obj: The evaluated target object
    :param caller_args: Is only used when expecting a raised Exception
    """
    line, module = get_module_and_line('__spec__')
    src_params = ExpectParams(line, module)

    require_obj = RequireAssert(
        obj,
        src_params=src_params,
        caller_args=caller_args
    )
    _add_expect_to_wrapper(require_obj)
    return require_obj
python
def require(obj, caller_args=[]):
    """Primary method for test assertions in Specter

    :param obj: The evaluated target object
    :param caller_args: Is only used when expecting a raised Exception
    """
    line, module = get_module_and_line('__spec__')
    src_params = ExpectParams(line, module)

    require_obj = RequireAssert(
        obj,
        src_params=src_params,
        caller_args=caller_args
    )
    _add_expect_to_wrapper(require_obj)
    return require_obj
[ "def", "require", "(", "obj", ",", "caller_args", "=", "[", "]", ")", ":", "line", ",", "module", "=", "get_module_and_line", "(", "'__spec__'", ")", "src_params", "=", "ExpectParams", "(", "line", ",", "module", ")", "require_obj", "=", "RequireAssert", "...
Primary method for test assertions in Specter :param obj: The evaluated target object :param caller_args: Is only used when expecting a raised Exception
[ "Primary", "method", "for", "test", "assertions", "in", "Specter" ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/expect.py#L220-L235
jmvrbanac/Specter
specter/expect.py
skip
def skip(reason):
    """The skip decorator allows for you to always bypass a test.

    :param reason: Expects a string
    """
    def decorator(test_func):
        if not isinstance(test_func, (type, ClassObjType)):
            func_data = None
            if test_func.__name__ == 'DECORATOR_ONCALL':
                # Call down and save the results
                func_data = test_func()

            @functools.wraps(test_func)
            def skip_wrapper(*args, **kwargs):
                other_data = {
                    'real_func': func_data[0] if func_data else test_func,
                    'metadata': func_data[1] if func_data else None
                }
                raise TestSkippedException(test_func, reason, other_data)
            test_func = skip_wrapper
        return test_func
    return decorator
python
def skip(reason):
    """The skip decorator allows for you to always bypass a test.

    :param reason: Expects a string
    """
    def decorator(test_func):
        if not isinstance(test_func, (type, ClassObjType)):
            func_data = None
            if test_func.__name__ == 'DECORATOR_ONCALL':
                # Call down and save the results
                func_data = test_func()

            @functools.wraps(test_func)
            def skip_wrapper(*args, **kwargs):
                other_data = {
                    'real_func': func_data[0] if func_data else test_func,
                    'metadata': func_data[1] if func_data else None
                }
                raise TestSkippedException(test_func, reason, other_data)
            test_func = skip_wrapper
        return test_func
    return decorator
[ "def", "skip", "(", "reason", ")", ":", "def", "decorator", "(", "test_func", ")", ":", "if", "not", "isinstance", "(", "test_func", ",", "(", "type", ",", "ClassObjType", ")", ")", ":", "func_data", "=", "None", "if", "test_func", ".", "__name__", "==...
The skip decorator allows for you to always bypass a test. :param reason: Expects a string
[ "The", "skip", "decorator", "allows", "for", "you", "to", "always", "bypass", "a", "test", "." ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/expect.py#L238-L259
jmvrbanac/Specter
specter/expect.py
skip_if
def skip_if(condition, reason=None):
    """The skip_if decorator allows for you to bypass a test on conditions

    :param condition: Expects a boolean
    :param reason: Expects a string
    """
    if condition:
        return skip(reason)

    def wrapper(func):
        return func
    return wrapper
python
def skip_if(condition, reason=None):
    """The skip_if decorator allows for you to bypass a test on conditions

    :param condition: Expects a boolean
    :param reason: Expects a string
    """
    if condition:
        return skip(reason)

    def wrapper(func):
        return func
    return wrapper
[ "def", "skip_if", "(", "condition", ",", "reason", "=", "None", ")", ":", "if", "condition", ":", "return", "skip", "(", "reason", ")", "def", "wrapper", "(", "func", ")", ":", "return", "func", "return", "wrapper" ]
The skip_if decorator allows for you to bypass a test on conditions :param condition: Expects a boolean :param reason: Expects a string
[ "The", "skip_if", "decorator", "allows", "for", "you", "to", "bypass", "a", "test", "on", "conditions" ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/expect.py#L262-L273
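A usage sketch for skip_if; the Spec base class and assertion chain are assumed from Specter's documented style, while skip_if itself comes from the specter/expect.py module shown in this record.

import sys
from specter import Spec, expect
from specter.expect import skip_if  # defined in this module per the record

class PlatformPaths(Spec):
    @skip_if(sys.platform == 'win32', 'POSIX-only behaviour')
    def it_uses_posix_separators(self):
        expect('/tmp'[0]).to.equal('/')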
jmvrbanac/Specter
specter/expect.py
incomplete
def incomplete(test_func):
    """The incomplete decorator behaves much like a normal skip; however,
    tests that are marked as incomplete get tracked under a different metric.
    This allows for you to create a skeleton around all of your features and
    specifications, and track what tests have been written and what tests
    are left outstanding.

    .. code-block:: python

        # Example of using the incomplete decorator
        @incomplete
        def it_should_do_something(self):
            pass
    """
    if not isinstance(test_func, (type, ClassObjType)):
        @functools.wraps(test_func)
        def skip_wrapper(*args, **kwargs):
            raise TestIncompleteException(test_func, _('Test is incomplete'))
        return skip_wrapper
python
def incomplete(test_func):
    """The incomplete decorator behaves much like a normal skip; however,
    tests that are marked as incomplete get tracked under a different metric.
    This allows for you to create a skeleton around all of your features and
    specifications, and track what tests have been written and what tests
    are left outstanding.

    .. code-block:: python

        # Example of using the incomplete decorator
        @incomplete
        def it_should_do_something(self):
            pass
    """
    if not isinstance(test_func, (type, ClassObjType)):
        @functools.wraps(test_func)
        def skip_wrapper(*args, **kwargs):
            raise TestIncompleteException(test_func, _('Test is incomplete'))
        return skip_wrapper
[ "def", "incomplete", "(", "test_func", ")", ":", "if", "not", "isinstance", "(", "test_func", ",", "(", "type", ",", "ClassObjType", ")", ")", ":", "@", "functools", ".", "wraps", "(", "test_func", ")", "def", "skip_wrapper", "(", "*", "args", ",", "*"...
The incomplete decorator behaves much like a normal skip; however, tests that are marked as incomplete get tracked under a different metric. This allows for you to create a skeleton around all of your features and specifications, and track what tests have been written and what tests are left outstanding. .. code-block:: python # Example of using the incomplete decorator @incomplete def it_should_do_something(self): pass
[ "The", "incomplete", "decorator", "behaves", "much", "like", "a", "normal", "skip", ";", "however", "tests", "that", "are", "marked", "as", "incomplete", "get", "tracked", "under", "a", "different", "metric", ".", "This", "allows", "for", "you", "to", "creat...
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/expect.py#L276-L294
jmvrbanac/Specter
specter/expect.py
metadata
def metadata(**key_value_pairs):
    """The metadata decorator allows for you to tag specific tests with
    key/value data for run-time processing or reporting. The common use case
    is to use metadata to tag a test as a positive or negative test type.

    .. code-block:: python

        # Example of using the metadata decorator
        @metadata(type='negative')
        def it_shouldnt_do_something(self):
            pass
    """
    def onTestFunc(func):
        def DECORATOR_ONCALL(*args, **kwargs):
            return (func, key_value_pairs)
        return DECORATOR_ONCALL
    return onTestFunc
python
def metadata(**key_value_pairs):
    """The metadata decorator allows for you to tag specific tests with
    key/value data for run-time processing or reporting. The common use case
    is to use metadata to tag a test as a positive or negative test type.

    .. code-block:: python

        # Example of using the metadata decorator
        @metadata(type='negative')
        def it_shouldnt_do_something(self):
            pass
    """
    def onTestFunc(func):
        def DECORATOR_ONCALL(*args, **kwargs):
            return (func, key_value_pairs)
        return DECORATOR_ONCALL
    return onTestFunc
[ "def", "metadata", "(", "*", "*", "key_value_pairs", ")", ":", "def", "onTestFunc", "(", "func", ")", ":", "def", "DECORATOR_ONCALL", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "(", "func", ",", "key_value_pairs", ")", "return", "DE...
The metadata decorator allows for you to tag specific tests with key/value data for run-time processing or reporting. The common use case is to use metadata to tag a test as a positive or negative test type. .. code-block:: python # Example of using the metadata decorator @metadata(type='negative') def it_shouldnt_do_something(self): pass
[ "The", "metadata", "decorator", "allows", "for", "you", "to", "tag", "specific", "tests", "with", "key", "/", "value", "data", "for", "run", "-", "time", "processing", "or", "reporting", ".", "The", "common", "use", "case", "is", "to", "use", "metadata", ...
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/expect.py#L297-L313
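A usage sketch for metadata in a spec; the Spec base class and assertion chain are assumed from Specter's documented style.

from specter import Spec, expect
from specter.expect import metadata

class Login(Spec):
    @metadata(type='negative')
    def it_rejects_an_empty_password(self):
        password = ''
        expect(len(password)).to.equal(0)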
jmvrbanac/Specter
specter/expect.py
ExpectAssert.serialize
def serialize(self):
    """Serializes the ExpectAssert object for collection.

    Warning, this will only grab the available information.
    It is strongly recommended that you only call this once all specs
    and tests have completed.
    """
    converted_dict = {
        'success': self.success,
        'assertion': str(self),
        'required': self.required
    }
    return converted_dict
python
def serialize(self):
    """Serializes the ExpectAssert object for collection.

    Warning, this will only grab the available information.
    It is strongly recommended that you only call this once all specs
    and tests have completed.
    """
    converted_dict = {
        'success': self.success,
        'assertion': str(self),
        'required': self.required
    }
    return converted_dict
[ "def", "serialize", "(", "self", ")", ":", "converted_dict", "=", "{", "'success'", ":", "self", ".", "success", ",", "'assertion'", ":", "str", "(", "self", ")", ",", "'required'", ":", "self", ".", "required", "}", "return", "converted_dict" ]
Serializes the ExpectAssert object for collection. Warning, this will only grab the available information. It is strongly recommended that you only call this once all specs and tests have completed.
[ "Serializes", "the", "ExpectAssert", "object", "for", "collection", "." ]
train
https://github.com/jmvrbanac/Specter/blob/1f5a729b0aa16242add8c1c754efa268335e3944/specter/expect.py#L41-L53
meraki-analytics/datapipelines-python
datapipelines/sinks.py
DataSink.accepts
def accepts(self):  # type: Union[Iterable[Type[T]], Type[Any]]
    """The types of objects the data sink can store."""
    types = set()
    any_dispatch = False
    try:
        types.update(getattr(self.__class__, "put")._accepts)
        any_dispatch = True
    except AttributeError:
        pass
    try:
        types.update(getattr(self.__class__, "put_many")._accepts)
        any_dispatch = True
    except AttributeError:
        pass
    return types if any_dispatch else TYPE_WILDCARD
python
def accepts(self):  # type: Union[Iterable[Type[T]], Type[Any]]
    """The types of objects the data sink can store."""
    types = set()
    any_dispatch = False
    try:
        types.update(getattr(self.__class__, "put")._accepts)
        any_dispatch = True
    except AttributeError:
        pass
    try:
        types.update(getattr(self.__class__, "put_many")._accepts)
        any_dispatch = True
    except AttributeError:
        pass
    return types if any_dispatch else TYPE_WILDCARD
[ "def", "accepts", "(", "self", ")", ":", "# type: Union[Iterable[Type[T]], Type[Any]]", "types", "=", "set", "(", ")", "any_dispatch", "=", "False", "try", ":", "types", ".", "update", "(", "getattr", "(", "self", ".", "__class__", ",", "\"put\"", ")", ".", ...
The types of objects the data sink can store.
[ "The", "types", "of", "objects", "the", "data", "sink", "can", "store", "." ]
train
https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/sinks.py#L16-L30
meraki-analytics/datapipelines-python
datapipelines/sinks.py
DataSink.put_many
def put_many(self, type: Type[T], items: Iterable[T], context: PipelineContext = None) -> None: """Puts multiple objects of the same type into the data sink. Args: type: The type of the objects being inserted. items: The objects to be inserted. context: The context of the insertion (mutable). """ pass
python
def put_many(self, type: Type[T], items: Iterable[T], context: PipelineContext = None) -> None: """Puts multiple objects of the same type into the data sink. Args: type: The type of the objects being inserted. items: The objects to be inserted. context: The context of the insertion (mutable). """ pass
[ "def", "put_many", "(", "self", ",", "type", ":", "Type", "[", "T", "]", ",", "items", ":", "Iterable", "[", "T", "]", ",", "context", ":", "PipelineContext", "=", "None", ")", "->", "None", ":", "pass" ]
Puts multiple objects of the same type into the data sink. Args: type: The type of the objects being inserted. items: The objects to be inserted. context: The context of the insertion (mutable).
[ "Puts", "multiple", "objects", "of", "the", "same", "type", "into", "the", "data", "sink", "." ]
train
https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/sinks.py#L44-L52
internetarchive/warc
warc/__init__.py
open
def open(filename, mode="rb", format=None): """Shorthand for WARCFile(filename, mode). Auto detects file and opens it. """ if format == "auto" or format is None: format = detect_format(filename) if format == "warc": return WARCFile(filename, mode) elif format == "arc": return ARCFile(filename, mode) else: raise IOError("Don't know how to open '%s' files" % format)
python
def open(filename, mode="rb", format=None): """Shorthand for WARCFile(filename, mode). Auto detects file and opens it. """ if format == "auto" or format is None: format = detect_format(filename) if format == "warc": return WARCFile(filename, mode) elif format == "arc": return ARCFile(filename, mode) else: raise IOError("Don't know how to open '%s' files" % format)
[ "def", "open", "(", "filename", ",", "mode", "=", "\"rb\"", ",", "format", "=", "None", ")", ":", "if", "format", "==", "\"auto\"", "or", "format", "==", "None", ":", "format", "=", "detect_format", "(", "filename", ")", "if", "format", "==", "\"warc\"...
Shorthand for WARCFile(filename, mode). Auto detects file and opens it.
[ "Shorthand", "for", "WARCFile", "(", "filename", "mode", ")", "." ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/__init__.py#L24-L38
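Typical read usage, as a sketch (assuming an example file exists; `record.type` and `record.url` are convenience properties of this library's `WARCRecord`):

.. code-block:: python

    import warc

    f = warc.open("example.warc.gz")      # format and gzip detected from the name
    for record in f:
        print(record.type, record.url)    # e.g. 'response http://example.com/'
    f.close()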
internetarchive/warc
warc/warc.py
WARCHeader.init_defaults
def init_defaults(self): """Initializes important headers to default values, if not already specified. The WARC-Record-ID header is set to a newly generated UUID. The WARC-Date header is set to the current datetime. The Content-Type is set based on the WARC-Type header. """ if "WARC-Record-ID" not in self: self['WARC-Record-ID'] = "<urn:uuid:%s>" % uuid.uuid1() if "WARC-Date" not in self: self['WARC-Date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ') if "Content-Type" not in self: self['Content-Type'] = WARCHeader.CONTENT_TYPES.get(self.type, "application/octet-stream")
python
def init_defaults(self): """Initializes important headers to default values, if not already specified. The WARC-Record-ID header is set to a newly generated UUID. The WARC-Date header is set to the current datetime. The Content-Type is set based on the WARC-Type header. """ if "WARC-Record-ID" not in self: self['WARC-Record-ID'] = "<urn:uuid:%s>" % uuid.uuid1() if "WARC-Date" not in self: self['WARC-Date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ') if "Content-Type" not in self: self['Content-Type'] = WARCHeader.CONTENT_TYPES.get(self.type, "application/octet-stream")
[ "def", "init_defaults", "(", "self", ")", ":", "if", "\"WARC-Record-ID\"", "not", "in", "self", ":", "self", "[", "'WARC-Record-ID'", "]", "=", "\"<urn:uuid:%s>\"", "%", "uuid", ".", "uuid1", "(", ")", "if", "\"WARC-Date\"", "not", "in", "self", ":", "self...
Initializes important headers to default values, if not already specified. The WARC-Record-ID header is set to a newly generated UUID. The WARC-Date header is set to the current datetime. The Content-Type is set based on the WARC-Type header.
[ "Initializes", "important", "headers", "to", "default", "values", "if", "not", "already", "specified", ".", "The", "WARC", "-", "Record", "-", "ID", "header", "is", "set", "to", "a", "newly", "generated", "UUID", ".", "The", "WARC", "-", "Date", "header", ...
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/warc.py#L75-L88
internetarchive/warc
warc/warc.py
WARCHeader.write_to
def write_to(self, f): """Writes this header to a file, in the format specified by WARC. """ f.write(self.version + "\r\n") for name, value in self.items(): name = name.title() # Use standard forms for commonly used patterns name = name.replace("Warc-", "WARC-").replace("-Ip-", "-IP-").replace("-Id", "-ID").replace("-Uri", "-URI") f.write(name) f.write(": ") f.write(value) f.write("\r\n") # Header ends with an extra CRLF f.write("\r\n")
python
def write_to(self, f): """Writes this header to a file, in the format specified by WARC. """ f.write(self.version + "\r\n") for name, value in self.items(): name = name.title() # Use standard forms for commonly used patterns name = name.replace("Warc-", "WARC-").replace("-Ip-", "-IP-").replace("-Id", "-ID").replace("-Uri", "-URI") f.write(name) f.write(": ") f.write(value) f.write("\r\n") # Header ends with an extra CRLF f.write("\r\n")
[ "def", "write_to", "(", "self", ",", "f", ")", ":", "f", ".", "write", "(", "self", ".", "version", "+", "\"\\r\\n\"", ")", "for", "name", ",", "value", "in", "self", ".", "items", "(", ")", ":", "name", "=", "name", ".", "title", "(", ")", "# ...
Writes this header to a file, in the format specified by WARC.
[ "Writes", "this", "header", "to", "a", "file", "in", "the", "format", "specified", "by", "WARC", "." ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/warc.py#L90-L104
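The `.title()` call lowercases everything after the first letter of each hyphen-separated word, so the chained `replace` calls restore the canonical capitalization of the WARC acronyms. A quick pure-Python check of the normalization chain:

.. code-block:: python

    name = "warc-target-uri".title()              # 'Warc-Target-Uri'
    name = (name.replace("Warc-", "WARC-")
                .replace("-Ip-", "-IP-")
                .replace("-Id", "-ID")
                .replace("-Uri", "-URI"))
    print(name)                                   # 'WARC-Target-URI'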
internetarchive/warc
warc/warc.py
WARCRecord.from_response
def from_response(response): """Creates a WARCRecord from given response object. This must be called before reading the response. The response can be read after this method is called. :param response: An instance of :class:`requests.models.Response`. """ # Get the httplib.HTTPResponse object http_response = response.raw._original_response # HTTP status line, headers and body as strings status_line = "HTTP/1.1 %d %s" % (http_response.status, http_response.reason) headers = str(http_response.msg) body = http_response.read() # Monkey-patch the response object so that it is possible to read from it later. response.raw._fp = StringIO(body) # Build the payload to create warc file. payload = status_line + "\r\n" + headers + "\r\n" + body headers = { "WARC-Type": "response", "WARC-Target-URI": response.request.full_url.encode('utf-8') } return WARCRecord(payload=payload, headers=headers)
python
def from_response(response): """Creates a WARCRecord from given response object. This must be called before reading the response. The response can be read after this method is called. :param response: An instance of :class:`requests.models.Response`. """ # Get the httplib.HTTPResponse object http_response = response.raw._original_response # HTTP status line, headers and body as strings status_line = "HTTP/1.1 %d %s" % (http_response.status, http_response.reason) headers = str(http_response.msg) body = http_response.read() # Monkey-patch the response object so that it is possible to read from it later. response.raw._fp = StringIO(body) # Build the payload to create warc file. payload = status_line + "\r\n" + headers + "\r\n" + body headers = { "WARC-Type": "response", "WARC-Target-URI": response.request.full_url.encode('utf-8') } return WARCRecord(payload=payload, headers=headers)
[ "def", "from_response", "(", "response", ")", ":", "# Get the httplib.HTTPResponse object", "http_response", "=", "response", ".", "raw", ".", "_original_response", "# HTTP status line, headers and body as strings", "status_line", "=", "\"HTTP/1.1 %d %s\"", "%", "(", "http_re...
Creates a WARCRecord from given response object. This must be called before reading the response. The response can be read after this method is called. :param response: An instance of :class:`requests.models.Response`.
[ "Creates", "a", "WARCRecord", "from", "given", "response", "object", "." ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/warc.py#L216-L242
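Intended usage is roughly the following. Note that the method reaches into private attributes (`response.raw._original_response`, `request.full_url`) of the requests/httplib versions current when this library was written, so it is unlikely to work unchanged with modern requests:

.. code-block:: python

    import requests
    from warc import WARCRecord

    resp = requests.get("http://example.com/")
    record = WARCRecord.from_response(resp)
    body = resp.content        # still readable thanks to the monkey-patch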
internetarchive/warc
warc/warc.py
WARCFile.write_record
def write_record(self, warc_record): """Adds a warc record to this WARC file. """ warc_record.write_to(self.fileobj) # Each warc record is written as a separate member in the gzip file # so that each record can be read independently. if isinstance(self.fileobj, gzip2.GzipFile): self.fileobj.close_member()
python
def write_record(self, warc_record): """Adds a warc record to this WARC file. """ warc_record.write_to(self.fileobj) # Each warc record is written as a separate member in the gzip file # so that each record can be read independently. if isinstance(self.fileobj, gzip2.GzipFile): self.fileobj.close_member()
[ "def", "write_record", "(", "self", ",", "warc_record", ")", ":", "warc_record", ".", "write_to", "(", "self", ".", "fileobj", ")", "# Each warc record is written as separate member in the gzip file", "# so that each record can be read independetly.", "if", "isinstance", "(",...
Adds a warc record to this WARC file.
[ "Adds", "a", "warc", "record", "to", "this", "WARC", "file", "." ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/warc.py#L265-L272
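A minimal write-side sketch, assuming mode "w" and gzip-by-extension behave as in `warc.open`; `init_defaults` fills in WARC-Record-ID, WARC-Date and Content-Type for the record:

.. code-block:: python

    from warc import WARCFile, WARCRecord

    f = WARCFile("out.warc.gz", "w")          # one gzip member per record
    record = WARCRecord(payload="hello, warc",
                        headers={"WARC-Type": "resource"})
    f.write_record(record)
    f.close()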
internetarchive/warc
warc/warc.py
WARCFile.browse
def browse(self): """Utility to browse through the records in the warc file. This returns an iterator over (record, offset, size) for each record in the file. If the file is gzip compressed, the offset and size will correspond to the compressed file. The payload of each record is limited to 1MB to keep memory consumption under control. """ offset = 0 for record in self.reader: # Just read the first 1MB of the payload. # This will make sure memory consumption is under control and it # is possible to look at the first MB of the payload, which is # typically sufficient to read http headers in the payload. record.payload = StringIO(record.payload.read(1024*1024)) self.reader.finish_reading_current_record() next_offset = self.tell() yield record, offset, next_offset-offset offset = next_offset
python
def browse(self): """Utility to browse through the records in the warc file. This returns an iterator over (record, offset, size) for each record in the file. If the file is gzip compressed, the offset and size will correspond to the compressed file. The payload of each record is limited to 1MB to keep memory consumption under control. """ offset = 0 for record in self.reader: # Just read the first 1MB of the payload. # This will make sure memory consumption is under control and it # is possible to look at the first MB of the payload, which is # typically sufficient to read http headers in the payload. record.payload = StringIO(record.payload.read(1024*1024)) self.reader.finish_reading_current_record() next_offset = self.tell() yield record, offset, next_offset-offset offset = next_offset
[ "def", "browse", "(", "self", ")", ":", "offset", "=", "0", "for", "record", "in", "self", ".", "reader", ":", "# Just read the first 1MB of the payload.", "# This will make sure memory consuption is under control and it ", "# is possible to look at the first MB of the payload, w...
Utility to browse through the records in the warc file. This returns an iterator over (record, offset, size) for each record in the file. If the file is gzip compressed, the offset and size will correspond to the compressed file. The payload of each record is limited to 1MB to keep memory consumption under control.
[ "Utility", "to", "browse", "through", "the", "records", "in", "the", "warc", "file", ".", "This", "returns", "an", "iterator", "over", "(", "record", "offset", "size", ")", "for", "each", "record", "in", "the", "file", ".", "If", "the", "file", "is", "...
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/warc.py#L284-L304
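Because the offsets refer to the (possibly compressed) file, `browse` lends itself to building CDX-style indexes without holding whole payloads in memory:

.. code-block:: python

    import warc

    f = warc.open("example.warc.gz")
    for record, offset, size in f.browse():
        print(offset, size, record.url)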
internetarchive/warc
warc/warc.py
WARCFile.tell
def tell(self): """Returns the file offset. If this is a compressed file, then the offset in the compressed file is returned. """ if isinstance(self.fileobj, gzip2.GzipFile): return self.fileobj.fileobj.tell() else: return self.fileobj.tell()
python
def tell(self): """Returns the file offset. If this is a compressed file, then the offset in the compressed file is returned. """ if isinstance(self.fileobj, gzip2.GzipFile): return self.fileobj.fileobj.tell() else: return self.fileobj.tell()
[ "def", "tell", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "fileobj", ",", "gzip2", ".", "GzipFile", ")", ":", "return", "self", ".", "fileobj", ".", "fileobj", ".", "tell", "(", ")", "else", ":", "return", "self", ".", "fileobj", ...
Returns the file offset. If this is a compressed file, then the offset in the compressed file is returned.
[ "Returns", "the", "file", "offset", ".", "If", "this", "is", "a", "compressed", "file", "then", "the", "offset", "in", "the", "compressed", "file", "is", "returned", "." ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/warc.py#L306-L313
internetarchive/warc
warc/gzip2.py
GzipFile.close_member
def close_member(self): """Closes the current member being written. """ # The new member is not yet started, no need to close if self._new_member: return self.fileobj.write(self.compress.flush()) write32u(self.fileobj, self.crc) # self.size may exceed 2GB, or even 4GB write32u(self.fileobj, self.size & 0xffffffffL) self.size = 0 self.compress = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0) self._new_member = True
python
def close_member(self): """Closes the current member being written. """ # The new member is not yet started, no need to close if self._new_member: return self.fileobj.write(self.compress.flush()) write32u(self.fileobj, self.crc) # self.size may exceed 2GB, or even 4GB write32u(self.fileobj, self.size & 0xffffffffL) self.size = 0 self.compress = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0) self._new_member = True
[ "def", "close_member", "(", "self", ")", ":", "# The new member is not yet started, no need to close", "if", "self", ".", "_new_member", ":", "return", "self", ".", "fileobj", ".", "write", "(", "self", ".", "compress", ".", "flush", "(", ")", ")", "write32u", ...
Closes the current member being written.
[ "Closes", "the", "current", "member", "being", "written", "." ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L42-L59
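The point of per-record members: a standard gzip reader treats concatenated members as one continuous stream, while each member can also be located and decompressed on its own. A quick illustration with the Python 3 standard library (the module above is Python 2, as the `0xffffffffL` literal shows):

.. code-block:: python

    import gzip

    blob = gzip.compress(b"record one\n") + gzip.compress(b"record two\n")
    print(gzip.decompress(blob))   # b'record one\nrecord two\n'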
internetarchive/warc
warc/gzip2.py
GzipFile._start_member
def _start_member(self): """Starts writing a new member if required. """ if self._new_member: self._init_write(self.name) self._write_gzip_header() self._new_member = False
python
def _start_member(self): """Starts writing a new member if required. """ if self._new_member: self._init_write(self.name) self._write_gzip_header() self._new_member = False
[ "def", "_start_member", "(", "self", ")", ":", "if", "self", ".", "_new_member", ":", "self", ".", "_init_write", "(", "self", ".", "name", ")", "self", ".", "_write_gzip_header", "(", ")", "self", ".", "_new_member", "=", "False" ]
Starts writing a new member if required.
[ "Starts", "writing", "a", "new", "member", "if", "required", "." ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L61-L67
internetarchive/warc
warc/gzip2.py
GzipFile.close
def close(self): """Closes the gzip with care to handle multiple members. """ if self.fileobj is None: return if self.mode == WRITE: self.close_member() self.fileobj = None elif self.mode == READ: self.fileobj = None if self.myfileobj: self.myfileobj.close() self.myfileobj = None
python
def close(self): """Closes the gzip with care to handle multiple members. """ if self.fileobj is None: return if self.mode == WRITE: self.close_member() self.fileobj = None elif self.mode == READ: self.fileobj = None if self.myfileobj: self.myfileobj.close() self.myfileobj = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "fileobj", "is", "None", ":", "return", "if", "self", ".", "mode", "==", "WRITE", ":", "self", ".", "close_member", "(", ")", "self", ".", "fileobj", "=", "None", "elif", "self", ".", "mode"...
Closes the gzip with care to handle multiple members.
[ "Closes", "the", "gzip", "with", "care", "to", "handle", "multiple", "members", "." ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L73-L86
internetarchive/warc
warc/gzip2.py
GzipFile.read_member
def read_member(self): """Returns a file-like object to read one member from the gzip file. """ if self._member_lock is False: self._member_lock = True if self._new_member: try: # Read one byte to move to the next member BaseGzipFile._read(self, 1) assert self._new_member is False except EOFError: return None return self
python
def read_member(self): """Returns a file-like object to read one member from the gzip file. """ if self._member_lock is False: self._member_lock = True if self._new_member: try: # Read one byte to move to the next member BaseGzipFile._read(self, 1) assert self._new_member is False except EOFError: return None return self
[ "def", "read_member", "(", "self", ")", ":", "if", "self", ".", "_member_lock", "is", "False", ":", "self", ".", "_member_lock", "=", "True", "if", "self", ".", "_new_member", ":", "try", ":", "# Read one byte to move to the next member", "BaseGzipFile", ".", ...
Returns a file-like object to read one member from the gzip file.
[ "Returns", "a", "file", "-", "like", "object", "to", "read", "one", "member", "from", "the", "gzip", "file", "." ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L95-L109
internetarchive/warc
warc/gzip2.py
GzipFile.write_member
def write_member(self, data): """Writes the given data as one gzip member. The data can be a string, an iterator that gives strings or a file-like object. """ if isinstance(data, basestring): self.write(data) else: for text in data: self.write(text) self.close_member()
python
def write_member(self, data): """Writes the given data as one gzip member. The data can be a string, an iterator that gives strings or a file-like object. """ if isinstance(data, basestring): self.write(data) else: for text in data: self.write(text) self.close_member()
[ "def", "write_member", "(", "self", ",", "data", ")", ":", "if", "isinstance", "(", "data", ",", "basestring", ")", ":", "self", ".", "write", "(", "data", ")", "else", ":", "for", "text", "in", "data", ":", "self", ".", "write", "(", "text", ")", ...
Writes the given data as one gzip member. The data can be a string, an iterator that gives strings or a file-like object.
[ "Writes", "the", "given", "data", "as", "one", "gzip", "member", ".", "The", "data", "can", "be", "a", "string", "an", "iterator", "that", "gives", "strings", "or", "a", "file", "-", "like", "object", "." ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/gzip2.py#L111-L121
internetarchive/warc
warc/arc.py
ARCHeader.write_to
def write_to(self, f, version = None): """ Writes out the arc header to the file-like object `f`. If the version field is 1, it writes out an arc v1 header, otherwise (and this is the default), it outputs a v2 header. """ if not version: version = self.version if version == 1: header = "%(url)s %(ip_address)s %(date)s %(content_type)s %(length)s" elif version == 2: header = "%(url)s %(ip_address)s %(date)s %(content_type)s %(result_code)s %(checksum)s %(location)s %(offset)s %(filename)s %(length)s" header = header%dict(url = self['url'], ip_address = self['ip_address'], date = self['date'], content_type = self['content_type'], result_code = self['result_code'], checksum = self['checksum'], location = self['location'], offset = self['offset'], filename = self['filename'], length = self['length']) f.write(header)
python
def write_to(self, f, version = None): """ Writes out the arc header to the file-like object `f`. If the version field is 1, it writes out an arc v1 header, otherwise (and this is the default), it outputs a v2 header. """ if not version: version = self.version if version == 1: header = "%(url)s %(ip_address)s %(date)s %(content_type)s %(length)s" elif version == 2: header = "%(url)s %(ip_address)s %(date)s %(content_type)s %(result_code)s %(checksum)s %(location)s %(offset)s %(filename)s %(length)s" header = header%dict(url = self['url'], ip_address = self['ip_address'], date = self['date'], content_type = self['content_type'], result_code = self['result_code'], checksum = self['checksum'], location = self['location'], offset = self['offset'], filename = self['filename'], length = self['length']) f.write(header)
[ "def", "write_to", "(", "self", ",", "f", ",", "version", "=", "None", ")", ":", "if", "not", "version", ":", "version", "=", "self", ".", "version", "if", "version", "==", "1", ":", "header", "=", "\"%(url)s %(ip_address)s %(date)s %(content_type)s %(length)s...
Writes out the arc header to the file-like object `f`. If the version field is 1, it writes out an arc v1 header, otherwise (and this is the default), it outputs a v2 header.
[ "Writes", "out", "the", "arc", "header", "to", "the", "file", "like", "object", "f", "." ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L69-L94
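A worked example of the version-2 layout this produces (one space-separated line; `-` marks the empty checksum and location fields):

.. code-block:: python

    header_v2 = ("%(url)s %(ip_address)s %(date)s %(content_type)s "
                 "%(result_code)s %(checksum)s %(location)s %(offset)s "
                 "%(filename)s %(length)s")
    print(header_v2 % dict(url="http://example.com/", ip_address="127.0.0.1",
                           date="20070601000000", content_type="text/html",
                           result_code="200", checksum="-", location="-",
                           offset="0", filename="example.arc", length="1234"))
    # http://example.com/ 127.0.0.1 20070601000000 text/html 200 - - 0 example.arc 1234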
internetarchive/warc
warc/arc.py
ARCRecord.from_string
def from_string(cls, string, version): """ Constructs an ARC record from a string and returns it. TODO: It might be best to merge this with the _read_arc_record function rather than reimplement the functionality here. """ header, payload = string.split("\n",1) if payload[0] == '\n': # There's an extra newline payload = payload[1:] if int(version) == 1: arc_header_re = ARC1_HEADER_RE elif int(version) == 2: arc_header_re = ARC2_HEADER_RE matches = arc_header_re.search(header) headers = matches.groupdict() arc_header = ARCHeader(**headers) return cls(header = arc_header, payload = payload, version = version)
python
def from_string(cls, string, version): """ Constructs an ARC record from a string and returns it. TODO: It might be best to merge this with the _read_arc_record function rather than reimplement the functionality here. """ header, payload = string.split("\n",1) if payload[0] == '\n': # There's an extra newline payload = payload[1:] if int(version) == 1: arc_header_re = ARC1_HEADER_RE elif int(version) == 2: arc_header_re = ARC2_HEADER_RE matches = arc_header_re.search(header) headers = matches.groupdict() arc_header = ARCHeader(**headers) return cls(header = arc_header, payload = payload, version = version)
[ "def", "from_string", "(", "cls", ",", "string", ",", "version", ")", ":", "header", ",", "payload", "=", "string", ".", "split", "(", "\"\\n\"", ",", "1", ")", "if", "payload", "[", "0", "]", "==", "'\\n'", ":", "# There's an extra", "payload", "=", ...
Constructs an ARC record from a string and returns it. TODO: It might be best to merge this with the _read_arc_record function rather than reimplement the functionality here.
[ "Constructs", "an", "ARC", "record", "from", "a", "string", "and", "returns", "it", "." ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L161-L179
internetarchive/warc
warc/arc.py
ARCFile._write_header
def _write_header(self): "Writes out an ARC header" if "org" not in self.file_headers: warnings.warn("Using 'Unknown' for Archiving organisation name") self.file_headers['org'] = "Unknown" if "date" not in self.file_headers: now = datetime.datetime.utcnow() warnings.warn("Using '%s' for Archiving time"%now) self.file_headers['date'] = now if "ip_address" not in self.file_headers: warnings.warn("Using '127.0.0.1' as IP address of machine that's archiving") self.file_headers['ip_address'] = "127.0.0.1" if self.version == 1: payload = "1 0 %(org)s\nURL IP-address Archive-date Content-type Archive-length"%dict(org = self.file_headers['org']) elif self.version == 2: payload = "2 0 %(org)s\nURL IP-address Archive-date Content-type Result-code Checksum Location Offset Filename Archive-length" else: raise IOError("Can't write an ARC file with version \"%s\""%self.version) fname = os.path.basename(self.fileobj.name) header = ARCHeader(url = "filedesc://%s"%fname, ip_address = self.file_headers['ip_address'], date = self.file_headers['date'], content_type = "text/plain", length = len(payload), result_code = "200", checksum = "-", location = "-", offset = str(self.fileobj.tell()), filename = fname) arc_file_header_record = ARCRecord(header, payload%self.file_headers) self.write(arc_file_header_record)
python
def _write_header(self): "Writes out an ARC header" if "org" not in self.file_headers: warnings.warn("Using 'Unknown' for Archiving organisation name") self.file_headers['org'] = "Unknown" if "date" not in self.file_headers: now = datetime.datetime.utcnow() warnings.warn("Using '%s' for Archiving time"%now) self.file_headers['date'] = now if "ip_address" not in self.file_headers: warnings.warn("Using '127.0.0.1' as IP address of machine that's archiving") self.file_headers['ip_address'] = "127.0.0.1" if self.version == 1: payload = "1 0 %(org)s\nURL IP-address Archive-date Content-type Archive-length"%dict(org = self.file_headers['org']) elif self.version == 2: payload = "2 0 %(org)s\nURL IP-address Archive-date Content-type Result-code Checksum Location Offset Filename Archive-length" else: raise IOError("Can't write an ARC file with version \"%s\""%self.version) fname = os.path.basename(self.fileobj.name) header = ARCHeader(url = "filedesc://%s"%fname, ip_address = self.file_headers['ip_address'], date = self.file_headers['date'], content_type = "text/plain", length = len(payload), result_code = "200", checksum = "-", location = "-", offset = str(self.fileobj.tell()), filename = fname) arc_file_header_record = ARCRecord(header, payload%self.file_headers) self.write(arc_file_header_record)
[ "def", "_write_header", "(", "self", ")", ":", "if", "\"org\"", "not", "in", "self", ".", "file_headers", ":", "warnings", ".", "warn", "(", "\"Using 'unknown' for Archiving organisation name\"", ")", "self", ".", "file_headers", "[", "'org'", "]", "=", "\"Unkno...
Writes out an ARC header
[ "Writes", "out", "an", "ARC", "header" ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L264-L295
internetarchive/warc
warc/arc.py
ARCFile.write
def write(self, arc_record): "Writes out the given arc record to the file" if not self.version: self.version = 2 if not self.header_written: self.header_written = True self._write_header() arc_record.write_to(self.fileobj, self.version) self.fileobj.write("\n")
python
def write(self, arc_record): "Writes out the given arc record to the file" if not self.version: self.version = 2 if not self.header_written: self.header_written = True self._write_header() arc_record.write_to(self.fileobj, self.version) self.fileobj.write("\n")
[ "def", "write", "(", "self", ",", "arc_record", ")", ":", "if", "not", "self", ".", "version", ":", "self", ".", "version", "=", "2", "if", "not", "self", ".", "header_written", ":", "self", ".", "header_written", "=", "True", "self", ".", "_write_head...
Writes out the given arc record to the file
[ "Writes", "out", "the", "given", "arc", "record", "to", "the", "file" ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L297-L305
internetarchive/warc
warc/arc.py
ARCFile._read_file_header
def _read_file_header(self): """Reads out the file header for the arc file. If version was not provided, this will autopopulate it.""" header = self.fileobj.readline() payload1 = self.fileobj.readline() payload2 = self.fileobj.readline() version, reserved, organisation = payload1.split(None, 2) self.fileobj.readline() # Lose the separator newline self.header_read = True if self.version and int(self.version) != int(version): raise IOError("Version mismatch. Requested version was '%s' but version in file was '%s'"%(self.version, version)) if version == '1': url, ip_address, date, content_type, length = header.split() self.file_headers = {"ip_address" : ip_address, "date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"), "org" : organisation} self.version = 1 elif version == '2': url, ip_address, date, content_type, result_code, checksum, location, offset, filename, length = header.split() self.file_headers = {"ip_address" : ip_address, "date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"), "org" : organisation} self.version = 2 else: raise IOError("Unknown ARC version '%s'"%version)
python
def _read_file_header(self): """Reads out the file header for the arc file. If version was not provided, this will autopopulate it.""" header = self.fileobj.readline() payload1 = self.fileobj.readline() payload2 = self.fileobj.readline() version, reserved, organisation = payload1.split(None, 2) self.fileobj.readline() # Lose the separator newline self.header_read = True if self.version and int(self.version) != int(version): raise IOError("Version mismatch. Requested version was '%s' but version in file was '%s'"%(self.version, version)) if version == '1': url, ip_address, date, content_type, length = header.split() self.file_headers = {"ip_address" : ip_address, "date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"), "org" : organisation} self.version = 1 elif version == '2': url, ip_address, date, content_type, result_code, checksum, location, offset, filename, length = header.split() self.file_headers = {"ip_address" : ip_address, "date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"), "org" : organisation} self.version = 2 else: raise IOError("Unknown ARC version '%s'"%version)
[ "def", "_read_file_header", "(", "self", ")", ":", "header", "=", "self", ".", "fileobj", ".", "readline", "(", ")", "payload1", "=", "self", ".", "fileobj", ".", "readline", "(", ")", "payload2", "=", "self", ".", "fileobj", ".", "readline", "(", ")",...
Reads out the file header for the arc file. If version was not provided, this will autopopulate it.
[ "Reads", "out", "the", "file", "header", "for", "the", "arc", "file", ".", "If", "version", "was", "not", "provided", "this", "will", "autopopulate", "it", "." ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L307-L335
internetarchive/warc
warc/arc.py
ARCFile._read_arc_record
def _read_arc_record(self): "Reads out an arc record, formats it and returns it" #XXX:Noufal Stream payload here rather than just read it # r = self.fileobj.readline() # Drop the initial newline # if r == "": # return None # header = self.fileobj.readline() # Strip the initial new lines and read first line header = self.fileobj.readline() while header and header.strip() == "": header = self.fileobj.readline() if header == "": return None if int(self.version) == 1: arc_header_re = ARC1_HEADER_RE elif int(self.version) == 2: arc_header_re = ARC2_HEADER_RE matches = arc_header_re.search(header) headers = matches.groupdict() arc_header = ARCHeader(**headers) payload = self.fileobj.read(int(headers['length'])) self.fileobj.readline() # Munge the separator newline. return ARCRecord(header = arc_header, payload = payload)
python
def _read_arc_record(self): "Reads out an arc record, formats it and returns it" #XXX:Noufal Stream payload here rather than just read it # r = self.fileobj.readline() # Drop the initial newline # if r == "": # return None # header = self.fileobj.readline() # Strip the initial new lines and read first line header = self.fileobj.readline() while header and header.strip() == "": header = self.fileobj.readline() if header == "": return None if int(self.version) == 1: arc_header_re = ARC1_HEADER_RE elif int(self.version) == 2: arc_header_re = ARC2_HEADER_RE matches = arc_header_re.search(header) headers = matches.groupdict() arc_header = ARCHeader(**headers) payload = self.fileobj.read(int(headers['length'])) self.fileobj.readline() # Munge the separator newline. return ARCRecord(header = arc_header, payload = payload)
[ "def", "_read_arc_record", "(", "self", ")", ":", "#XXX:Noufal Stream payload here rather than just read it", "# r = self.fileobj.readline() # Drop the initial newline", "# if r == \"\":", "# return None", "# header = self.fileobj.readline()", "# Strip the initial new lines and read first ...
Reads out an arc record, formats it and returns it
[ "Reads", "out", "an", "arc", "record", "formats", "it", "and", "returns", "it" ]
train
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L337-L366
figo-connect/schwifty
schwifty/bic.py
BIC.from_bank_code
def from_bank_code(cls, country_code, bank_code): """Create a new BIC object from country- and bank-code. Examples: >>> bic = BIC.from_bank_code('DE', '20070000') >>> bic.country_code 'DE' >>> bic.bank_code 'DEUT' >>> bic.location_code 'HH' >>> BIC.from_bank_code('DE', '01010101') Traceback (most recent call last): ... ValueError: Invalid bank code '01010101' for country 'DE' Args: country_code (str): ISO 3166 alpha2 country-code. bank_code (str): Country specific bank-code. Returns: BIC: a BIC object generated from the given country code and bank code. Raises: ValueError: If the given bank code wasn't found in the registry Note: This currently only works for German bank-codes. """ try: return cls(registry.get('bank_code')[(country_code, bank_code)]['bic']) except KeyError: raise ValueError("Invalid bank code {!r} for country {!r}".format(bank_code, country_code))
python
def from_bank_code(cls, country_code, bank_code): """Create a new BIC object from country- and bank-code. Examples: >>> bic = BIC.from_bank_code('DE', '20070000') >>> bic.country_code 'DE' >>> bic.bank_code 'DEUT' >>> bic.location_code 'HH' >>> BIC.from_bank_code('DE', '01010101') Traceback (most recent call last): ... ValueError: Invalid bank code '01010101' for country 'DE' Args: country_code (str): ISO 3166 alpha2 country-code. bank_code (str): Country specific bank-code. Returns: BIC: a BIC object generated from the given country code and bank code. Raises: ValueError: If the given bank code wasn't found in the registry Note: This currently only works for German bank-codes. """ try: return cls(registry.get('bank_code')[(country_code, bank_code)]['bic']) except KeyError: raise ValueError("Invalid bank code {!r} for country {!r}".format(bank_code, country_code))
[ "def", "from_bank_code", "(", "cls", ",", "country_code", ",", "bank_code", ")", ":", "try", ":", "return", "cls", "(", "registry", ".", "get", "(", "'bank_code'", ")", "[", "(", "country_code", ",", "bank_code", ")", "]", "[", "'bic'", "]", ")", "exce...
Create a new BIC object from country- and bank-code. Examples: >>> bic = BIC.from_bank_code('DE', '20070000') >>> bic.country_code 'DE' >>> bic.bank_code 'DEUT' >>> bic.location_code 'HH' >>> BIC.from_bank_code('DE', '01010101') Traceback (most recent call last): ... ValueError: Invalid bank code '01010101' for country 'DE' Args: country_code (str): ISO 3166 alpha2 country-code. bank_code (str): Country specific bank-code. Returns: BIC: a BIC object generated from the given country code and bank code. Raises: ValueError: If the given bank code wasn't found in the registry Note: This currently only works for German bank-codes.
[ "Create", "a", "new", "BIC", "object", "from", "country", "-", "and", "bank", "-", "code", "." ]
train
https://github.com/figo-connect/schwifty/blob/69376fade070dbfdf89c57a0060bc290f7a744bb/schwifty/bic.py#L45-L80
figo-connect/schwifty
schwifty/bic.py
BIC.formatted
def formatted(self): """str: The BIC separated in the blocks bank-, country- and location-code.""" formatted = ' '.join([self.bank_code, self.country_code, self.location_code]) if self.branch_code: formatted += ' ' + self.branch_code return formatted
python
def formatted(self): """str: The BIC separated in the blocks bank-, country- and location-code.""" formatted = ' '.join([self.bank_code, self.country_code, self.location_code]) if self.branch_code: formatted += ' ' + self.branch_code return formatted
[ "def", "formatted", "(", "self", ")", ":", "formatted", "=", "' '", ".", "join", "(", "[", "self", ".", "bank_code", ",", "self", ".", "country_code", ",", "self", ".", "location_code", "]", ")", "if", "self", ".", "branch_code", ":", "formatted", "+="...
str: The BIC separated in the blocks bank-, country- and location-code.
[ "str", ":", "The", "BIC", "separated", "in", "the", "blocks", "bank", "-", "country", "-", "and", "location", "-", "code", "." ]
train
https://github.com/figo-connect/schwifty/blob/69376fade070dbfdf89c57a0060bc290f7a744bb/schwifty/bic.py#L104-L109
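Continuing the doctest values from `from_bank_code` above (the exact output depends on the registry entry, in particular whether it carries a branch code):

.. code-block:: python

    from schwifty import BIC

    bic = BIC.from_bank_code('DE', '20070000')
    print(bic.formatted)   # 'DEUT DE HH', plus ' XXX' if a branch code is present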
figo-connect/schwifty
schwifty/bic.py
BIC.country_bank_code
def country_bank_code(self): """str or None: The country specific bank-code associated with the BIC.""" entry = registry.get('bic').get(self.compact) if entry: return entry.get('bank_code')
python
def country_bank_code(self): """str or None: The country specific bank-code associated with the BIC.""" entry = registry.get('bic').get(self.compact) if entry: return entry.get('bank_code')
[ "def", "country_bank_code", "(", "self", ")", ":", "entry", "=", "registry", ".", "get", "(", "'bic'", ")", ".", "get", "(", "self", ".", "compact", ")", "if", "entry", ":", "return", "entry", ".", "get", "(", "'bank_code'", ")" ]
str or None: The country specific bank-code associated with the BIC.
[ "str", "or", "None", ":", "The", "country", "specific", "bank", "-", "code", "associated", "with", "the", "BIC", "." ]
train
https://github.com/figo-connect/schwifty/blob/69376fade070dbfdf89c57a0060bc290f7a744bb/schwifty/bic.py#L112-L116
figo-connect/schwifty
schwifty/bic.py
BIC.bank_name
def bank_name(self): """str or None: The name of the bank associated with the BIC.""" entry = registry.get('bic').get(self.compact) if entry: return entry.get('name')
python
def bank_name(self): """str or None: The name of the bank associated with the BIC.""" entry = registry.get('bic').get(self.compact) if entry: return entry.get('name')
[ "def", "bank_name", "(", "self", ")", ":", "entry", "=", "registry", ".", "get", "(", "'bic'", ")", ".", "get", "(", "self", ".", "compact", ")", "if", "entry", ":", "return", "entry", ".", "get", "(", "'name'", ")" ]
str or None: The name of the bank associated with the BIC.
[ "str", "or", "None", ":", "The", "name", "of", "the", "bank", "associated", "with", "the", "BIC", "." ]
train
https://github.com/figo-connect/schwifty/blob/69376fade070dbfdf89c57a0060bc290f7a744bb/schwifty/bic.py#L119-L123
figo-connect/schwifty
schwifty/bic.py
BIC.bank_short_name
def bank_short_name(self): """str or None: The short name of the bank associated with the BIC.""" entry = registry.get('bic').get(self.compact) if entry: return entry.get('short_name')
python
def bank_short_name(self): """str or None: The short name of the bank associated with the BIC.""" entry = registry.get('bic').get(self.compact) if entry: return entry.get('short_name')
[ "def", "bank_short_name", "(", "self", ")", ":", "entry", "=", "registry", ".", "get", "(", "'bic'", ")", ".", "get", "(", "self", ".", "compact", ")", "if", "entry", ":", "return", "entry", ".", "get", "(", "'short_name'", ")" ]
str or None: The short name of the bank associated with the BIC.
[ "str", "or", "None", ":", "The", "short", "name", "of", "the", "bank", "associated", "with", "the", "BIC", "." ]
train
https://github.com/figo-connect/schwifty/blob/69376fade070dbfdf89c57a0060bc290f7a744bb/schwifty/bic.py#L126-L130
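The three registry-backed properties above differ only in the field they read; a small private helper (hypothetical, not part of schwifty) would remove the duplication:

.. code-block:: python

    def _registry_field(self, key):
        """Return one field of this BIC's registry entry, or None."""
        entry = registry.get('bic').get(self.compact)
        if entry:
            return entry.get(key)

    @property
    def bank_name(self):
        return self._registry_field('name')

    @property
    def bank_short_name(self):
        return self._registry_field('short_name')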
figo-connect/schwifty
schwifty/iban.py
IBAN.generate
def generate(cls, country_code, bank_code, account_code): """Generate an IBAN from its components. If the bank-code and/or account-number have fewer digits than required by their country specific representation, the respective component is padded with zeros. Examples: To generate an IBAN do the following:: >>> bank_code = '37040044' >>> account_code = '532013000' >>> iban = IBAN.generate('DE', bank_code, account_code) >>> iban.formatted 'DE89 3704 0044 0532 0130 00' Args: country_code (str): The ISO 3166 alpha-2 country code. bank_code (str): The country specific bank-code. account_code (str): The customer specific account-code. """ spec = _get_iban_spec(country_code) bank_code_length = code_length(spec, 'bank_code') branch_code_length = code_length(spec, 'branch_code') bank_and_branch_code_length = bank_code_length + branch_code_length account_code_length = code_length(spec, 'account_code') if len(bank_code) > bank_and_branch_code_length: raise ValueError( "Bank code exceeds maximum size {}".format(bank_and_branch_code_length)) if len(account_code) > account_code_length: raise ValueError( "Account code exceeds maximum size {}".format(account_code_length)) bank_code = bank_code.rjust(bank_and_branch_code_length, '0') account_code = account_code.rjust(account_code_length, '0') iban = country_code + '??' + bank_code + account_code return cls(iban)
python
def generate(cls, country_code, bank_code, account_code): """Generate an IBAN from its components. If the bank-code and/or account-number have fewer digits than required by their country specific representation, the respective component is padded with zeros. Examples: To generate an IBAN do the following:: >>> bank_code = '37040044' >>> account_code = '532013000' >>> iban = IBAN.generate('DE', bank_code, account_code) >>> iban.formatted 'DE89 3704 0044 0532 0130 00' Args: country_code (str): The ISO 3166 alpha-2 country code. bank_code (str): The country specific bank-code. account_code (str): The customer specific account-code. """ spec = _get_iban_spec(country_code) bank_code_length = code_length(spec, 'bank_code') branch_code_length = code_length(spec, 'branch_code') bank_and_branch_code_length = bank_code_length + branch_code_length account_code_length = code_length(spec, 'account_code') if len(bank_code) > bank_and_branch_code_length: raise ValueError( "Bank code exceeds maximum size {}".format(bank_and_branch_code_length)) if len(account_code) > account_code_length: raise ValueError( "Account code exceeds maximum size {}".format(account_code_length)) bank_code = bank_code.rjust(bank_and_branch_code_length, '0') account_code = account_code.rjust(account_code_length, '0') iban = country_code + '??' + bank_code + account_code return cls(iban)
[ "def", "generate", "(", "cls", ",", "country_code", ",", "bank_code", ",", "account_code", ")", ":", "spec", "=", "_get_iban_spec", "(", "country_code", ")", "bank_code_length", "=", "code_length", "(", "spec", ",", "'bank_code'", ")", "branch_code_length", "=",...
Generate an IBAN from its components. If the bank-code and/or account-number have fewer digits than required by their country specific representation, the respective component is padded with zeros. Examples: To generate an IBAN do the following:: >>> bank_code = '37040044' >>> account_code = '532013000' >>> iban = IBAN.generate('DE', bank_code, account_code) >>> iban.formatted 'DE89 3704 0044 0532 0130 00' Args: country_code (str): The ISO 3166 alpha-2 country code. bank_code (str): The country specific bank-code. account_code (str): The customer specific account-code.
[ "Generate", "an", "IBAN", "from", "it", "s", "components", "." ]
train
https://github.com/figo-connect/schwifty/blob/69376fade070dbfdf89c57a0060bc290f7a744bb/schwifty/iban.py#L75-L113
figo-connect/schwifty
schwifty/iban.py
IBAN.formatted
def formatted(self): """str: The IBAN formatted in blocks of 4 digits.""" return ' '.join(self.compact[i:i + 4] for i in range(0, len(self.compact), 4))
python
def formatted(self): """str: The IBAN formatted in blocks of 4 digits.""" return ' '.join(self.compact[i:i + 4] for i in range(0, len(self.compact), 4))
[ "def", "formatted", "(", "self", ")", ":", "return", "' '", ".", "join", "(", "self", ".", "compact", "[", "i", ":", "i", "+", "4", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "self", ".", "compact", ")", ",", "4", ")", ")" ]
str: The IBAN formatted in blocks of 4 digits.
[ "str", ":", "The", "IBAN", "formatted", "in", "blocks", "of", "4", "digits", "." ]
train
https://github.com/figo-connect/schwifty/blob/69376fade070dbfdf89c57a0060bc290f7a744bb/schwifty/iban.py#L145-L147
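The grouping idiom in isolation, using the IBAN from the `generate` doctest above:

.. code-block:: python

    compact = "DE89370400440532013000"
    print(' '.join(compact[i:i + 4] for i in range(0, len(compact), 4)))
    # DE89 3704 0044 0532 0130 00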
ZELLMECHANIK-DRESDEN/dclab
dclab/cli.py
tdms2rtdc
def tdms2rtdc(): """Convert .tdms datasets to the hdf5-based .rtdc file format""" parser = tdms2rtdc_parser() args = parser.parse_args() path_tdms = pathlib.Path(args.tdms_path).resolve() path_rtdc = pathlib.Path(args.rtdc_path) # Determine whether input path is a tdms file or a directory if path_tdms.is_dir(): files_tdms = fmt_tdms.get_tdms_files(path_tdms) if path_rtdc.is_file(): raise ValueError("rtdc_path is a file: {}".format(path_rtdc)) files_rtdc = [] for ff in files_tdms: ff = pathlib.Path(ff) rp = ff.relative_to(path_tdms) # determine output file name (same relative path) rpr = path_rtdc / rp.with_suffix(".rtdc") files_rtdc.append(rpr) else: files_tdms = [path_tdms] files_rtdc = [path_rtdc] for ii in range(len(files_tdms)): ff = pathlib.Path(files_tdms[ii]) fr = pathlib.Path(files_rtdc[ii]) print_info("Converting {:d}/{:d}: {}".format( ii + 1, len(files_tdms), ff)) # load dataset ds = load.load_file(ff) # create directory if not fr.parent.exists(): fr.parent.mkdir(parents=True) # determine features to export features = [] if args.compute_features: tocomp = dfn.feature_names else: tocomp = ds._events for feat in tocomp: if feat not in dfn.scalar_feature_names: if not ds[feat]: # ignore non-existent contour, image, mask, or trace continue elif feat not in ds: # ignore non-existent feature continue features.append(feat) # export as hdf5 ds.export.hdf5(path=fr, features=features, filtered=False, override=True)
python
def tdms2rtdc(): """Convert .tdms datasets to the hdf5-based .rtdc file format""" parser = tdms2rtdc_parser() args = parser.parse_args() path_tdms = pathlib.Path(args.tdms_path).resolve() path_rtdc = pathlib.Path(args.rtdc_path) # Determine whether input path is a tdms file or a directory if path_tdms.is_dir(): files_tdms = fmt_tdms.get_tdms_files(path_tdms) if path_rtdc.is_file(): raise ValueError("rtdc_path is a file: {}".format(path_rtdc)) files_rtdc = [] for ff in files_tdms: ff = pathlib.Path(ff) rp = ff.relative_to(path_tdms) # determine output file name (same relative path) rpr = path_rtdc / rp.with_suffix(".rtdc") files_rtdc.append(rpr) else: files_tdms = [path_tdms] files_rtdc = [path_rtdc] for ii in range(len(files_tdms)): ff = pathlib.Path(files_tdms[ii]) fr = pathlib.Path(files_rtdc[ii]) print_info("Converting {:d}/{:d}: {}".format( ii + 1, len(files_tdms), ff)) # load dataset ds = load.load_file(ff) # create directory if not fr.parent.exists(): fr.parent.mkdir(parents=True) # determine features to export features = [] if args.compute_features: tocomp = dfn.feature_names else: tocomp = ds._events for feat in tocomp: if feat not in dfn.scalar_feature_names: if not ds[feat]: # ignore non-existent contour, image, mask, or trace continue elif feat not in ds: # ignore non-existent feature continue features.append(feat) # export as hdf5 ds.export.hdf5(path=fr, features=features, filtered=False, override=True)
[ "def", "tdms2rtdc", "(", ")", ":", "parser", "=", "tdms2rtdc_parser", "(", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "path_tdms", "=", "pathlib", ".", "Path", "(", "args", ".", "tdms_path", ")", ".", "resolve", "(", ")", "path_rtdc", "="...
Convert .tdms datasets to the hdf5-based .rtdc file format
[ "Convert", ".", "tdms", "datasets", "to", "the", "hdf5", "-", "based", ".", "rtdc", "file", "format" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/cli.py#L21-L75
ZELLMECHANIK-DRESDEN/dclab
dclab/cli.py
verify_dataset
def verify_dataset(): """Perform checks on experimental datasets""" parser = verify_dataset_parser() args = parser.parse_args() path_in = pathlib.Path(args.path).resolve() viol, aler, info = load.check_dataset(path_in) print_info("Checking {}".format(path_in)) for inf in info: print_info(inf) for ale in aler: print_alert(ale) for vio in viol: print_violation(vio) print_info("Check Complete: {} violations and {} alerts".format(len(viol), len(aler)))
python
def verify_dataset(): """Perform checks on experimental datasets""" parser = verify_dataset_parser() args = parser.parse_args() path_in = pathlib.Path(args.path).resolve() viol, aler, info = load.check_dataset(path_in) print_info("Checking {}".format(path_in)) for inf in info: print_info(inf) for ale in aler: print_alert(ale) for vio in viol: print_violation(vio) print_info("Check Complete: {} violations and {} alerts".format(len(viol), len(aler)))
[ "def", "verify_dataset", "(", ")", ":", "parser", "=", "verify_dataset_parser", "(", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "path_in", "=", "pathlib", ".", "Path", "(", "args", ".", "path", ")", ".", "resolve", "(", ")", "viol", ",", ...
Perform checks on experimental datasets
[ "Perform", "checks", "on", "experimental", "datasets" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/cli.py#L105-L119
ZELLMECHANIK-DRESDEN/dclab
dclab/rtdc_dataset/config.py
load_from_file
def load_from_file(cfg_file): """Load the configuration from a file Parameters ---------- cfg_file: str Path to configuration file Returns ------- cfg : CaseInsensitiveDict Dictionary with configuration parameters """ path = pathlib.Path(cfg_file).resolve() with path.open('r') as f: code = f.readlines() cfg = CaseInsensitiveDict() for line in code: # Strip trailing comments (everything after a hash) and # skip lines that are empty afterwards. line = line.split("#")[0].strip() if len(line) != 0: if line.startswith("[") and line.endswith("]"): section = line[1:-1].lower() if section not in cfg: cfg[section] = CaseInsensitiveDict() continue var, val = line.split("=", 1) var = var.strip().lower() val = val.strip("' ").strip('" ').strip() # convert parameter value to correct type if (section in dfn.config_funcs and var in dfn.config_funcs[section]): # standard parameter with known type val = dfn.config_funcs[section][var](val) else: # unknown parameter (e.g. plotting in Shape-Out), guess type var, val = keyval_str2typ(var, val) if len(var) != 0 and len(str(val)) != 0: cfg[section][var] = val return cfg
python
def load_from_file(cfg_file): """Load the configuration from a file Parameters ---------- cfg_file: str Path to configuration file Returns ------- cfg : CaseInsensitiveDict Dictionary with configuration parameters """ path = pathlib.Path(cfg_file).resolve() with path.open('r') as f: code = f.readlines() cfg = CaseInsensitiveDict() for line in code: # Strip trailing comments (everything after a hash) and # skip lines that are empty afterwards. line = line.split("#")[0].strip() if len(line) != 0: if line.startswith("[") and line.endswith("]"): section = line[1:-1].lower() if section not in cfg: cfg[section] = CaseInsensitiveDict() continue var, val = line.split("=", 1) var = var.strip().lower() val = val.strip("' ").strip('" ').strip() # convert parameter value to correct type if (section in dfn.config_funcs and var in dfn.config_funcs[section]): # standard parameter with known type val = dfn.config_funcs[section][var](val) else: # unknown parameter (e.g. plotting in Shape-Out), guess type var, val = keyval_str2typ(var, val) if len(var) != 0 and len(str(val)) != 0: cfg[section][var] = val return cfg
[ "def", "load_from_file", "(", "cfg_file", ")", ":", "path", "=", "pathlib", ".", "Path", "(", "cfg_file", ")", ".", "resolve", "(", ")", "with", "path", ".", "open", "(", "'r'", ")", "as", "f", ":", "code", "=", "f", ".", "readlines", "(", ")", "...
Load the configuration from a file Parameters ---------- cfg_file: str Path to configuration file Returns ------- cfg : CaseInsensitiveDict Dictionary with configuration parameters
[ "Load", "the", "configuration", "from", "a", "file" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/config.py#L190-L234
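A sketch of the accepted syntax: bracketed section names, `key = value` pairs, trailing `#` comments. The keys here are made up, so they take the `keyval_str2typ` fallback path; the case-insensitive access assumes dclab's `CaseInsensitiveDict` behaves as its name suggests:

.. code-block:: python

    import pathlib
    import tempfile

    from dclab.rtdc_dataset.config import load_from_file

    text = ("[plotting]\n"
            "kde multiplier = 1.5\n"
            "fix range = true  # everything after the hash is stripped\n")
    path = pathlib.Path(tempfile.mkdtemp()) / "example.cfg"
    path.write_text(text)

    cfg = load_from_file(path)
    assert cfg["plotting"]["fix range"] is True        # heuristic bool
    assert cfg["Plotting"]["KDE Multiplier"] == 1.5    # case-insensitive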
ZELLMECHANIK-DRESDEN/dclab
dclab/rtdc_dataset/config.py
keyval_str2typ
def keyval_str2typ(var, val): """Convert a variable from a string to its correct type Parameters ---------- var: str The variable name val: str The value of the variable represented as a string Returns ------- varout: str Stripped lowercase `var` valout: any type The value converted from string to its presumed type Notes ----- This method is heuristic and is only intended for usage in dclab. See Also -------- keyval_typ2str: the opposite """ if not (isinstance(val, str_types)): # already a type: return var.strip(), val var = var.strip().lower() val = val.strip() # Find values if len(var) != 0 and len(val) != 0: # check for a list of floats if val.startswith("[") and val.endswith("]"): if len(val.strip("[],")) == 0: # empty list values = [] else: values = val.strip("[],").split(",") values = [float(v) for v in values] return var, values elif val.lower() in ["true", "y"]: return var, True elif val.lower() in ["false", "n"]: return var, False elif val[0] in ["'", '"'] and val[-1] in ["'", '"']: return var, val.strip("'").strip('"').strip() elif val in dfn.scalar_feature_names: return var, val else: try: return var, float(val.replace(",", ".")) except ValueError: return var, val
python
def keyval_str2typ(var, val): """Convert a variable from a string to its correct type Parameters ---------- var: str The variable name val: str The value of the variable represented as a string Returns ------- varout: str Stripped lowercase `var` valout: any type The value converted from string to its presumed type Notes ----- This method is heuristic and is only intended for usage in dclab. See Also -------- keyval_typ2str: the opposite """ if not (isinstance(val, str_types)): # already a type: return var.strip(), val var = var.strip().lower() val = val.strip() # Find values if len(var) != 0 and len(val) != 0: # check for a list of floats if val.startswith("[") and val.endswith("]"): if len(val.strip("[],")) == 0: # empty list values = [] else: values = val.strip("[],").split(",") values = [float(v) for v in values] return var, values elif val.lower() in ["true", "y"]: return var, True elif val.lower() in ["false", "n"]: return var, False elif val[0] in ["'", '"'] and val[-1] in ["'", '"']: return var, val.strip("'").strip('"').strip() elif val in dfn.scalar_feature_names: return var, val else: try: return var, float(val.replace(",", ".")) except ValueError: return var, val
[ "def", "keyval_str2typ", "(", "var", ",", "val", ")", ":", "if", "not", "(", "isinstance", "(", "val", ",", "str_types", ")", ")", ":", "# already a type:", "return", "var", ".", "strip", "(", ")", ",", "val", "var", "=", "var", ".", "strip", "(", ...
Convert a variable from a string to its correct type Parameters ---------- var: str The variable name val: str The value of the variable represented as a string Returns ------- varout: str Stripped lowercase `var` valout: any type The value converted from string to its presumed type Notes ----- This method is heuristic and is only intended for usage in dclab. See Also -------- keyval_typ2str: the opposite
[ "Convert", "a", "variable", "from", "a", "string", "to", "its", "correct", "type" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/config.py#L237-L291
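A few representative conversions; note the decimal-comma handling ("3,14" becomes 3.14) via `val.replace(",", ".")`:

.. code-block:: python

    from dclab.rtdc_dataset.config import keyval_str2typ

    for raw in ("true", "n", "[1.0, 2.5]", "3,14", "'text'"):
        print(keyval_str2typ("Some Key ", raw))
    # ('some key', True)
    # ('some key', False)
    # ('some key', [1.0, 2.5])
    # ('some key', 3.14)
    # ('some key', 'text')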
ZELLMECHANIK-DRESDEN/dclab
dclab/rtdc_dataset/config.py
keyval_typ2str
def keyval_typ2str(var, val): """Convert a variable to a string Parameters ---------- var: str The variable name val: any type The value of the variable Returns ------- varout: str Stripped `var` valout: str The value converted to a useful string representation See Also -------- keyval_str2typ: the opposite """ varout = var.strip() if isinstance(val, list): data = ", ".join([keyval_typ2str(var, it)[1] for it in val]) valout = "["+data+"]" elif isinstance(val, float): valout = "{:.12f}".format(val) else: valout = "{}".format(val) return varout, valout
python
def keyval_typ2str(var, val): """Convert a variable to a string Parameters ---------- var: str The variable name val: any type The value of the variable Returns ------- varout: str Stripped lowercase `var` valout: any type The value converted to a useful string representation See Also -------- keyval_str2typ: the opposite """ varout = var.strip() if isinstance(val, list): data = ", ".join([keyval_typ2str(var, it)[1] for it in val]) valout = "["+data+"]" elif isinstance(val, float): valout = "{:.12f}".format(val) else: valout = "{}".format(val) return varout, valout
[ "def", "keyval_typ2str", "(", "var", ",", "val", ")", ":", "varout", "=", "var", ".", "strip", "(", ")", "if", "isinstance", "(", "val", ",", "list", ")", ":", "data", "=", "\", \"", ".", "join", "(", "[", "keyval_typ2str", "(", "var", ",", "it", ...
Convert a variable to a string Parameters ---------- var: str The variable name val: any type The value of the variable Returns ------- varout: str Stripped lowercase `var` valout: any type The value converted to a useful string representation See Also -------- keyval_str2typ: the opposite
[ "Convert", "a", "variable", "to", "a", "string" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/config.py#L294-L323
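A round-trip sketch pairing this record with keyval_str2typ above; assumes dclab is installed, and the values are illustrative:

from dclab.rtdc_dataset.config import keyval_typ2str

keyval_typ2str("channel width", 20.0)
# ('channel width', '20.000000000000'), floats get 12 decimal places
keyval_typ2str("polygon filters", [1.0, 2.0])
# ('polygon filters', '[1.000000000000, 2.000000000000]')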
ZELLMECHANIK-DRESDEN/dclab
dclab/rtdc_dataset/config.py
Configuration._init_default_values
def _init_default_values(self): """Set default initial values The default values are hard-coded for backwards compatibility and for several functionalities in dclab. """ # Do not filter out invalid event values self["filtering"]["remove invalid events"] = False # Enable filters switch is mandatory self["filtering"]["enable filters"] = True # Limit events integer to downsample output data self["filtering"]["limit events"] = 0 # Polygon filter list self["filtering"]["polygon filters"] = [] # Defaults to no hierarchy parent self["filtering"]["hierarchy parent"] = "none" # Check for missing min/max values and set them to zero for item in dfn.scalar_feature_names: appends = [" min", " max"] for a in appends: self["filtering"][item + a] = 0
python
def _init_default_values(self): """Set default initial values The default values are hard-coded for backwards compatibility and for several functionalities in dclab. """ # Do not filter out invalid event values self["filtering"]["remove invalid events"] = False # Enable filters switch is mandatory self["filtering"]["enable filters"] = True # Limit events integer to downsample output data self["filtering"]["limit events"] = 0 # Polygon filter list self["filtering"]["polygon filters"] = [] # Defaults to no hierarchy parent self["filtering"]["hierarchy parent"] = "none" # Check for missing min/max values and set them to zero for item in dfn.scalar_feature_names: appends = [" min", " max"] for a in appends: self["filtering"][item + a] = 0
[ "def", "_init_default_values", "(", "self", ")", ":", "# Do not filter out invalid event values", "self", "[", "\"filtering\"", "]", "[", "\"remove invalid events\"", "]", "=", "False", "# Enable filters switch is mandatory", "self", "[", "\"filtering\"", "]", "[", "\"ena...
Set default initial values The default values are hard-coded for backwards compatibility and for several functionalities in dclab.
[ "Set", "default", "initial", "values" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/config.py#L130-L150
ZELLMECHANIK-DRESDEN/dclab
dclab/rtdc_dataset/config.py
Configuration.save
def save(self, filename): """Save the configuration to a file""" filename = pathlib.Path(filename) out = [] keys = sorted(list(self.keys())) for key in keys: out.append("[{}]".format(key)) section = self[key] ikeys = list(section.keys()) ikeys.sort() for ikey in ikeys: var, val = keyval_typ2str(ikey, section[ikey]) out.append("{} = {}".format(var, val)) out.append("") with filename.open("w") as f: for i in range(len(out)): # win-like line endings out[i] = out[i]+"\n" f.writelines(out)
python
def save(self, filename): """Save the configuration to a file""" filename = pathlib.Path(filename) out = [] keys = sorted(list(self.keys())) for key in keys: out.append("[{}]".format(key)) section = self[key] ikeys = list(section.keys()) ikeys.sort() for ikey in ikeys: var, val = keyval_typ2str(ikey, section[ikey]) out.append("{} = {}".format(var, val)) out.append("") with filename.open("w") as f: for i in range(len(out)): # win-like line endings out[i] = out[i]+"\n" f.writelines(out)
[ "def", "save", "(", "self", ",", "filename", ")", ":", "filename", "=", "pathlib", ".", "Path", "(", "filename", ")", "out", "=", "[", "]", "keys", "=", "sorted", "(", "list", "(", "self", ".", "keys", "(", ")", ")", ")", "for", "key", "in", "k...
Save the configuration to a file
[ "Save", "the", "configuration", "to", "a", "file" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/config.py#L160-L179
ZELLMECHANIK-DRESDEN/dclab
dclab/rtdc_dataset/config.py
Configuration.update
def update(self, newcfg): """Update current config with a dictionary""" for key in newcfg.keys(): if key not in self._cfg: self._cfg[key] = CaseInsensitiveDict() for skey in newcfg[key]: self._cfg[key][skey] = newcfg[key][skey]
python
def update(self, newcfg): """Update current config with a dictionary""" for key in newcfg.keys(): if key not in self._cfg: self._cfg[key] = CaseInsensitiveDict() for skey in newcfg[key]: self._cfg[key][skey] = newcfg[key][skey]
[ "def", "update", "(", "self", ",", "newcfg", ")", ":", "for", "key", "in", "newcfg", ".", "keys", "(", ")", ":", "if", "key", "not", "in", "self", ".", "_cfg", ":", "self", ".", "_cfg", "[", "key", "]", "=", "CaseInsensitiveDict", "(", ")", "for"...
Update current config with a dictionary
[ "Update", "current", "config", "with", "a", "dictionary" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/config.py#L181-L187
ZELLMECHANIK-DRESDEN/dclab
dclab/features/emodulus.py
convert
def convert(area_um, deform, emodulus, channel_width_in, channel_width_out, flow_rate_in, flow_rate_out, viscosity_in, viscosity_out, inplace=False): """convert area-deformation-emodulus triplet The conversion formula is described in :cite:`Mietke2015`. Parameters ---------- area_um: ndarray Convex cell area [µm²] deform: ndarray Deformation emodulus: ndarray Young's Modulus [kPa] channel_width_in: float Original channel width [µm] channel_width_out: float Target channel width [µm] flow_rate_in: float Original flow rate [µl/s] flow_rate_out: float Target flow rate [µl/s] viscosity_in: float Original viscosity [mPa*s] viscosity_out: float Target viscosity [mPa*s] inplace: bool If True, override input arrays with corrected data Returns ------- area_um_corr: ndarray Corrected cell area [µm²] deform_corr: ndarray Deformation (a copy if `inplace` is False) emodulus_corr: ndarray Corrected emodulus [kPa] """ copy = not inplace # make sure area_um_corr is not an integer array area_um_corr = np.array(area_um, dtype=float, copy=copy) deform_corr = np.array(deform, copy=copy) emodulus_corr = np.array(emodulus, copy=copy) if channel_width_in != channel_width_out: area_um_corr *= (channel_width_out / channel_width_in)**2 if (flow_rate_in != flow_rate_out or viscosity_in != viscosity_out or channel_width_in != channel_width_out): emodulus_corr *= (flow_rate_out / flow_rate_in) \ * (viscosity_out / viscosity_in) \ * (channel_width_in / channel_width_out)**3 return area_um_corr, deform_corr, emodulus_corr
python
def convert(area_um, deform, emodulus, channel_width_in, channel_width_out, flow_rate_in, flow_rate_out, viscosity_in, viscosity_out, inplace=False): """convert area-deformation-emodulus triplet The conversion formula is described in :cite:`Mietke2015`. Parameters ---------- area_um: ndarray Convex cell area [µm²] deform: ndarray Deformation emodulus: ndarray Young's Modulus [kPa] channel_width_in: float Original channel width [µm] channel_width_out: float Target channel width [µm] flow_rate_in: float Original flow rate [µl/s] flow_rate_out: float Target flow rate [µl/s] viscosity_in: float Original viscosity [mPa*s] viscosity_out: float Target viscosity [mPa*s] inplace: bool If True, override input arrays with corrected data Returns ------- area_um_corr: ndarray Corrected cell area [µm²] deform_corr: ndarray Deformation (a copy if `inplace` is False) emodulus_corr: ndarray Corrected emodulus [kPa] """ copy = not inplace # make sure area_um_corr is not an integer array area_um_corr = np.array(area_um, dtype=float, copy=copy) deform_corr = np.array(deform, copy=copy) emodulus_corr = np.array(emodulus, copy=copy) if channel_width_in != channel_width_out: area_um_corr *= (channel_width_out / channel_width_in)**2 if (flow_rate_in != flow_rate_out or viscosity_in != viscosity_out or channel_width_in != channel_width_out): emodulus_corr *= (flow_rate_out / flow_rate_in) \ * (viscosity_out / viscosity_in) \ * (channel_width_in / channel_width_out)**3 return area_um_corr, deform_corr, emodulus_corr
[ "def", "convert", "(", "area_um", ",", "deform", ",", "emodulus", ",", "channel_width_in", ",", "channel_width_out", ",", "flow_rate_in", ",", "flow_rate_out", ",", "viscosity_in", ",", "viscosity_out", ",", "inplace", "=", "False", ")", ":", "copy", "=", "not...
convert area-deformation-emodulus triplet The conversion formula is described in :cite:`Mietke2015`. Parameters ---------- area_um: ndarray Convex cell area [µm²] deform: ndarray Deformation emodulus: ndarray Young's Modulus [kPa] channel_width_in: float Original channel width [µm] channel_width_out: float Target channel width [µm] flow_rate_in: float Original flow rate [µl/s] flow_rate_out: float Target flow rate [µl/s] viscosity_in: float Original viscosity [mPa*s] viscosity_out: float Target viscosity [mPa*s] inplace: bool If True, override input arrays with corrected data Returns ------- area_um_corr: ndarray Corrected cell area [µm²] deform_corr: ndarray Deformation (a copy if `inplace` is False) emodulus_corr: ndarray Corrected emodulus [kPa]
[ "convert", "area", "-", "deformation", "-", "emodulus", "triplet" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/emodulus.py#L15-L72
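Usage sketch for convert, assuming dclab at the pinned commit is installed; the input triplet is made up and the comments restate the scaling factors from the function body:

import numpy as np
from dclab.features.emodulus import convert

area_um = np.array([50.0, 100.0])
deform = np.array([0.01, 0.02])
emodulus = np.array([1.0, 1.5])
a2, d2, e2 = convert(area_um, deform, emodulus,
                     channel_width_in=20.0, channel_width_out=30.0,
                     flow_rate_in=0.04, flow_rate_out=0.16,
                     viscosity_in=15.0, viscosity_out=15.0)
# area scales by (30/20)**2; emodulus by (0.16/0.04) * (20/30)**3
# deform is returned as an unchanged copy (inplace=False)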
ZELLMECHANIK-DRESDEN/dclab
dclab/features/emodulus.py
corrpix_deform_delta
def corrpix_deform_delta(area_um, px_um=0.34): """Deformation correction term for pixelation effects The contour in RT-DC measurements is computed on a pixelated grid. Due to sampling problems, the measured deformation is overestimated and must be corrected. The correction formula is described in :cite:`Herold2017`. Parameters ---------- area_um: float or ndarray Apparent (2D image) area in µm² of the event(s) px_um: float The detector pixel size in µm. Returns ------- deform_delta: float or ndarray Error of the deformation of the event(s) that must be subtracted from `deform`. deform_corr = deform - deform_delta """ # A triple-exponential decay can be used to correct for pixelation # for apparent cell areas between 10 and 1250µm². # For 99 different radii between 0.4 μm and 20 μm circular objects were # simulated on a pixel grid with the pixel resolution of 340 nm/pix. At # each radius 1000 random starting points were created and the # obtained contours were analyzed in the same fashion as RT-DC data. # A convex hull on the contour was used to calculate the size (as area) # and the deformation. # The pixel size correction `pxcorr` takes into account the pixel size # in the pixelation correction formula. pxcorr = (.34 / px_um)**2 offs = 0.0012 exp1 = 0.020 * np.exp(-area_um * pxcorr / 7.1) exp2 = 0.010 * np.exp(-area_um * pxcorr / 38.6) exp3 = 0.005 * np.exp(-area_um * pxcorr / 296) delta = offs + exp1 + exp2 + exp3 return delta
python
def corrpix_deform_delta(area_um, px_um=0.34): """Deformation correction term for pixelation effects The contour in RT-DC measurements is computed on a pixelated grid. Due to sampling problems, the measured deformation is overestimated and must be corrected. The correction formula is described in :cite:`Herold2017`. Parameters ---------- area_um: float or ndarray Apparent (2D image) area in µm² of the event(s) px_um: float The detector pixel size in µm. Returns ------- deform_delta: float or ndarray Error of the deformation of the event(s) that must be subtracted from `deform`. deform_corr = deform - deform_delta """ # A triple-exponential decay can be used to correct for pixelation # for apparent cell areas between 10 and 1250µm². # For 99 different radii between 0.4 μm and 20 μm circular objects were # simulated on a pixel grid with the pixel resolution of 340 nm/pix. At # each radius 1000 random starting points were created and the # obtained contours were analyzed in the same fashion as RT-DC data. # A convex hull on the contour was used to calculate the size (as area) # and the deformation. # The pixel size correction `pxcorr` takes into account the pixel size # in the pixelation correction formula. pxcorr = (.34 / px_um)**2 offs = 0.0012 exp1 = 0.020 * np.exp(-area_um * pxcorr / 7.1) exp2 = 0.010 * np.exp(-area_um * pxcorr / 38.6) exp3 = 0.005 * np.exp(-area_um * pxcorr / 296) delta = offs + exp1 + exp2 + exp3 return delta
[ "def", "corrpix_deform_delta", "(", "area_um", ",", "px_um", "=", "0.34", ")", ":", "# A triple-exponential decay can be used to correct for pixelation", "# for apparent cell areas between 10 and 1250µm².", "# For 99 different radii between 0.4 μm and 20 μm circular objects were", "# simul...
Deformation correction term for pixelation effects The contour in RT-DC measurements is computed on a pixelated grid. Due to sampling problems, the measured deformation is overestimated and must be corrected. The correction formula is described in :cite:`Herold2017`. Parameters ---------- area_um: float or ndarray Apparent (2D image) area in µm² of the event(s) px_um: float The detector pixel size in µm. Returns ------- deform_delta: float or ndarray Error of the deformation of the event(s) that must be subtracted from `deform`. deform_corr = deform - deform_delta
[ "Deformation", "correction", "term", "for", "pixelation", "effects" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/emodulus.py#L75-L117
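A short sketch applying the pixelation correction, assuming dclab is installed; the measured deformation value 0.02 is illustrative:

import numpy as np
from dclab.features.emodulus import corrpix_deform_delta

area_um = np.linspace(10, 1250, 5)  # area range covered by the correction
delta = corrpix_deform_delta(area_um, px_um=0.34)
deform_corr = 0.02 - delta  # subtract the error term from the measured deform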
ZELLMECHANIK-DRESDEN/dclab
dclab/features/emodulus.py
get_emodulus
def get_emodulus(area_um, deform, medium="CellCarrier", channel_width=20.0, flow_rate=0.16, px_um=0.34, temperature=23.0, copy=True): """Compute apparent Young's modulus using a look-up table Parameters ---------- area_um: float or ndarray Apparent (2D image) area [µm²] of the event(s) deform: float or ndarray The deformation (1-circularity) of the event(s) medium: str or float The medium to compute the viscosity for. If a string in ["CellCarrier", "CellCarrier B"] is given, the viscosity will be computed. If a float is given, this value will be used as the viscosity in mPa*s. channel_width: float The channel width [µm] flow_rate: float Flow rate [µl/s] px_um: float The detector pixel size [µm] used for pixelation correction. Set to zero to disable. temperature: float or ndarray Temperature [°C] of the event(s) copy: bool Copy input arrays. If set to false, input arrays are overridden. Returns ------- elasticity: float or ndarray Apparent Young's modulus in kPa Notes ----- - The look-up table used was computed with finite elements methods according to :cite:`Mokbel2017`. - The computation of the Young's modulus takes into account corrections for the viscosity (medium, channel width, flow rate, and temperature) :cite:`Mietke2015` and corrections for pixelation of the area and the deformation which are computed from a (pixelated) image :cite:`Herold2017`. See Also -------- dclab.features.emodulus_viscosity.get_viscosity: compute viscosity for known media """ # copy input arrays so we can use in-place calculations deform = np.array(deform, copy=copy, dtype=float) area_um = np.array(area_um, copy=copy, dtype=float) # Get lut data lut_path = resource_filename("dclab.features", "emodulus_lut.txt") with pathlib.Path(lut_path).open("rb") as lufd: lut = np.loadtxt(lufd) # These meta data are the simulation parameters of the lut lut_channel_width = 20.0 lut_flow_rate = 0.04 lut_visco = 15.0 # Compute viscosity if isinstance(medium, (float, int)): visco = medium else: visco = get_viscosity(medium=medium, channel_width=channel_width, flow_rate=flow_rate, temperature=temperature) # Corrections # We correct the lut, because it contains less points than # the event data. Furthermore, the lut could be cached # in the future, if this takes up a lot of time. convert(area_um=lut[:, 0], deform=lut[:, 1], emodulus=lut[:, 2], channel_width_in=lut_channel_width, channel_width_out=channel_width, flow_rate_in=lut_flow_rate, flow_rate_out=flow_rate, viscosity_in=lut_visco, viscosity_out=visco, inplace=True) if px_um: # Correct deformation for pixelation effect (subtract ddelt). ddelt = corrpix_deform_delta(area_um=area_um, px_um=px_um) deform -= ddelt # Normalize interpolation data such that the spacing for # area and deformation is about the same during interpolation. area_norm = lut[:, 0].max() normalize(lut[:, 0], area_norm) normalize(area_um, area_norm) defo_norm = lut[:, 1].max() normalize(lut[:, 1], defo_norm) normalize(deform, defo_norm) # Perform interpolation emod = spint.griddata((lut[:, 0], lut[:, 1]), lut[:, 2], (area_um, deform), method='linear') return emod
python
def get_emodulus(area_um, deform, medium="CellCarrier", channel_width=20.0, flow_rate=0.16, px_um=0.34, temperature=23.0, copy=True): """Compute apparent Young's modulus using a look-up table Parameters ---------- area_um: float or ndarray Apparent (2D image) area [µm²] of the event(s) deform: float or ndarray The deformation (1-circularity) of the event(s) medium: str or float The medium to compute the viscosity for. If a string in ["CellCarrier", "CellCarrier B"] is given, the viscosity will be computed. If a float is given, this value will be used as the viscosity in mPa*s. channel_width: float The channel width [µm] flow_rate: float Flow rate [µl/s] px_um: float The detector pixel size [µm] used for pixelation correction. Set to zero to disable. temperature: float or ndarray Temperature [°C] of the event(s) copy: bool Copy input arrays. If set to false, input arrays are overridden. Returns ------- elasticity: float or ndarray Apparent Young's modulus in kPa Notes ----- - The look-up table used was computed with finite elements methods according to :cite:`Mokbel2017`. - The computation of the Young's modulus takes into account corrections for the viscosity (medium, channel width, flow rate, and temperature) :cite:`Mietke2015` and corrections for pixelation of the area and the deformation which are computed from a (pixelated) image :cite:`Herold2017`. See Also -------- dclab.features.emodulus_viscosity.get_viscosity: compute viscosity for known media """ # copy input arrays so we can use in-place calculations deform = np.array(deform, copy=copy, dtype=float) area_um = np.array(area_um, copy=copy, dtype=float) # Get lut data lut_path = resource_filename("dclab.features", "emodulus_lut.txt") with pathlib.Path(lut_path).open("rb") as lufd: lut = np.loadtxt(lufd) # These meta data are the simulation parameters of the lut lut_channel_width = 20.0 lut_flow_rate = 0.04 lut_visco = 15.0 # Compute viscosity if isinstance(medium, (float, int)): visco = medium else: visco = get_viscosity(medium=medium, channel_width=channel_width, flow_rate=flow_rate, temperature=temperature) # Corrections # We correct the lut, because it contains less points than # the event data. Furthermore, the lut could be cached # in the future, if this takes up a lot of time. convert(area_um=lut[:, 0], deform=lut[:, 1], emodulus=lut[:, 2], channel_width_in=lut_channel_width, channel_width_out=channel_width, flow_rate_in=lut_flow_rate, flow_rate_out=flow_rate, viscosity_in=lut_visco, viscosity_out=visco, inplace=True) if px_um: # Correct deformation for pixelation effect (subtract ddelt). ddelt = corrpix_deform_delta(area_um=area_um, px_um=px_um) deform -= ddelt # Normalize interpolation data such that the spacing for # area and deformation is about the same during interpolation. area_norm = lut[:, 0].max() normalize(lut[:, 0], area_norm) normalize(area_um, area_norm) defo_norm = lut[:, 1].max() normalize(lut[:, 1], defo_norm) normalize(deform, defo_norm) # Perform interpolation emod = spint.griddata((lut[:, 0], lut[:, 1]), lut[:, 2], (area_um, deform), method='linear') return emod
[ "def", "get_emodulus", "(", "area_um", ",", "deform", ",", "medium", "=", "\"CellCarrier\"", ",", "channel_width", "=", "20.0", ",", "flow_rate", "=", "0.16", ",", "px_um", "=", "0.34", ",", "temperature", "=", "23.0", ",", "copy", "=", "True", ")", ":",...
Compute apparent Young's modulus using a look-up table Parameters ---------- area_um: float or ndarray Apparent (2D image) area [µm²] of the event(s) deform: float or ndarray The deformation (1-circularity) of the event(s) medium: str or float The medium to compute the viscosity for. If a string in ["CellCarrier", "CellCarrier B"] is given, the viscosity will be computed. If a float is given, this value will be used as the viscosity in mPa*s. channel_width: float The channel width [µm] flow_rate: float Flow rate [µl/s] px_um: float The detector pixel size [µm] used for pixelation correction. Set to zero to disable. temperature: float or ndarray Temperature [°C] of the event(s) copy: bool Copy input arrays. If set to false, input arrays are overridden. Returns ------- elasticity: float or ndarray Apparent Young's modulus in kPa Notes ----- - The look-up table used was computed with finite elements methods according to :cite:`Mokbel2017`. - The computation of the Young's modulus takes into account corrections for the viscosity (medium, channel width, flow rate, and temperature) :cite:`Mietke2015` and corrections for pixelation of the area and the deformation which are computed from a (pixelated) image :cite:`Herold2017`. See Also -------- dclab.features.emodulus_viscosity.get_viscosity: compute viscosity for known media
[ "Compute", "apparent", "Young", "s", "modulus", "using", "a", "look", "-", "up", "table" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/emodulus.py#L120-L220
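Usage sketch for get_emodulus, assuming dclab (which bundles the look-up table) is installed; the input arrays are illustrative:

import numpy as np
from dclab.features.emodulus import get_emodulus

area_um = np.array([100.0, 120.0])
deform = np.array([0.01, 0.012])
emod = get_emodulus(area_um, deform, medium="CellCarrier",
                    channel_width=20.0, flow_rate=0.16,
                    px_um=0.34, temperature=23.0)
# scipy's griddata yields nan for points outside the look-up table's hull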
openstax/cnx-archive
cnxarchive/views/search.py
search
def search(request): """Search API.""" empty_response = json.dumps({ u'query': { u'limits': [], u'per_page': DEFAULT_PER_PAGE, u'page': 1, }, u'results': { u'items': [], u'total': 0, u'limits': [], }, }) params = request.params resp = request.response resp.status = '200 OK' resp.content_type = 'application/json' search_terms = params.get('q', '') query_type = params.get('t', None) if query_type is None or query_type not in QUERY_TYPES: query_type = DEFAULT_QUERY_TYPE try: per_page = int(params.get('per_page', '')) except (TypeError, ValueError, IndexError): per_page = None if per_page is None or per_page <= 0: per_page = DEFAULT_PER_PAGE try: page = int(params.get('page', '')) except (TypeError, ValueError, IndexError): page = None if page is None or page <= 0: page = 1 query = Query.from_raw_query(search_terms) if not(query.filters or query.terms): resp.body = empty_response return resp db_results = cache.search( query, query_type, nocache=params.get('nocache', '').lower() == 'true') authors = db_results.auxiliary['authors'] # create a mapping for author id to index in auxiliary authors list author_mapping = {} for i, author in enumerate(authors): author_mapping[author['id']] = i results = {} limits = [] for k, v in query.terms + query.filters: limits.append({'tag': k, 'value': v}) if v in author_mapping: limits[-1]['index'] = author_mapping[v] results['query'] = { 'limits': limits, 'sort': query.sorts, 'per_page': per_page, 'page': page, } results['results'] = {'total': len(db_results), 'items': []} for record in db_results[((page - 1) * per_page):(page * per_page)]: results['results']['items'].append({ 'id': '{}@{}'.format(record['id'], record['version']), 'mediaType': record['mediaType'], 'title': record['title'], # provide the index in the auxiliary authors list 'authors': [{ 'index': author_mapping[a['id']], 'id': a['id'], } for a in record['authors']], 'keywords': record['keywords'], 'summarySnippet': record['abstract'], 'bodySnippet': record['headline'], 'pubDate': record['pubDate'], 'weight': record['weight'], }) result_limits = [] for count_name, values in db_results.counts.items(): if not values: continue result_limits.append({'tag': count_name, 'values': []}) for keyword, count in values: value = {'value': keyword, 'count': count} # if it's an author, provide the index in auxiliary # authors list as well if keyword in author_mapping: value['index'] = author_mapping[keyword] result_limits[-1]['values'].append(value) results['results']['limits'] = result_limits # Add the supplemental result information. results['results']['auxiliary'] = db_results.auxiliary # In the case where a search is performed with an authorId # as a filter, it is possible for the database to return # no results even if the author exists in the database. # Therefore, the database is queried a second time for # contact information associated with only the authorIds. # The author information is then used to update the # results returned by the first database query. if len(db_results) <= 0: authors_results = [] limits = results['query']['limits'] index = 0 statement = SQL['get-users-by-ids'] with db_connect() as db_connection: with db_connection.cursor() as cursor: for idx, limit in enumerate(limits): if limit['tag'] == 'authorID': author = limit['value'] arguments = (author,) cursor.execute(statement, arguments) author_db_result = cursor.fetchall() if author_db_result: author_db_result = author_db_result[0][0] else: author_db_result = {'id': author, 'fullname': None} authors_results.append(author_db_result) limit['index'] = index index = index + 1 limits[idx] = limit results['query']['limits'] = limits results['results']['auxiliary']['authors'] = authors_results resp.body = json.dumps(results) return resp
python
def search(request): """Search API.""" empty_response = json.dumps({ u'query': { u'limits': [], u'per_page': DEFAULT_PER_PAGE, u'page': 1, }, u'results': { u'items': [], u'total': 0, u'limits': [], }, }) params = request.params resp = request.response resp.status = '200 OK' resp.content_type = 'application/json' search_terms = params.get('q', '') query_type = params.get('t', None) if query_type is None or query_type not in QUERY_TYPES: query_type = DEFAULT_QUERY_TYPE try: per_page = int(params.get('per_page', '')) except (TypeError, ValueError, IndexError): per_page = None if per_page is None or per_page <= 0: per_page = DEFAULT_PER_PAGE try: page = int(params.get('page', '')) except (TypeError, ValueError, IndexError): page = None if page is None or page <= 0: page = 1 query = Query.from_raw_query(search_terms) if not(query.filters or query.terms): resp.body = empty_response return resp db_results = cache.search( query, query_type, nocache=params.get('nocache', '').lower() == 'true') authors = db_results.auxiliary['authors'] # create a mapping for author id to index in auxiliary authors list author_mapping = {} for i, author in enumerate(authors): author_mapping[author['id']] = i results = {} limits = [] for k, v in query.terms + query.filters: limits.append({'tag': k, 'value': v}) if v in author_mapping: limits[-1]['index'] = author_mapping[v] results['query'] = { 'limits': limits, 'sort': query.sorts, 'per_page': per_page, 'page': page, } results['results'] = {'total': len(db_results), 'items': []} for record in db_results[((page - 1) * per_page):(page * per_page)]: results['results']['items'].append({ 'id': '{}@{}'.format(record['id'], record['version']), 'mediaType': record['mediaType'], 'title': record['title'], # provide the index in the auxiliary authors list 'authors': [{ 'index': author_mapping[a['id']], 'id': a['id'], } for a in record['authors']], 'keywords': record['keywords'], 'summarySnippet': record['abstract'], 'bodySnippet': record['headline'], 'pubDate': record['pubDate'], 'weight': record['weight'], }) result_limits = [] for count_name, values in db_results.counts.items(): if not values: continue result_limits.append({'tag': count_name, 'values': []}) for keyword, count in values: value = {'value': keyword, 'count': count} # if it's an author, provide the index in auxiliary # authors list as well if keyword in author_mapping: value['index'] = author_mapping[keyword] result_limits[-1]['values'].append(value) results['results']['limits'] = result_limits # Add the supplemental result information. results['results']['auxiliary'] = db_results.auxiliary # In the case where a search is performed with an authorId # as a filter, it is possible for the database to return # no results even if the author exists in the database. # Therefore, the database is queried a second time for # contact information associated with only the authorIds. # The author information is then used to update the # results returned by the first database query. if len(db_results) <= 0: authors_results = [] limits = results['query']['limits'] index = 0 statement = SQL['get-users-by-ids'] with db_connect() as db_connection: with db_connection.cursor() as cursor: for idx, limit in enumerate(limits): if limit['tag'] == 'authorID': author = limit['value'] arguments = (author,) cursor.execute(statement, arguments) author_db_result = cursor.fetchall() if author_db_result: author_db_result = author_db_result[0][0] else: author_db_result = {'id': author, 'fullname': None} authors_results.append(author_db_result) limit['index'] = index index = index + 1 limits[idx] = limit results['query']['limits'] = limits results['results']['auxiliary']['authors'] = authors_results resp.body = json.dumps(results) return resp
[ "def", "search", "(", "request", ")", ":", "empty_response", "=", "json", ".", "dumps", "(", "{", "u'query'", ":", "{", "u'limits'", ":", "[", "]", ",", "u'per_page'", ":", "DEFAULT_PER_PAGE", ",", "u'page'", ":", "1", ",", "}", ",", "u'results'", ":",...
Search API.
[ "Search", "API", "." ]
train
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/search.py#L40-L172
xenon-middleware/pyxenon
xenon/exceptions.py
make_exception
def make_exception(method, e): """Creates an exception for a given method, and RpcError.""" x = e.details() name = x[:x.find(':')].split('.')[-1] if name in globals(): cls = globals()[name] else: cls = UnknownRpcException # noqa return cls(method, e.code(), e.details())
python
def make_exception(method, e): """Creates an exception for a given method, and RpcError.""" x = e.details() name = x[:x.find(':')].split('.')[-1] if name in globals(): cls = globals()[name] else: cls = UnknownRpcException # noqa return cls(method, e.code(), e.details())
[ "def", "make_exception", "(", "method", ",", "e", ")", ":", "x", "=", "e", ".", "details", "(", ")", "name", "=", "x", "[", ":", "x", ".", "find", "(", "':'", ")", "]", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "if", "name", "in", ...
Creates an exception for a given method, and RpcError.
[ "Creates", "an", "exception", "for", "a", "given", "method", "and", "RpcError", "." ]
train
https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/exceptions.py#L18-L27
robmcmullen/atrcopy
atrcopy/utils.py
text_to_int
def text_to_int(text, default_base="hex"): """ Convert text to int, raising exception on invalid input """ if text.startswith("0x"): value = int(text[2:], 16) elif text.startswith("$"): value = int(text[1:], 16) elif text.startswith("#"): value = int(text[1:], 10) elif text.startswith("%"): value = int(text[1:], 2) else: if default_base == "dec": value = int(text) else: value = int(text, 16) return value
python
def text_to_int(text, default_base="hex"): """ Convert text to int, raising exception on invalid input """ if text.startswith("0x"): value = int(text[2:], 16) elif text.startswith("$"): value = int(text[1:], 16) elif text.startswith("#"): value = int(text[1:], 10) elif text.startswith("%"): value = int(text[1:], 2) else: if default_base == "dec": value = int(text) else: value = int(text, 16) return value
[ "def", "text_to_int", "(", "text", ",", "default_base", "=", "\"hex\"", ")", ":", "if", "text", ".", "startswith", "(", "\"0x\"", ")", ":", "value", "=", "int", "(", "text", "[", "2", ":", "]", ",", "16", ")", "elif", "text", ".", "startswith", "("...
Convert text to int, raising exception on invalid input
[ "Convert", "text", "to", "int", "raising", "exeception", "on", "invalid", "input" ]
train
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/utils.py#L44-L60
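A quick sketch of the supported prefixes, assuming atrcopy is installed; the literals are illustrative:

from atrcopy.utils import text_to_int

text_to_int("0x2000")  # 8192 (hex)
text_to_int("$2000")   # 8192 (hex)
text_to_int("#8192")   # 8192 (decimal)
text_to_int("%1010")   # 10 (binary)
text_to_int("ff")      # 255 (no prefix falls back to default_base="hex")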
robmcmullen/atrcopy
atrcopy/utils.py
VTOC.assign_sector_numbers
def assign_sector_numbers(self, dirent, sector_list): """ Map out the sectors and link the sectors together. Raises NotEnoughSpaceOnDisk if the whole file won't fit. It will not allow partial writes. """ num = len(sector_list) order = self.reserve_space(num) if len(order) != num: raise errors.InvalidFile("VTOC reserved space for %d sectors. Sectors needed: %d" % (len(order), num)) file_length = 0 last_sector = None for sector, sector_num in zip(sector_list.sectors, order): sector.sector_num = sector_num sector.file_num = dirent.file_num file_length += sector.used if last_sector is not None: last_sector.next_sector_num = sector_num last_sector = sector if last_sector is not None: last_sector.next_sector_num = 0 sector_list.file_length = file_length
python
def assign_sector_numbers(self, dirent, sector_list): """ Map out the sectors and link the sectors together. Raises NotEnoughSpaceOnDisk if the whole file won't fit. It will not allow partial writes. """ num = len(sector_list) order = self.reserve_space(num) if len(order) != num: raise errors.InvalidFile("VTOC reserved space for %d sectors. Sectors needed: %d" % (len(order), num)) file_length = 0 last_sector = None for sector, sector_num in zip(sector_list.sectors, order): sector.sector_num = sector_num sector.file_num = dirent.file_num file_length += sector.used if last_sector is not None: last_sector.next_sector_num = sector_num last_sector = sector if last_sector is not None: last_sector.next_sector_num = 0 sector_list.file_length = file_length
[ "def", "assign_sector_numbers", "(", "self", ",", "dirent", ",", "sector_list", ")", ":", "num", "=", "len", "(", "sector_list", ")", "order", "=", "self", ".", "reserve_space", "(", "num", ")", "if", "len", "(", "order", ")", "!=", "num", ":", "raise"...
Map out the sectors and link the sectors together. Raises NotEnoughSpaceOnDisk if the whole file won't fit. It will not allow partial writes.
[ "Map", "out", "the", "sectors", "and", "link", "the", "sectors", "together" ]
train
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/utils.py#L322-L343
ZELLMECHANIK-DRESDEN/dclab
dclab/downsampling.py
downsample_rand
def downsample_rand(a, samples, remove_invalid=False, ret_idx=False): """Downsampling by randomly removing points Parameters ---------- a: 1d ndarray The input array to downsample samples: int The desired number of samples remove_invalid: bool Remove nan and inf values before downsampling ret_idx: bool Also return a boolean array that corresponds to the downsampled indices in `a`. Returns ------- dsa: 1d ndarray of size `samples` The pseudo-randomly downsampled array `a` idx: 1d boolean array with same shape as `a` Only returned if `ret_idx` is True. A boolean array such that `a[idx] == dsa` """ # fixed random state for this method rs = np.random.RandomState(seed=47).get_state() np.random.set_state(rs) samples = int(samples) if remove_invalid: # slice out nans and infs bad = np.isnan(a) | np.isinf(a) pool = a[~bad] else: pool = a if samples and (samples < pool.shape[0]): keep = np.zeros_like(pool, dtype=bool) keep_ids = np.random.choice(np.arange(pool.size), size=samples, replace=False) keep[keep_ids] = True dsa = pool[keep] else: keep = np.ones_like(pool, dtype=bool) dsa = pool if remove_invalid: # translate the kept values back to the original array idx = np.zeros(a.size, dtype=bool) idx[~bad] = keep else: idx = keep if ret_idx: return dsa, idx else: return dsa
python
def downsample_rand(a, samples, remove_invalid=False, ret_idx=False): """Downsampling by randomly removing points Parameters ---------- a: 1d ndarray The input array to downsample samples: int The desired number of samples remove_invalid: bool Remove nan and inf values before downsampling ret_idx: bool Also return a boolean array that corresponds to the downsampled indices in `a`. Returns ------- dsa: 1d ndarray of size `samples` The pseudo-randomly downsampled array `a` idx: 1d boolean array with same shape as `a` Only returned if `ret_idx` is True. A boolean array such that `a[idx] == dsa` """ # fixed random state for this method rs = np.random.RandomState(seed=47).get_state() np.random.set_state(rs) samples = int(samples) if remove_invalid: # slice out nans and infs bad = np.isnan(a) | np.isinf(a) pool = a[~bad] else: pool = a if samples and (samples < pool.shape[0]): keep = np.zeros_like(pool, dtype=bool) keep_ids = np.random.choice(np.arange(pool.size), size=samples, replace=False) keep[keep_ids] = True dsa = pool[keep] else: keep = np.ones_like(pool, dtype=bool) dsa = pool if remove_invalid: # translate the kept values back to the original array idx = np.zeros(a.size, dtype=bool) idx[~bad] = keep else: idx = keep if ret_idx: return dsa, idx else: return dsa
[ "def", "downsample_rand", "(", "a", ",", "samples", ",", "remove_invalid", "=", "False", ",", "ret_idx", "=", "False", ")", ":", "# fixed random state for this method", "rs", "=", "np", ".", "random", ".", "RandomState", "(", "seed", "=", "47", ")", ".", "...
Downsampling by randomly removing points Parameters ---------- a: 1d ndarray The input array to downsample samples: int The desired number of samples remove_invalid: bool Remove nan and inf values before downsampling ret_idx: bool Also return a boolean array that corresponds to the downsampled indices in `a`. Returns ------- dsa: 1d ndarray of size `samples` The pseudo-randomly downsampled array `a` idx: 1d boolean array with same shape as `a` Only returned if `ret_idx` is True. A boolean array such that `a[idx] == dsa`
[ "Downsampling", "by", "randomly", "removing", "points" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/downsampling.py#L11-L68
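Usage sketch for downsample_rand, assuming dclab is installed; the nan injection is illustrative:

import numpy as np
from dclab.downsampling import downsample_rand

a = np.arange(1000, dtype=float)
a[::100] = np.nan  # inject some invalid values
dsa, idx = downsample_rand(a, samples=50, remove_invalid=True, ret_idx=True)
assert dsa.size == 50
assert np.all(a[idx] == dsa)  # idx maps the kept values back into `a`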
ZELLMECHANIK-DRESDEN/dclab
dclab/downsampling.py
downsample_grid
def downsample_grid(a, b, samples, ret_idx=False): """Content-based downsampling for faster visualization The arrays `a` and `b` make up a 2D scatter plot with high and low density values. This method takes out points at indices with high density. Parameters ---------- a, b: 1d ndarrays The input arrays to downsample samples: int The desired number of samples ret_idx: bool Also return a boolean array that corresponds to the downsampled indices in `a` and `b`. Returns ------- dsa, dsb: 1d ndarrays of shape (samples,) The arrays `a` and `b` downsampled by evenly selecting points and pseudo-randomly adding or removing points to match `samples`. idx: 1d boolean array with same shape as `a` Only returned if `ret_idx` is True. A boolean array such that `a[idx] == dsa` """ # fixed random state for this method rs = np.random.RandomState(seed=47).get_state() samples = int(samples) if samples and samples < a.size: # The events to keep keep = np.zeros_like(a, dtype=bool) # 1. Produce evenly distributed samples # Choosing grid-size: # - large numbers tend to show actual structures of the sample, # which is not desired for plotting # - small numbers will result in too few samples and, # in order to reach the desired samples, the data must be # upsampled again. # 300 is about the size of the plot in marker sizes and yields # good results. grid_size = 300 xpx = norm(a, a, b) * grid_size ypx = norm(b, b, a) * grid_size # The events on the grid to process toproc = np.ones((grid_size, grid_size), dtype=bool) for ii in range(xpx.size): xi = xpx[ii] yi = ypx[ii] # filter for overlapping events if valid(xi, yi) and toproc[int(xi-1), int(yi-1)]: toproc[int(xi-1), int(yi-1)] = False # include event keep[ii] = True # 2. Make sure that we reach `samples` by adding or # removing events. diff = np.sum(keep) - samples if diff > 0: # Too many samples rem_indices = np.where(keep)[0] np.random.set_state(rs) rem = np.random.choice(rem_indices, size=diff, replace=False) keep[rem] = False elif diff < 0: # Not enough samples add_indices = np.where(~keep)[0] np.random.set_state(rs) add = np.random.choice(add_indices, size=abs(diff), replace=False) keep[add] = True assert np.sum(keep) == samples, "sanity check" asd = a[keep] bsd = b[keep] assert np.allclose(a[keep], asd, equal_nan=True), "sanity check" assert np.allclose(b[keep], bsd, equal_nan=True), "sanity check" else: keep = np.ones_like(a, dtype=bool) asd = a bsd = b if ret_idx: return asd, bsd, keep else: return asd, bsd
python
def downsample_grid(a, b, samples, ret_idx=False): """Content-based downsampling for faster visualization The arrays `a` and `b` make up a 2D scatter plot with high and low density values. This method takes out points at indices with high density. Parameters ---------- a, b: 1d ndarrays The input arrays to downsample samples: int The desired number of samples ret_idx: bool Also return a boolean array that corresponds to the downsampled indices in `a` and `b`. Returns ------- dsa, dsb: 1d ndarrays of shape (samples,) The arrays `a` and `b` downsampled by evenly selecting points and pseudo-randomly adding or removing points to match `samples`. idx: 1d boolean array with same shape as `a` Only returned if `ret_idx` is True. A boolean array such that `a[idx] == dsa` """ # fixed random state for this method rs = np.random.RandomState(seed=47).get_state() samples = int(samples) if samples and samples < a.size: # The events to keep keep = np.zeros_like(a, dtype=bool) # 1. Produce evenly distributed samples # Choosing grid-size: # - large numbers tend to show actual structures of the sample, # which is not desired for plotting # - small numbers will result in too few samples and, # in order to reach the desired samples, the data must be # upsampled again. # 300 is about the size of the plot in marker sizes and yields # good results. grid_size = 300 xpx = norm(a, a, b) * grid_size ypx = norm(b, b, a) * grid_size # The events on the grid to process toproc = np.ones((grid_size, grid_size), dtype=bool) for ii in range(xpx.size): xi = xpx[ii] yi = ypx[ii] # filter for overlapping events if valid(xi, yi) and toproc[int(xi-1), int(yi-1)]: toproc[int(xi-1), int(yi-1)] = False # include event keep[ii] = True # 2. Make sure that we reach `samples` by adding or # removing events. diff = np.sum(keep) - samples if diff > 0: # Too many samples rem_indices = np.where(keep)[0] np.random.set_state(rs) rem = np.random.choice(rem_indices, size=diff, replace=False) keep[rem] = False elif diff < 0: # Not enough samples add_indices = np.where(~keep)[0] np.random.set_state(rs) add = np.random.choice(add_indices, size=abs(diff), replace=False) keep[add] = True assert np.sum(keep) == samples, "sanity check" asd = a[keep] bsd = b[keep] assert np.allclose(a[keep], asd, equal_nan=True), "sanity check" assert np.allclose(b[keep], bsd, equal_nan=True), "sanity check" else: keep = np.ones_like(a, dtype=bool) asd = a bsd = b if ret_idx: return asd, bsd, keep else: return asd, bsd
[ "def", "downsample_grid", "(", "a", ",", "b", ",", "samples", ",", "ret_idx", "=", "False", ")", ":", "# fixed random state for this method", "rs", "=", "np", ".", "random", ".", "RandomState", "(", "seed", "=", "47", ")", ".", "get_state", "(", ")", "sa...
Content-based downsampling for faster visualization The arrays `a` and `b` make up a 2D scatter plot with high and low density values. This method takes out points at indices with high density. Parameters ---------- a, b: 1d ndarrays The input arrays to downsample samples: int The desired number of samples ret_idx: bool Also return a boolean array that corresponds to the downsampled indices in `a` and `b`. Returns ------- dsa, dsb: 1d ndarrays of shape (samples,) The arrays `a` and `b` downsampled by evenly selecting points and pseudo-randomly adding or removing points to match `samples`. idx: 1d boolean array with same shape as `a` Only returned if `ret_idx` is True. A boolean array such that `a[idx] == dsa`
[ "Content", "-", "based", "downsampling", "for", "faster", "visualization" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/downsampling.py#L72-L167
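Usage sketch for downsample_grid, assuming dclab is installed; the Gaussian point cloud is illustrative:

import numpy as np
from dclab.downsampling import downsample_grid

rng = np.random.RandomState(0)
x = rng.normal(size=10000)
y = rng.normal(size=10000)
xs, ys, keep = downsample_grid(x, y, samples=500, ret_idx=True)
assert xs.size == 500  # dense regions are thinned, sparse points survive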
ZELLMECHANIK-DRESDEN/dclab
dclab/downsampling.py
valid
def valid(a, b): """Check whether `a` and `b` are not inf or nan""" return ~(np.isnan(a) | np.isinf(a) | np.isnan(b) | np.isinf(b))
python
def valid(a, b): """Check whether `a` and `b` are not inf or nan""" return ~(np.isnan(a) | np.isinf(a) | np.isnan(b) | np.isinf(b))
[ "def", "valid", "(", "a", ",", "b", ")", ":", "return", "~", "(", "np", ".", "isnan", "(", "a", ")", "|", "np", ".", "isinf", "(", "a", ")", "|", "np", ".", "isnan", "(", "b", ")", "|", "np", ".", "isinf", "(", "b", ")", ")" ]
Check whether `a` and `b` are not inf or nan
[ "Check", "whether", "a", "and", "b", "are", "not", "inf", "or", "nan" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/downsampling.py#L170-L172
ZELLMECHANIK-DRESDEN/dclab
dclab/downsampling.py
norm
def norm(a, ref1, ref2): """ Normalize `a` with min/max values of `ref1`, using all elements of `ref1` where `ref1` and `ref2` are not nan or inf""" ref = ref1[valid(ref1, ref2)] return (a-ref.min())/(ref.max()-ref.min())
python
def norm(a, ref1, ref2): """ Normalize `a` with min/max values of `ref1`, using all elements of `ref1` where `ref1` and `ref2` are not nan or inf""" ref = ref1[valid(ref1, ref2)] return (a-ref.min())/(ref.max()-ref.min())
[ "def", "norm", "(", "a", ",", "ref1", ",", "ref2", ")", ":", "ref", "=", "ref1", "[", "valid", "(", "ref1", ",", "ref2", ")", "]", "return", "(", "a", "-", "ref", ".", "min", "(", ")", ")", "/", "(", "ref", ".", "max", "(", ")", "-", "ref...
Normalize `a` with min/max values of `ref1`, using all elements of `ref1` where `ref1` and `ref2` are not nan or inf
[ "Normalize", "a", "with", "min", "/", "max", "values", "of", "ref1", "using", "all", "elements", "of", "ref1", "where", "the", "ref1", "and", "ref2", "are", "not", "nan", "or", "inf" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/downsampling.py#L175-L180
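A tiny sketch showing valid and norm together, assuming dclab is installed; the arrays are illustrative:

import numpy as np
from dclab.downsampling import norm, valid

a = np.array([0.0, 1.0, 2.0, np.nan])
b = np.array([1.0, 1.0, np.inf, 1.0])
valid(a, b)    # array([ True,  True, False, False])
norm(a, a, b)  # scaled by min/max of the valid entries only: [0., 1., 2., nan]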
ZELLMECHANIK-DRESDEN/dclab
dclab/external/statsmodels/nonparametric/kernels.py
gaussian
def gaussian(h, Xi, x): """ Gaussian Kernel for continuous variables Parameters ---------- h : 1-D ndarray, shape (K,) The bandwidths used to estimate the value of the kernel function. Xi : 1-D ndarray, shape (K,) The value of the training set. x : 1-D ndarray, shape (K,) The value at which the kernel density is being estimated. Returns ------- kernel_value : ndarray, shape (nobs, K) The value of the kernel function at each training point for each var. """ return (1. / np.sqrt(2 * np.pi)) * np.exp(-(Xi - x)**2 / (h**2 * 2.))
python
def gaussian(h, Xi, x): """ Gaussian Kernel for continuous variables Parameters ---------- h : 1-D ndarray, shape (K,) The bandwidths used to estimate the value of the kernel function. Xi : 1-D ndarray, shape (K,) The value of the training set. x : 1-D ndarray, shape (K,) The value at which the kernel density is being estimated. Returns ------- kernel_value : ndarray, shape (nobs, K) The value of the kernel function at each training point for each var. """ return (1. / np.sqrt(2 * np.pi)) * np.exp(-(Xi - x)**2 / (h**2 * 2.))
[ "def", "gaussian", "(", "h", ",", "Xi", ",", "x", ")", ":", "return", "(", "1.", "/", "np", ".", "sqrt", "(", "2", "*", "np", ".", "pi", ")", ")", "*", "np", ".", "exp", "(", "-", "(", "Xi", "-", "x", ")", "**", "2", "/", "(", "h", "*...
Gaussian Kernel for continuous variables Parameters ---------- h : 1-D ndarray, shape (K,) The bandwidths used to estimate the value of the kernel function. Xi : 1-D ndarray, shape (K,) The value of the training set. x : 1-D ndarray, shape (K,) The value at which the kernel density is being estimated. Returns ------- kernel_value : ndarray, shape (nobs, K) The value of the kernel function at each training point for each var.
[ "Gaussian", "Kernel", "for", "continuous", "variables", "Parameters", "----------", "h", ":", "1", "-", "D", "ndarray", "shape", "(", "K", ")", "The", "bandwidths", "used", "to", "estimate", "the", "value", "of", "the", "kernel", "function", ".", "Xi", ":"...
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/external/statsmodels/nonparametric/kernels.py#L19-L37
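A standalone transcription of the kernel formula in plain numpy, runnable without the vendored statsmodels code; the shapes follow the docstring above:

import numpy as np

h = np.array([0.5])                   # bandwidths, shape (K,)
Xi = np.array([[0.0], [1.0], [2.0]])  # training points, shape (nobs, K)
x = np.array([1.0])                   # evaluation point, shape (K,)
kernel_value = (1. / np.sqrt(2 * np.pi)) * np.exp(-(Xi - x)**2 / (h**2 * 2.))
# kernel_value has shape (nobs, K): one Gaussian weight per training point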
ZELLMECHANIK-DRESDEN/dclab
dclab/rtdc_dataset/fmt_hdf5.py
RTDC_HDF5.parse_config
def parse_config(h5path): """Parse the RT-DC configuration of an hdf5 file""" with h5py.File(h5path, mode="r") as fh5: h5attrs = dict(fh5.attrs) # Convert byte strings to unicode strings # https://github.com/h5py/h5py/issues/379 for key in h5attrs: if isinstance(h5attrs[key], bytes): h5attrs[key] = h5attrs[key].decode("utf-8") config = Configuration() for key in h5attrs: section, pname = key.split(":") if pname not in dfn.config_funcs[section]: # Add the value as a string but issue a warning config[section][pname] = h5attrs[key] msg = "Unknown key '{}' in section [{}]!".format( pname, section) warnings.warn(msg, UnknownKeyWarning) else: typ = dfn.config_funcs[section][pname] config[section][pname] = typ(h5attrs[key]) return config
python
def parse_config(h5path): """Parse the RT-DC configuration of an hdf5 file""" with h5py.File(h5path, mode="r") as fh5: h5attrs = dict(fh5.attrs) # Convert byte strings to unicode strings # https://github.com/h5py/h5py/issues/379 for key in h5attrs: if isinstance(h5attrs[key], bytes): h5attrs[key] = h5attrs[key].decode("utf-8") config = Configuration() for key in h5attrs: section, pname = key.split(":") if pname not in dfn.config_funcs[section]: # Add the value as a string but issue a warning config[section][pname] = h5attrs[key] msg = "Unknown key '{}' in section [{}]!".format( pname, section) warnings.warn(msg, UnknownKeyWarning) else: typ = dfn.config_funcs[section][pname] config[section][pname] = typ(h5attrs[key]) return config
[ "def", "parse_config", "(", "h5path", ")", ":", "with", "h5py", ".", "File", "(", "h5path", ",", "mode", "=", "\"r\"", ")", "as", "fh5", ":", "h5attrs", "=", "dict", "(", "fh5", ".", "attrs", ")", "# Convert byte strings to unicode strings", "# https://githu...
Parse the RT-DC configuration of an hdf5 file
[ "Parse", "the", "RT", "-", "DC", "configuration", "of", "an", "hdf5", "file" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hdf5.py#L144-L167
ZELLMECHANIK-DRESDEN/dclab
dclab/rtdc_dataset/fmt_hdf5.py
RTDC_HDF5.hash
def hash(self): """Hash value based on file name and content""" if self._hash is None: tohash = [self.path.name] # Hash a maximum of ~1MB of the hdf5 file tohash.append(hashfile(self.path, blocksize=65536, count=20)) self._hash = hashobj(tohash) return self._hash
python
def hash(self): """Hash value based on file name and content""" if self._hash is None: tohash = [self.path.name] # Hash a maximum of ~1MB of the hdf5 file tohash.append(hashfile(self.path, blocksize=65536, count=20)) self._hash = hashobj(tohash) return self._hash
[ "def", "hash", "(", "self", ")", ":", "if", "self", ".", "_hash", "is", "None", ":", "tohash", "=", "[", "self", ".", "path", ".", "name", "]", "# Hash a maximum of ~1MB of the hdf5 file", "tohash", ".", "append", "(", "hashfile", "(", "self", ".", "path...
Hash value based on file name and content
[ "Hash", "value", "based", "on", "file", "name", "and", "content" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hdf5.py#L170-L177
ZELLMECHANIK-DRESDEN/dclab
dclab/kde_methods.py
bin_num_doane
def bin_num_doane(a): """Compute number of bins based on Doane's formula""" bad = np.isnan(a) | np.isinf(a) data = a[~bad] acc = bin_width_doane(a) num = np.int(np.round((data.max() - data.min()) / acc)) return num
python
def bin_num_doane(a): """Compute number of bins based on Doane's formula""" bad = np.isnan(a) | np.isinf(a) data = a[~bad] acc = bin_width_doane(a) num = np.int(np.round((data.max() - data.min()) / acc)) return num
[ "def", "bin_num_doane", "(", "a", ")", ":", "bad", "=", "np", ".", "isnan", "(", "a", ")", "|", "np", ".", "isinf", "(", "a", ")", "data", "=", "a", "[", "~", "bad", "]", "acc", "=", "bin_width_doane", "(", "a", ")", "num", "=", "np", ".", ...
Compute number of bins based on Doane's formula
[ "Compute", "number", "of", "bins", "based", "on", "Doane", "s", "formula" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L14-L20
ZELLMECHANIK-DRESDEN/dclab
dclab/kde_methods.py
bin_width_doane
def bin_width_doane(a): """Compute accuracy (bin width) based on Doane's formula References ---------- - `<https://en.wikipedia.org/wiki/Histogram#Number_of_bins_and_width>`_ - `<https://stats.stackexchange.com/questions/55134/ doanes-formula-for-histogram-binning>`_ """ bad = np.isnan(a) | np.isinf(a) data = a[~bad] n = data.size g1 = skew(data) sigma_g1 = np.sqrt(6 * (n - 2) / ((n + 1) * (n + 3))) k = 1 + np.log2(n) + np.log2(1 + np.abs(g1) / sigma_g1) acc = (data.max() - data.min()) / k return acc
python
def bin_width_doane(a): """Compute accuracy (bin width) based on Doane's formula References ---------- - `<https://en.wikipedia.org/wiki/Histogram#Number_of_bins_and_width>`_ - `<https://stats.stackexchange.com/questions/55134/ doanes-formula-for-histogram-binning>`_ """ bad = np.isnan(a) | np.isinf(a) data = a[~bad] n = data.size g1 = skew(data) sigma_g1 = np.sqrt(6 * (n - 2) / ((n + 1) * (n + 3))) k = 1 + np.log2(n) + np.log2(1 + np.abs(g1) / sigma_g1) acc = (data.max() - data.min()) / k return acc
[ "def", "bin_width_doane", "(", "a", ")", ":", "bad", "=", "np", ".", "isnan", "(", "a", ")", "|", "np", ".", "isinf", "(", "a", ")", "data", "=", "a", "[", "~", "bad", "]", "n", "=", "data", ".", "size", "g1", "=", "skew", "(", "data", ")",...
Compute accuracy (bin width) based on Doane's formula References ---------- - `<https://en.wikipedia.org/wiki/Histogram#Number_of_bins_and_width>`_ - `<https://stats.stackexchange.com/questions/55134/ doanes-formula-for-histogram-binning>`_
[ "Compute", "accuracy", "(", "bin", "width", ")", "based", "on", "Doane", "s", "formula" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L23-L39
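A standalone transcription of Doane's formula, cross-checked against numpy's built-in estimator; the helper name doane_width is hypothetical and requires scipy:

import numpy as np
from scipy.stats import skew

def doane_width(data):
    # bin width according to Doane's formula, as in the record above
    n = data.size
    g1 = skew(data)
    sigma_g1 = np.sqrt(6 * (n - 2) / ((n + 1) * (n + 3)))
    k = 1 + np.log2(n) + np.log2(1 + np.abs(g1) / sigma_g1)
    return (data.max() - data.min()) / k

data = np.random.RandomState(1).exponential(size=1000)  # skewed sample
width = doane_width(data)
edges = np.histogram_bin_edges(data, bins="doane")  # numpy's estimator, for comparison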
ZELLMECHANIK-DRESDEN/dclab
dclab/kde_methods.py
ignore_nan_inf
def ignore_nan_inf(kde_method): """Ignores nans and infs from the input data Invalid positions in the resulting density are set to nan. """ def new_kde_method(events_x, events_y, xout=None, yout=None, *args, **kwargs): bad_in = get_bad_vals(events_x, events_y) if xout is None: density = np.zeros_like(events_x, dtype=float) bad_out = bad_in xo = yo = None else: density = np.zeros_like(xout, dtype=float) bad_out = get_bad_vals(xout, yout) xo = xout[~bad_out] yo = yout[~bad_out] # Filter events ev_x = events_x[~bad_in] ev_y = events_y[~bad_in] density[~bad_out] = kde_method(ev_x, ev_y, xo, yo, *args, **kwargs) density[bad_out] = np.nan return density doc_add = "\n Notes\n" +\ " -----\n" +\ " This is a wrapped version that ignores nan and inf values." new_kde_method.__doc__ = kde_method.__doc__ + doc_add return new_kde_method
python
def ignore_nan_inf(kde_method): """Ignores nans and infs from the input data Invalid positions in the resulting density are set to nan. """ def new_kde_method(events_x, events_y, xout=None, yout=None, *args, **kwargs): bad_in = get_bad_vals(events_x, events_y) if xout is None: density = np.zeros_like(events_x, dtype=float) bad_out = bad_in xo = yo = None else: density = np.zeros_like(xout, dtype=float) bad_out = get_bad_vals(xout, yout) xo = xout[~bad_out] yo = yout[~bad_out] # Filter events ev_x = events_x[~bad_in] ev_y = events_y[~bad_in] density[~bad_out] = kde_method(ev_x, ev_y, xo, yo, *args, **kwargs) density[bad_out] = np.nan return density doc_add = "\n Notes\n" +\ " -----\n" +\ " This is a wrapped version that ignores nan and inf values." new_kde_method.__doc__ = kde_method.__doc__ + doc_add return new_kde_method
[ "def", "ignore_nan_inf", "(", "kde_method", ")", ":", "def", "new_kde_method", "(", "events_x", ",", "events_y", ",", "xout", "=", "None", ",", "yout", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "bad_in", "=", "get_bad_vals", "("...
Ignores nans and infs from the input data

    Invalid positions in the resulting density are set to nan.
[ "Ignores", "nans", "and", "infs", "from", "the", "input", "data" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L46-L77
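Editor's note: since ignore_nan_inf is a decorator, a short sketch with a hypothetical toy KDE method illustrates the contract: invalid inputs are filtered before the wrapped method runs, and the corresponding output positions come back as nan (the toy method below is not part of dclab):

import numpy as np
from dclab.kde_methods import ignore_nan_inf

@ignore_nan_inf
def kde_toy(events_x, events_y, xout=None, yout=None):
    """Toy KDE that returns a constant density."""
    if xout is None:
        xout = events_x
    return np.ones(xout.shape)

x = np.array([1.0, np.nan, 3.0])
y = np.array([2.0, 2.5, np.inf])
print(kde_toy(x, y))  # -> [ 1. nan nan]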
ZELLMECHANIK-DRESDEN/dclab
dclab/kde_methods.py
kde_gauss
def kde_gauss(events_x, events_y, xout=None, yout=None):
    """ Gaussian Kernel Density Estimation

    Parameters
    ----------
    events_x, events_y: 1D ndarray
        The input points for kernel density estimation. Input
        is flattened automatically.
    xout, yout: ndarray
        The coordinates at which the KDE should be computed.
        If set to `None`, input coordinates are used.

    Returns
    -------
    density: ndarray, same shape as `xout`
        The KDE for the points in (xout, yout)

    See Also
    --------
    `scipy.stats.gaussian_kde`
    """
    valid_combi = ((xout is None and yout is None) or
                   (xout is not None and yout is not None)
                   )
    if not valid_combi:
        raise ValueError("Both `xout` and `yout` must be (un)set.")

    if xout is None and yout is None:
        xout = events_x
        yout = events_y

    try:
        estimator = gaussian_kde([events_x.flatten(), events_y.flatten()])
        density = estimator.evaluate([xout.flatten(), yout.flatten()])
    except np.linalg.LinAlgError:
        # LinAlgError occurs when the matrix to solve is singular (issue #117)
        density = np.zeros(xout.shape) * np.nan

    return density.reshape(xout.shape)
python
def kde_gauss(events_x, events_y, xout=None, yout=None):
    """ Gaussian Kernel Density Estimation

    Parameters
    ----------
    events_x, events_y: 1D ndarray
        The input points for kernel density estimation. Input
        is flattened automatically.
    xout, yout: ndarray
        The coordinates at which the KDE should be computed.
        If set to `None`, input coordinates are used.

    Returns
    -------
    density: ndarray, same shape as `xout`
        The KDE for the points in (xout, yout)

    See Also
    --------
    `scipy.stats.gaussian_kde`
    """
    valid_combi = ((xout is None and yout is None) or
                   (xout is not None and yout is not None)
                   )
    if not valid_combi:
        raise ValueError("Both `xout` and `yout` must be (un)set.")

    if xout is None and yout is None:
        xout = events_x
        yout = events_y

    try:
        estimator = gaussian_kde([events_x.flatten(), events_y.flatten()])
        density = estimator.evaluate([xout.flatten(), yout.flatten()])
    except np.linalg.LinAlgError:
        # LinAlgError occurs when the matrix to solve is singular (issue #117)
        density = np.zeros(xout.shape) * np.nan

    return density.reshape(xout.shape)
[ "def", "kde_gauss", "(", "events_x", ",", "events_y", ",", "xout", "=", "None", ",", "yout", "=", "None", ")", ":", "valid_combi", "=", "(", "(", "xout", "is", "None", "and", "yout", "is", "None", ")", "or", "(", "xout", "is", "not", "None", "and",...
Gaussian Kernel Density Estimation

    Parameters
    ----------
    events_x, events_y: 1D ndarray
        The input points for kernel density estimation. Input
        is flattened automatically.
    xout, yout: ndarray
        The coordinates at which the KDE should be computed.
        If set to `None`, input coordinates are used.

    Returns
    -------
    density: ndarray, same shape as `xout`
        The KDE for the points in (xout, yout)

    See Also
    --------
    `scipy.stats.gaussian_kde`
[ "Gaussian", "Kernel", "Density", "Estimation" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L82-L119
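Editor's note: a minimal sketch of evaluating kde_gauss on a regular grid (event data and grid bounds are illustrative; assumes dclab is installed):

import numpy as np
from dclab.kde_methods import kde_gauss

rng = np.random.default_rng(0)
events_x = rng.normal(50, 10, 500)      # e.g. area_um-like values
events_y = rng.normal(0.05, 0.01, 500)  # e.g. deform-like values
xout, yout = np.meshgrid(np.linspace(20, 80, 100),
                         np.linspace(0.01, 0.1, 100),
                         indexing="ij")
density = kde_gauss(events_x, events_y, xout=xout, yout=yout)
assert density.shape == xout.shape  # (100, 100)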
ZELLMECHANIK-DRESDEN/dclab
dclab/kde_methods.py
kde_histogram
def kde_histogram(events_x, events_y, xout=None, yout=None, bins=None):
    """ Histogram-based Kernel Density Estimation

    Parameters
    ----------
    events_x, events_y: 1D ndarray
        The input points for kernel density estimation. Input
        is flattened automatically.
    xout, yout: ndarray
        The coordinates at which the KDE should be computed.
        If set to `None`, input coordinates are used.
    bins: tuple (binsx, binsy)
        The number of bins to use for the histogram.

    Returns
    -------
    density: ndarray, same shape as `xout`
        The KDE for the points in (xout, yout)

    See Also
    --------
    `numpy.histogram2d`
    `scipy.interpolate.RectBivariateSpline`
    """
    valid_combi = ((xout is None and yout is None) or
                   (xout is not None and yout is not None)
                   )
    if not valid_combi:
        raise ValueError("Both `xout` and `yout` must be (un)set.")

    if xout is None and yout is None:
        xout = events_x
        yout = events_y

    if bins is None:
        bins = (max(5, bin_num_doane(events_x)),
                max(5, bin_num_doane(events_y)))

    # Compute the histogram ("density" replaces the deprecated "normed"
    # argument of numpy.histogram2d)
    hist2d, xedges, yedges = np.histogram2d(x=events_x,
                                            y=events_y,
                                            bins=bins,
                                            density=True)
    xip = xedges[1:] - (xedges[1] - xedges[0]) / 2
    yip = yedges[1:] - (yedges[1] - yedges[0]) / 2

    estimator = RectBivariateSpline(x=xip, y=yip, z=hist2d)
    density = estimator.ev(xout, yout)
    density[density < 0] = 0

    return density.reshape(xout.shape)
python
def kde_histogram(events_x, events_y, xout=None, yout=None, bins=None):
    """ Histogram-based Kernel Density Estimation

    Parameters
    ----------
    events_x, events_y: 1D ndarray
        The input points for kernel density estimation. Input
        is flattened automatically.
    xout, yout: ndarray
        The coordinates at which the KDE should be computed.
        If set to `None`, input coordinates are used.
    bins: tuple (binsx, binsy)
        The number of bins to use for the histogram.

    Returns
    -------
    density: ndarray, same shape as `xout`
        The KDE for the points in (xout, yout)

    See Also
    --------
    `numpy.histogram2d`
    `scipy.interpolate.RectBivariateSpline`
    """
    valid_combi = ((xout is None and yout is None) or
                   (xout is not None and yout is not None)
                   )
    if not valid_combi:
        raise ValueError("Both `xout` and `yout` must be (un)set.")

    if xout is None and yout is None:
        xout = events_x
        yout = events_y

    if bins is None:
        bins = (max(5, bin_num_doane(events_x)),
                max(5, bin_num_doane(events_y)))

    # Compute the histogram ("density" replaces the deprecated "normed"
    # argument of numpy.histogram2d)
    hist2d, xedges, yedges = np.histogram2d(x=events_x,
                                            y=events_y,
                                            bins=bins,
                                            density=True)
    xip = xedges[1:] - (xedges[1] - xedges[0]) / 2
    yip = yedges[1:] - (yedges[1] - yedges[0]) / 2

    estimator = RectBivariateSpline(x=xip, y=yip, z=hist2d)
    density = estimator.ev(xout, yout)
    density[density < 0] = 0

    return density.reshape(xout.shape)
[ "def", "kde_histogram", "(", "events_x", ",", "events_y", ",", "xout", "=", "None", ",", "yout", "=", "None", ",", "bins", "=", "None", ")", ":", "valid_combi", "=", "(", "(", "xout", "is", "None", "and", "yout", "is", "None", ")", "or", "(", "xout...
Histogram-based Kernel Density Estimation

    Parameters
    ----------
    events_x, events_y: 1D ndarray
        The input points for kernel density estimation. Input
        is flattened automatically.
    xout, yout: ndarray
        The coordinates at which the KDE should be computed.
        If set to `None`, input coordinates are used.
    bins: tuple (binsx, binsy)
        The number of bins to use for the histogram.

    Returns
    -------
    density: ndarray, same shape as `xout`
        The KDE for the points in (xout, yout)

    See Also
    --------
    `numpy.histogram2d`
    `scipy.interpolate.RectBivariateSpline`
[ "Histogram", "-", "based", "Kernel", "Density", "Estimation" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L124-L174
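Editor's note: a sketch comparing automatic Doane binning with an explicit bin tuple (illustrative data; assumes dclab is installed):

import numpy as np
from dclab.kde_methods import kde_histogram

rng = np.random.default_rng(1)
ex = rng.normal(50, 10, 2000)
ey = rng.normal(0.05, 0.01, 2000)
# Automatic binning (Doane), evaluated at the input points themselves:
dens_auto = kde_histogram(ex, ey)
# Explicit 50x50 binning:
dens_50 = kde_histogram(ex, ey, bins=(50, 50))
print(dens_auto.shape, dens_50.shape)  # (2000,) (2000,)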
ZELLMECHANIK-DRESDEN/dclab
dclab/kde_methods.py
kde_none
def kde_none(events_x, events_y, xout=None, yout=None):
    """ No Kernel Density Estimation

    Parameters
    ----------
    events_x, events_y: 1D ndarray
        The input points for kernel density estimation. Input
        is flattened automatically.
    xout, yout: ndarray
        The coordinates at which the KDE should be computed.
        If set to `None`, input coordinates are used.

    Returns
    -------
    density: ndarray, same shape as `xout`
        The KDE for the points in (xout, yout)

    Notes
    -----
    This method is a convenience method that always returns ones in the shape
    that the other methods in this module produce.
    """
    valid_combi = ((xout is None and yout is None) or
                   (xout is not None and yout is not None)
                   )
    if not valid_combi:
        raise ValueError("Both `xout` and `yout` must be (un)set.")

    if xout is None and yout is None:
        xout = events_x
        yout = events_y

    return np.ones(xout.shape)
python
def kde_none(events_x, events_y, xout=None, yout=None):
    """ No Kernel Density Estimation

    Parameters
    ----------
    events_x, events_y: 1D ndarray
        The input points for kernel density estimation. Input
        is flattened automatically.
    xout, yout: ndarray
        The coordinates at which the KDE should be computed.
        If set to `None`, input coordinates are used.

    Returns
    -------
    density: ndarray, same shape as `xout`
        The KDE for the points in (xout, yout)

    Notes
    -----
    This method is a convenience method that always returns ones in the shape
    that the other methods in this module produce.
    """
    valid_combi = ((xout is None and yout is None) or
                   (xout is not None and yout is not None)
                   )
    if not valid_combi:
        raise ValueError("Both `xout` and `yout` must be (un)set.")

    if xout is None and yout is None:
        xout = events_x
        yout = events_y

    return np.ones(xout.shape)
[ "def", "kde_none", "(", "events_x", ",", "events_y", ",", "xout", "=", "None", ",", "yout", "=", "None", ")", ":", "valid_combi", "=", "(", "(", "xout", "is", "None", "and", "yout", "is", "None", ")", "or", "(", "xout", "is", "not", "None", "and", ...
No Kernel Density Estimation

    Parameters
    ----------
    events_x, events_y: 1D ndarray
        The input points for kernel density estimation. Input
        is flattened automatically.
    xout, yout: ndarray
        The coordinates at which the KDE should be computed.
        If set to `None`, input coordinates are used.

    Returns
    -------
    density: ndarray, same shape as `xout`
        The KDE for the points in (xout, yout)

    Notes
    -----
    This method is a convenience method that always returns ones in the shape
    that the other methods in this module produce.
[ "No", "Kernel", "Density", "Estimation" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L177-L209
ZELLMECHANIK-DRESDEN/dclab
dclab/kde_methods.py
kde_multivariate
def kde_multivariate(events_x, events_y, xout=None, yout=None, bw=None):
    """ Multivariate Kernel Density Estimation

    Parameters
    ----------
    events_x, events_y: 1D ndarray
        The input points for kernel density estimation. Input
        is flattened automatically.
    bw: tuple (bwx, bwy) or None
        The bandwidth for kernel density estimation.
    xout, yout: ndarray
        The coordinates at which the KDE should be computed.
        If set to `None`, input coordinates are used.

    Returns
    -------
    density: ndarray, same shape as `xout`
        The KDE for the points in (xout, yout)

    See Also
    --------
    `statsmodels.nonparametric.kernel_density.KDEMultivariate`
    """
    valid_combi = ((xout is None and yout is None) or
                   (xout is not None and yout is not None)
                   )
    if not valid_combi:
        raise ValueError("Both `xout` and `yout` must be (un)set.")

    if xout is None and yout is None:
        xout = events_x
        yout = events_y

    if bw is None:
        # Divide by 2 to make it comparable to the histogram KDE
        bw = (bin_width_doane(events_x) / 2,
              bin_width_doane(events_y) / 2)

    positions = np.vstack([xout.flatten(), yout.flatten()])
    estimator_ly = KDEMultivariate(data=[events_x.flatten(),
                                         events_y.flatten()],
                                   var_type='cc',
                                   bw=bw)

    density = estimator_ly.pdf(positions)
    return density.reshape(xout.shape)
python
def kde_multivariate(events_x, events_y, xout=None, yout=None, bw=None):
    """ Multivariate Kernel Density Estimation

    Parameters
    ----------
    events_x, events_y: 1D ndarray
        The input points for kernel density estimation. Input
        is flattened automatically.
    bw: tuple (bwx, bwy) or None
        The bandwidth for kernel density estimation.
    xout, yout: ndarray
        The coordinates at which the KDE should be computed.
        If set to `None`, input coordinates are used.

    Returns
    -------
    density: ndarray, same shape as `xout`
        The KDE for the points in (xout, yout)

    See Also
    --------
    `statsmodels.nonparametric.kernel_density.KDEMultivariate`
    """
    valid_combi = ((xout is None and yout is None) or
                   (xout is not None and yout is not None)
                   )
    if not valid_combi:
        raise ValueError("Both `xout` and `yout` must be (un)set.")

    if xout is None and yout is None:
        xout = events_x
        yout = events_y

    if bw is None:
        # Divide by 2 to make it comparable to the histogram KDE
        bw = (bin_width_doane(events_x) / 2,
              bin_width_doane(events_y) / 2)

    positions = np.vstack([xout.flatten(), yout.flatten()])
    estimator_ly = KDEMultivariate(data=[events_x.flatten(),
                                         events_y.flatten()],
                                   var_type='cc',
                                   bw=bw)

    density = estimator_ly.pdf(positions)
    return density.reshape(xout.shape)
[ "def", "kde_multivariate", "(", "events_x", ",", "events_y", ",", "xout", "=", "None", ",", "yout", "=", "None", ",", "bw", "=", "None", ")", ":", "valid_combi", "=", "(", "(", "xout", "is", "None", "and", "yout", "is", "None", ")", "or", "(", "xou...
Multivariate Kernel Density Estimation

    Parameters
    ----------
    events_x, events_y: 1D ndarray
        The input points for kernel density estimation. Input
        is flattened automatically.
    bw: tuple (bwx, bwy) or None
        The bandwidth for kernel density estimation.
    xout, yout: ndarray
        The coordinates at which the KDE should be computed.
        If set to `None`, input coordinates are used.

    Returns
    -------
    density: ndarray, same shape as `xout`
        The KDE for the points in (xout, yout)

    See Also
    --------
    `statsmodels.nonparametric.kernel_density.KDEMultivariate`
[ "Multivariate", "Kernel", "Density", "Estimation" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L214-L257
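Editor's note: a sketch of kde_multivariate with an explicit per-axis bandwidth tuple (statsmodels must be installed; values are illustrative):

import numpy as np
from dclab.kde_methods import kde_multivariate

rng = np.random.default_rng(2)
ex = rng.normal(50, 10, 300)
ey = rng.normal(0.05, 0.01, 300)
# Per-axis bandwidths; with bw=None, half the Doane bin width is used instead.
density = kde_multivariate(ex, ey, bw=(2.0, 0.005))
print(density.shape)  # (300,)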
openstax/cnx-archive
cnxarchive/views/legacy_redirect.py
redirect_legacy_content
def redirect_legacy_content(request):
    """Redirect from legacy /content/id/version to new /contents/uuid@version.

    Handles collection context (book) as well.
    """
    routing_args = request.matchdict
    objid = routing_args['objid']
    objver = routing_args.get('objver')
    filename = routing_args.get('filename')

    id, version = _convert_legacy_id(objid, objver)
    if not id:
        raise httpexceptions.HTTPNotFound()

    # We always use 301 redirects (HTTPMovedPermanently) here
    # because we want search engines to move to the newer links.
    # We cache these redirects only briefly because, even when versioned,
    # legacy collection versions don't include the minor version,
    # so the latest archive url could change.
    if filename:
        with db_connect() as db_connection:
            with db_connection.cursor() as cursor:
                args = dict(id=id, version=version, filename=filename)
                cursor.execute(SQL['get-resourceid-by-filename'], args)
                try:
                    res = cursor.fetchone()
                    resourceid = res[0]
                    raise httpexceptions.HTTPMovedPermanently(
                        request.route_path('resource',
                                           hash=resourceid,
                                           ignore=u'/{}'.format(filename)),
                        headers=[("Cache-Control", "max-age=60, public")])
                except TypeError:  # None returned
                    raise httpexceptions.HTTPNotFound()

    ident_hash = join_ident_hash(id, version)
    params = request.params
    if params.get('collection'):  # page in book
        objid, objver = split_legacy_hash(params['collection'])
        book_uuid, book_version = _convert_legacy_id(objid, objver)
        if book_uuid:
            id, ident_hash = \
                _get_page_in_book(id, version, book_uuid, book_version)

    raise httpexceptions.HTTPMovedPermanently(
        request.route_path('content', ident_hash=ident_hash),
        headers=[("Cache-Control", "max-age=60, public")])
python
def redirect_legacy_content(request):
    """Redirect from legacy /content/id/version to new /contents/uuid@version.

    Handles collection context (book) as well.
    """
    routing_args = request.matchdict
    objid = routing_args['objid']
    objver = routing_args.get('objver')
    filename = routing_args.get('filename')

    id, version = _convert_legacy_id(objid, objver)
    if not id:
        raise httpexceptions.HTTPNotFound()

    # We always use 301 redirects (HTTPMovedPermanently) here
    # because we want search engines to move to the newer links.
    # We cache these redirects only briefly because, even when versioned,
    # legacy collection versions don't include the minor version,
    # so the latest archive url could change.
    if filename:
        with db_connect() as db_connection:
            with db_connection.cursor() as cursor:
                args = dict(id=id, version=version, filename=filename)
                cursor.execute(SQL['get-resourceid-by-filename'], args)
                try:
                    res = cursor.fetchone()
                    resourceid = res[0]
                    raise httpexceptions.HTTPMovedPermanently(
                        request.route_path('resource',
                                           hash=resourceid,
                                           ignore=u'/{}'.format(filename)),
                        headers=[("Cache-Control", "max-age=60, public")])
                except TypeError:  # None returned
                    raise httpexceptions.HTTPNotFound()

    ident_hash = join_ident_hash(id, version)
    params = request.params
    if params.get('collection'):  # page in book
        objid, objver = split_legacy_hash(params['collection'])
        book_uuid, book_version = _convert_legacy_id(objid, objver)
        if book_uuid:
            id, ident_hash = \
                _get_page_in_book(id, version, book_uuid, book_version)

    raise httpexceptions.HTTPMovedPermanently(
        request.route_path('content', ident_hash=ident_hash),
        headers=[("Cache-Control", "max-age=60, public")])
[ "def", "redirect_legacy_content", "(", "request", ")", ":", "routing_args", "=", "request", ".", "matchdict", "objid", "=", "routing_args", "[", "'objid'", "]", "objver", "=", "routing_args", ".", "get", "(", "'objver'", ")", "filename", "=", "routing_args", "...
Redirect from legacy /content/id/version to new /contents/uuid@version.

    Handles collection context (book) as well.
[ "Redirect", "from", "legacy", "/", "content", "/", "id", "/", "version", "to", "new", "/", "contents", "/", "uuid@version", "." ]
train
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/legacy_redirect.py#L72-L119
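Editor's note: illustration only — the shape of the mapping this view performs, written as a pure function with a hypothetical lookup callable standing in for _convert_legacy_id and Pyramid's route_path (neither is reproduced here):

def legacy_to_new_path(objid, objver, lookup_uuid_version):
    """Map legacy /content/<objid>/<objver> to /contents/<uuid>@<version>.

    `lookup_uuid_version` stands in for the database lookup done by
    _convert_legacy_id and must return (uuid, version) or (None, None).
    """
    uuid, version = lookup_uuid_version(objid, objver)
    if uuid is None:
        return None  # the real view raises HTTPNotFound here
    return "/contents/{}@{}".format(uuid, version)

# Example with a stubbed lookup:
print(legacy_to_new_path(
    "m12345", "1.2",
    lambda i, v: ("91d4b9c9-0000-4000-8000-000000000000", "1.2")))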
ZELLMECHANIK-DRESDEN/dclab
dclab/isoelastics/__init__.py
Isoelastics._add
def _add(self, isoel, col1, col2, method, meta):
        """Convenience method for populating self._data"""
        self._data[method][col1][col2]["isoelastics"] = isoel
        self._data[method][col1][col2]["meta"] = meta

        # Use advanced slicing to flip the data columns
        isoel_flip = [iso[:, [1, 0, 2]] for iso in isoel]
        self._data[method][col2][col1]["isoelastics"] = isoel_flip
        self._data[method][col2][col1]["meta"] = meta
python
def _add(self, isoel, col1, col2, method, meta):
        """Convenience method for populating self._data"""
        self._data[method][col1][col2]["isoelastics"] = isoel
        self._data[method][col1][col2]["meta"] = meta

        # Use advanced slicing to flip the data columns
        isoel_flip = [iso[:, [1, 0, 2]] for iso in isoel]
        self._data[method][col2][col1]["isoelastics"] = isoel_flip
        self._data[method][col2][col1]["meta"] = meta
[ "def", "_add", "(", "self", ",", "isoel", ",", "col1", ",", "col2", ",", "method", ",", "meta", ")", ":", "self", ".", "_data", "[", "method", "]", "[", "col1", "]", "[", "col2", "]", "[", "\"isoelastics\"", "]", "=", "isoel", "self", ".", "_data...
Convenience method for populating self._data
[ "Convenience", "method", "for", "population", "self", ".", "_data" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L33-L41
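Editor's note: the column flip in _add relies on numpy "fancy" indexing; a minimal demonstration of iso[:, [1, 0, 2]]:

import numpy as np

iso = np.array([[10.0, 0.01, 1.2],   # col1, col2, emodulus
                [20.0, 0.02, 1.2]])
flipped = iso[:, [1, 0, 2]]          # swap the first two columns
print(flipped)
# [[ 0.01 10.    1.2 ]
#  [ 0.02 20.    1.2 ]]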
ZELLMECHANIK-DRESDEN/dclab
dclab/isoelastics/__init__.py
Isoelastics.add
def add(self, isoel, col1, col2, channel_width, flow_rate, viscosity,
        method):
        """Add isoelastics

        Parameters
        ----------
        isoel: list of ndarrays
            Each list item resembles one isoelastic line stored
            as an array of shape (N,3). The last column contains
            the emodulus data.
        col1: str
            Name of the first feature of all isoelastics
            (e.g. isoel[0][:,0])
        col2: str
            Name of the second feature of all isoelastics
            (e.g. isoel[0][:,1])
        channel_width: float
            Channel width in µm
        flow_rate: float
            Flow rate through the channel in µl/s
        viscosity: float
            Viscosity of the medium in mPa*s
        method: str
            The method used to compute the isoelastics
            (must be one of `VALID_METHODS`).

        Notes
        -----
        The following isoelastics are automatically added for
        user convenience:

        - isoelastics with `col1` and `col2` interchanged
        - isoelastics for circularity if deformation was given
        """
        if method not in VALID_METHODS:
            validstr = ",".join(VALID_METHODS)
            raise ValueError("`method` must be one of {}!".format(validstr))
        for col in [col1, col2]:
            if col not in dfn.scalar_feature_names:
                raise ValueError("Not a valid feature name: {}".format(col))

        meta = [channel_width, flow_rate, viscosity]

        # Add the feature data
        self._add(isoel, col1, col2, method, meta)

        # Also add the feature data for circularity
        if "deform" in [col1, col2]:
            col1c, col2c = col1, col2
            if col1c == "deform":
                deform_ax = 0
                col1c = "circ"
            else:
                deform_ax = 1
                col2c = "circ"
            iso_circ = []
            for iso in isoel:
                iso = iso.copy()
                iso[:, deform_ax] = 1 - iso[:, deform_ax]
                iso_circ.append(iso)
            self._add(iso_circ, col1c, col2c, method, meta)
python
def add(self, isoel, col1, col2, channel_width, flow_rate, viscosity,
        method):
        """Add isoelastics

        Parameters
        ----------
        isoel: list of ndarrays
            Each list item resembles one isoelastic line stored
            as an array of shape (N,3). The last column contains
            the emodulus data.
        col1: str
            Name of the first feature of all isoelastics
            (e.g. isoel[0][:,0])
        col2: str
            Name of the second feature of all isoelastics
            (e.g. isoel[0][:,1])
        channel_width: float
            Channel width in µm
        flow_rate: float
            Flow rate through the channel in µl/s
        viscosity: float
            Viscosity of the medium in mPa*s
        method: str
            The method used to compute the isoelastics
            (must be one of `VALID_METHODS`).

        Notes
        -----
        The following isoelastics are automatically added for
        user convenience:

        - isoelastics with `col1` and `col2` interchanged
        - isoelastics for circularity if deformation was given
        """
        if method not in VALID_METHODS:
            validstr = ",".join(VALID_METHODS)
            raise ValueError("`method` must be one of {}!".format(validstr))
        for col in [col1, col2]:
            if col not in dfn.scalar_feature_names:
                raise ValueError("Not a valid feature name: {}".format(col))

        meta = [channel_width, flow_rate, viscosity]

        # Add the feature data
        self._add(isoel, col1, col2, method, meta)

        # Also add the feature data for circularity
        if "deform" in [col1, col2]:
            col1c, col2c = col1, col2
            if col1c == "deform":
                deform_ax = 0
                col1c = "circ"
            else:
                deform_ax = 1
                col2c = "circ"
            iso_circ = []
            for iso in isoel:
                iso = iso.copy()
                iso[:, deform_ax] = 1 - iso[:, deform_ax]
                iso_circ.append(iso)
            self._add(iso_circ, col1c, col2c, method, meta)
[ "def", "add", "(", "self", ",", "isoel", ",", "col1", ",", "col2", ",", "channel_width", ",", "flow_rate", ",", "viscosity", ",", "method", ")", ":", "if", "method", "not", "in", "VALID_METHODS", ":", "validstr", "=", "\",\"", ".", "join", "(", "VALID_...
Add isoelastics

    Parameters
    ----------
    isoel: list of ndarrays
        Each list item resembles one isoelastic line stored
        as an array of shape (N,3). The last column contains
        the emodulus data.
    col1: str
        Name of the first feature of all isoelastics
        (e.g. isoel[0][:,0])
    col2: str
        Name of the second feature of all isoelastics
        (e.g. isoel[0][:,1])
    channel_width: float
        Channel width in µm
    flow_rate: float
        Flow rate through the channel in µl/s
    viscosity: float
        Viscosity of the medium in mPa*s
    method: str
        The method used to compute the isoelastics
        (must be one of `VALID_METHODS`).

    Notes
    -----
    The following isoelastics are automatically added for
    user convenience:

    - isoelastics with `col1` and `col2` interchanged
    - isoelastics for circularity if deformation was given
[ "Add", "isoelastics" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L43-L102
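Editor's note: the circularity variants are derived via circ = 1 - deform; a sketch of that transformation on a single isoelastic line:

import numpy as np

iso = np.array([[50.0, 0.02, 1.0],
                [80.0, 0.05, 1.0]])  # columns: area_um, deform, emodulus
iso_circ = iso.copy()
iso_circ[:, 1] = 1 - iso_circ[:, 1]  # deform axis -> circularity
print(iso_circ[:, 1])  # [0.98 0.95]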
ZELLMECHANIK-DRESDEN/dclab
dclab/isoelastics/__init__.py
Isoelastics.add_px_err
def add_px_err(isoel, col1, col2, px_um, inplace=False):
        """Undo pixelation correction

        Isoelasticity lines are already corrected for pixelation
        effects as described in

            Mapping of Deformation to Apparent Young's Modulus
            in Real-Time Deformability Cytometry
            Christoph Herold, arXiv:1704.00572 [cond-mat.soft] (2017)
            https://arxiv.org/abs/1704.00572.

        If the isoelasticity lines are displayed with deformation data
        that are not corrected, then the lines must be "un"-corrected,
        i.e. the pixelation error must be added to the lines to match
        the experimental data.

        Parameters
        ----------
        isoel: list of 2d ndarrays of shape (N, 3)
            Each item in the list corresponds to one isoelasticity
            line. The first column is defined by `col1`, the second
            by `col2`, and the third column is the emodulus.
        col1, col2: str
            Define the first two columns of each isoelasticity line.
            One of ["area_um", "circ", "deform"]
        px_um: float
            Pixel size [µm]
        """
        Isoelastics.check_col12(col1, col2)

        if "deform" in [col1, col2]:
            # add error for deformation
            sign = +1
        else:
            # subtract error for circularity
            sign = -1

        if col1 == "area_um":
            area_ax = 0
            deci_ax = 1
        else:
            area_ax = 1
            deci_ax = 0

        new_isoel = []
        for iso in isoel:
            iso = np.array(iso, copy=not inplace)
            ddeci = feat_emod.corrpix_deform_delta(area_um=iso[:, area_ax],
                                                   px_um=px_um)
            iso[:, deci_ax] += sign * ddeci
            new_isoel.append(iso)
        return new_isoel
python
def add_px_err(isoel, col1, col2, px_um, inplace=False):
        """Undo pixelation correction

        Isoelasticity lines are already corrected for pixelation
        effects as described in

            Mapping of Deformation to Apparent Young's Modulus
            in Real-Time Deformability Cytometry
            Christoph Herold, arXiv:1704.00572 [cond-mat.soft] (2017)
            https://arxiv.org/abs/1704.00572.

        If the isoelasticity lines are displayed with deformation data
        that are not corrected, then the lines must be "un"-corrected,
        i.e. the pixelation error must be added to the lines to match
        the experimental data.

        Parameters
        ----------
        isoel: list of 2d ndarrays of shape (N, 3)
            Each item in the list corresponds to one isoelasticity
            line. The first column is defined by `col1`, the second
            by `col2`, and the third column is the emodulus.
        col1, col2: str
            Define the first two columns of each isoelasticity line.
            One of ["area_um", "circ", "deform"]
        px_um: float
            Pixel size [µm]
        """
        Isoelastics.check_col12(col1, col2)

        if "deform" in [col1, col2]:
            # add error for deformation
            sign = +1
        else:
            # subtract error for circularity
            sign = -1

        if col1 == "area_um":
            area_ax = 0
            deci_ax = 1
        else:
            area_ax = 1
            deci_ax = 0

        new_isoel = []
        for iso in isoel:
            iso = np.array(iso, copy=not inplace)
            ddeci = feat_emod.corrpix_deform_delta(area_um=iso[:, area_ax],
                                                   px_um=px_um)
            iso[:, deci_ax] += sign * ddeci
            new_isoel.append(iso)
        return new_isoel
[ "def", "add_px_err", "(", "isoel", ",", "col1", ",", "col2", ",", "px_um", ",", "inplace", "=", "False", ")", ":", "Isoelastics", ".", "check_col12", "(", "col1", ",", "col2", ")", "if", "\"deform\"", "in", "[", "col1", ",", "col2", "]", ":", "# add ...
Undo pixelation correction

    Isoelasticity lines are already corrected for pixelation
    effects as described in

        Mapping of Deformation to Apparent Young's Modulus
        in Real-Time Deformability Cytometry
        Christoph Herold, arXiv:1704.00572 [cond-mat.soft] (2017)
        https://arxiv.org/abs/1704.00572.

    If the isoelasticity lines are displayed with deformation data
    that are not corrected, then the lines must be "un"-corrected,
    i.e. the pixelation error must be added to the lines to match
    the experimental data.

    Parameters
    ----------
    isoel: list of 2d ndarrays of shape (N, 3)
        Each item in the list corresponds to one isoelasticity
        line. The first column is defined by `col1`, the second
        by `col2`, and the third column is the emodulus.
    col1, col2: str
        Define the first two columns of each isoelasticity line.
        One of ["area_um", "circ", "deform"]
    px_um: float
        Pixel size [µm]
[ "Undo", "pixelation", "correction" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L105-L154
ZELLMECHANIK-DRESDEN/dclab
dclab/isoelastics/__init__.py
Isoelastics.convert
def convert(isoel, col1, col2,
            channel_width_in, channel_width_out,
            flow_rate_in, flow_rate_out,
            viscosity_in, viscosity_out,
            inplace=False):
        """Convert isoelastics in area_um-deform space

        Parameters
        ----------
        isoel: list of 2d ndarrays of shape (N, 3)
            Each item in the list corresponds to one isoelasticity
            line. The first column is defined by `col1`, the second
            by `col2`, and the third column is the emodulus.
        col1, col2: str
            Define the first two columns of each isoelasticity line.
            One of ["area_um", "circ", "deform"]
        channel_width_in: float
            Original channel width [µm]
        channel_width_out: float
            Target channel width [µm]
        flow_rate_in: float
            Original flow rate [µl/s]
        flow_rate_out: float
            Target flow rate [µl/s]
        viscosity_in: float
            Original viscosity [mPa*s]
        viscosity_out: float
            Target viscosity [mPa*s]

        Notes
        -----
        If only the positions of the isoelastics are of interest and
        not the value of the elastic modulus, then it is sufficient
        to supply values for the channel width and set the values
        for flow rate and viscosity to a constant (e.g. 1).

        See Also
        --------
        dclab.features.emodulus.convert: conversion method used
        """
        Isoelastics.check_col12(col1, col2)

        if col1 == "area_um":
            area_ax = 0
            defo_ax = 1
        else:
            area_ax = 1
            defo_ax = 0

        new_isoel = []

        for iso in isoel:
            iso = np.array(iso, copy=not inplace)
            feat_emod.convert(area_um=iso[:, area_ax],
                              deform=iso[:, defo_ax],
                              emodulus=iso[:, 2],
                              channel_width_in=channel_width_in,
                              channel_width_out=channel_width_out,
                              flow_rate_in=flow_rate_in,
                              flow_rate_out=flow_rate_out,
                              viscosity_in=viscosity_in,
                              viscosity_out=viscosity_out,
                              inplace=True)
            new_isoel.append(iso)
        return new_isoel
python
def convert(isoel, col1, col2,
            channel_width_in, channel_width_out,
            flow_rate_in, flow_rate_out,
            viscosity_in, viscosity_out,
            inplace=False):
        """Convert isoelastics in area_um-deform space

        Parameters
        ----------
        isoel: list of 2d ndarrays of shape (N, 3)
            Each item in the list corresponds to one isoelasticity
            line. The first column is defined by `col1`, the second
            by `col2`, and the third column is the emodulus.
        col1, col2: str
            Define the first two columns of each isoelasticity line.
            One of ["area_um", "circ", "deform"]
        channel_width_in: float
            Original channel width [µm]
        channel_width_out: float
            Target channel width [µm]
        flow_rate_in: float
            Original flow rate [µl/s]
        flow_rate_out: float
            Target flow rate [µl/s]
        viscosity_in: float
            Original viscosity [mPa*s]
        viscosity_out: float
            Target viscosity [mPa*s]

        Notes
        -----
        If only the positions of the isoelastics are of interest and
        not the value of the elastic modulus, then it is sufficient
        to supply values for the channel width and set the values
        for flow rate and viscosity to a constant (e.g. 1).

        See Also
        --------
        dclab.features.emodulus.convert: conversion method used
        """
        Isoelastics.check_col12(col1, col2)

        if col1 == "area_um":
            area_ax = 0
            defo_ax = 1
        else:
            area_ax = 1
            defo_ax = 0

        new_isoel = []

        for iso in isoel:
            iso = np.array(iso, copy=not inplace)
            feat_emod.convert(area_um=iso[:, area_ax],
                              deform=iso[:, defo_ax],
                              emodulus=iso[:, 2],
                              channel_width_in=channel_width_in,
                              channel_width_out=channel_width_out,
                              flow_rate_in=flow_rate_in,
                              flow_rate_out=flow_rate_out,
                              viscosity_in=viscosity_in,
                              viscosity_out=viscosity_out,
                              inplace=True)
            new_isoel.append(iso)
        return new_isoel
[ "def", "convert", "(", "isoel", ",", "col1", ",", "col2", ",", "channel_width_in", ",", "channel_width_out", ",", "flow_rate_in", ",", "flow_rate_out", ",", "viscosity_in", ",", "viscosity_out", ",", "inplace", "=", "False", ")", ":", "Isoelastics", ".", "chec...
Convert isoelastics in area_um-deform space

    Parameters
    ----------
    isoel: list of 2d ndarrays of shape (N, 3)
        Each item in the list corresponds to one isoelasticity
        line. The first column is defined by `col1`, the second
        by `col2`, and the third column is the emodulus.
    col1, col2: str
        Define the first two columns of each isoelasticity line.
        One of ["area_um", "circ", "deform"]
    channel_width_in: float
        Original channel width [µm]
    channel_width_out: float
        Target channel width [µm]
    flow_rate_in: float
        Original flow rate [µl/s]
    flow_rate_out: float
        Target flow rate [µl/s]
    viscosity_in: float
        Original viscosity [mPa*s]
    viscosity_out: float
        Target viscosity [mPa*s]

    Notes
    -----
    If only the positions of the isoelastics are of interest and
    not the value of the elastic modulus, then it is sufficient
    to supply values for the channel width and set the values
    for flow rate and viscosity to a constant (e.g. 1).

    See Also
    --------
    dclab.features.emodulus.convert: conversion method used
[ "Convert", "isoelastics", "in", "area_um", "-", "deform", "space" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L168-L231
ZELLMECHANIK-DRESDEN/dclab
dclab/isoelastics/__init__.py
Isoelastics.get
def get(self, col1, col2, method, channel_width, flow_rate=None,
        viscosity=None, add_px_err=False, px_um=None):
        """Get isoelastics

        Parameters
        ----------
        col1: str
            Name of the first feature of all isoelastics
            (e.g. isoel[0][:,0])
        col2: str
            Name of the second feature of all isoelastics
            (e.g. isoel[0][:,1])
        method: str
            The method used to compute the isoelastics
            (must be one of `VALID_METHODS`).
        channel_width: float
            Channel width in µm
        flow_rate: float or `None`
            Flow rate through the channel in µl/s. If set to
            `None`, the flow rate of the imported data will
            be used (only do this if you do not need the
            correct values for elastic moduli).
        viscosity: float or `None`
            Viscosity of the medium in mPa*s. If set to
            `None`, the viscosity of the imported data will
            be used (only do this if you do not need the
            correct values for elastic moduli).
        add_px_err: bool
            If True, add pixelation errors according to
            C. Herold (2017), https://arxiv.org/abs/1704.00572
        px_um: float
            Pixel size [µm], used for pixelation error computation

        See Also
        --------
        dclab.features.emodulus.convert: conversion in-between
            channel sizes and viscosities
        dclab.features.emodulus.corrpix_deform_delta: pixelation
            error that is applied to the deformation data
        """
        if method not in VALID_METHODS:
            validstr = ",".join(VALID_METHODS)
            raise ValueError("`method` must be one of {}!".format(validstr))
        for col in [col1, col2]:
            if col not in dfn.scalar_feature_names:
                raise ValueError("Not a valid feature name: {}".format(col))

        if "isoelastics" not in self._data[method][col2][col1]:
            msg = "No isoelastics matching {}, {}, {}".format(col1, col2,
                                                              method)
            raise KeyError(msg)

        isoel = self._data[method][col1][col2]["isoelastics"]
        meta = self._data[method][col1][col2]["meta"]

        if flow_rate is None:
            flow_rate = meta[1]

        if viscosity is None:
            viscosity = meta[2]

        isoel_ret = self.convert(isoel, col1, col2,
                                 channel_width_in=meta[0],
                                 channel_width_out=channel_width,
                                 flow_rate_in=meta[1],
                                 flow_rate_out=flow_rate,
                                 viscosity_in=meta[2],
                                 viscosity_out=viscosity,
                                 inplace=False)

        if add_px_err:
            self.add_px_err(isoel=isoel_ret,
                            col1=col1,
                            col2=col2,
                            px_um=px_um,
                            inplace=True)

        return isoel_ret
python
def get(self, col1, col2, method, channel_width, flow_rate=None,
        viscosity=None, add_px_err=False, px_um=None):
        """Get isoelastics

        Parameters
        ----------
        col1: str
            Name of the first feature of all isoelastics
            (e.g. isoel[0][:,0])
        col2: str
            Name of the second feature of all isoelastics
            (e.g. isoel[0][:,1])
        method: str
            The method used to compute the isoelastics
            (must be one of `VALID_METHODS`).
        channel_width: float
            Channel width in µm
        flow_rate: float or `None`
            Flow rate through the channel in µl/s. If set to
            `None`, the flow rate of the imported data will
            be used (only do this if you do not need the
            correct values for elastic moduli).
        viscosity: float or `None`
            Viscosity of the medium in mPa*s. If set to
            `None`, the viscosity of the imported data will
            be used (only do this if you do not need the
            correct values for elastic moduli).
        add_px_err: bool
            If True, add pixelation errors according to
            C. Herold (2017), https://arxiv.org/abs/1704.00572
        px_um: float
            Pixel size [µm], used for pixelation error computation

        See Also
        --------
        dclab.features.emodulus.convert: conversion in-between
            channel sizes and viscosities
        dclab.features.emodulus.corrpix_deform_delta: pixelation
            error that is applied to the deformation data
        """
        if method not in VALID_METHODS:
            validstr = ",".join(VALID_METHODS)
            raise ValueError("`method` must be one of {}!".format(validstr))
        for col in [col1, col2]:
            if col not in dfn.scalar_feature_names:
                raise ValueError("Not a valid feature name: {}".format(col))

        if "isoelastics" not in self._data[method][col2][col1]:
            msg = "No isoelastics matching {}, {}, {}".format(col1, col2,
                                                              method)
            raise KeyError(msg)

        isoel = self._data[method][col1][col2]["isoelastics"]
        meta = self._data[method][col1][col2]["meta"]

        if flow_rate is None:
            flow_rate = meta[1]

        if viscosity is None:
            viscosity = meta[2]

        isoel_ret = self.convert(isoel, col1, col2,
                                 channel_width_in=meta[0],
                                 channel_width_out=channel_width,
                                 flow_rate_in=meta[1],
                                 flow_rate_out=flow_rate,
                                 viscosity_in=meta[2],
                                 viscosity_out=viscosity,
                                 inplace=False)

        if add_px_err:
            self.add_px_err(isoel=isoel_ret,
                            col1=col1,
                            col2=col2,
                            px_um=px_um,
                            inplace=True)

        return isoel_ret
[ "def", "get", "(", "self", ",", "col1", ",", "col2", ",", "method", ",", "channel_width", ",", "flow_rate", "=", "None", ",", "viscosity", "=", "None", ",", "add_px_err", "=", "False", ",", "px_um", "=", "None", ")", ":", "if", "method", "not", "in",...
Get isoelastics

    Parameters
    ----------
    col1: str
        Name of the first feature of all isoelastics
        (e.g. isoel[0][:,0])
    col2: str
        Name of the second feature of all isoelastics
        (e.g. isoel[0][:,1])
    method: str
        The method used to compute the isoelastics
        (must be one of `VALID_METHODS`).
    channel_width: float
        Channel width in µm
    flow_rate: float or `None`
        Flow rate through the channel in µl/s. If set to
        `None`, the flow rate of the imported data will
        be used (only do this if you do not need the
        correct values for elastic moduli).
    viscosity: float or `None`
        Viscosity of the medium in mPa*s. If set to
        `None`, the viscosity of the imported data will
        be used (only do this if you do not need the
        correct values for elastic moduli).
    add_px_err: bool
        If True, add pixelation errors according to
        C. Herold (2017), https://arxiv.org/abs/1704.00572
    px_um: float
        Pixel size [µm], used for pixelation error computation

    See Also
    --------
    dclab.features.emodulus.convert: conversion in-between
        channel sizes and viscosities
    dclab.features.emodulus.corrpix_deform_delta: pixelation
        error that is applied to the deformation data
[ "Get", "isoelastics" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L233-L310
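Editor's note: a sketch of retrieving isoelastics for a 20 µm channel. It assumes dclab exposes a default Isoelastics instance via dclab.isoelastics.get_default(); if that accessor differs in your version, construct Isoelastics directly. Parameters are illustrative:

import dclab.isoelastics as iso

default = iso.get_default()
lines = default.get(col1="area_um", col2="deform",
                    method="analytical",
                    channel_width=20.0,
                    flow_rate=0.04,
                    viscosity=15.0,
                    add_px_err=True,
                    px_um=0.34)
print(len(lines), lines[0].shape)  # number of lines, (N, 3)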
ZELLMECHANIK-DRESDEN/dclab
dclab/isoelastics/__init__.py
Isoelastics.get_with_rtdcbase
def get_with_rtdcbase(self, col1, col2, method, dataset,
                      viscosity=None, add_px_err=False):
        """Convenience method that extracts the metadata from RTDCBase

        Parameters
        ----------
        col1: str
            Name of the first feature of all isoelastics
            (e.g. isoel[0][:,0])
        col2: str
            Name of the second feature of all isoelastics
            (e.g. isoel[0][:,1])
        method: str
            The method used to compute the isoelastics
            (must be one of `VALID_METHODS`).
        dataset: dclab.rtdc_dataset.RTDCBase
            The dataset from which to obtain the metadata.
        viscosity: float or `None`
            Viscosity of the medium in mPa*s. If set to
            `None`, the viscosity of the imported data will
            be used (only do this if you do not need the
            correct values for elastic moduli).
        add_px_err: bool
            If True, add pixelation errors according to
            C. Herold (2017), https://arxiv.org/abs/1704.00572
        """
        cfg = dataset.config
        return self.get(col1=col1,
                        col2=col2,
                        method=method,
                        channel_width=cfg["setup"]["channel width"],
                        flow_rate=cfg["setup"]["flow rate"],
                        viscosity=viscosity,
                        add_px_err=add_px_err,
                        px_um=cfg["imaging"]["pixel size"])
python
def get_with_rtdcbase(self, col1, col2, method, dataset,
                      viscosity=None, add_px_err=False):
        """Convenience method that extracts the metadata from RTDCBase

        Parameters
        ----------
        col1: str
            Name of the first feature of all isoelastics
            (e.g. isoel[0][:,0])
        col2: str
            Name of the second feature of all isoelastics
            (e.g. isoel[0][:,1])
        method: str
            The method used to compute the isoelastics
            (must be one of `VALID_METHODS`).
        dataset: dclab.rtdc_dataset.RTDCBase
            The dataset from which to obtain the metadata.
        viscosity: float or `None`
            Viscosity of the medium in mPa*s. If set to
            `None`, the viscosity of the imported data will
            be used (only do this if you do not need the
            correct values for elastic moduli).
        add_px_err: bool
            If True, add pixelation errors according to
            C. Herold (2017), https://arxiv.org/abs/1704.00572
        """
        cfg = dataset.config
        return self.get(col1=col1,
                        col2=col2,
                        method=method,
                        channel_width=cfg["setup"]["channel width"],
                        flow_rate=cfg["setup"]["flow rate"],
                        viscosity=viscosity,
                        add_px_err=add_px_err,
                        px_um=cfg["imaging"]["pixel size"])
[ "def", "get_with_rtdcbase", "(", "self", ",", "col1", ",", "col2", ",", "method", ",", "dataset", ",", "viscosity", "=", "None", ",", "add_px_err", "=", "False", ")", ":", "cfg", "=", "dataset", ".", "config", "return", "self", ".", "get", "(", "col1",...
Convenience method that extracts the metadata from RTDCBase

    Parameters
    ----------
    col1: str
        Name of the first feature of all isoelastics
        (e.g. isoel[0][:,0])
    col2: str
        Name of the second feature of all isoelastics
        (e.g. isoel[0][:,1])
    method: str
        The method used to compute the isoelastics
        (must be one of `VALID_METHODS`).
    dataset: dclab.rtdc_dataset.RTDCBase
        The dataset from which to obtain the metadata.
    viscosity: float or `None`
        Viscosity of the medium in mPa*s. If set to
        `None`, the viscosity of the imported data will
        be used (only do this if you do not need the
        correct values for elastic moduli).
    add_px_err: bool
        If True, add pixelation errors according to
        C. Herold (2017), https://arxiv.org/abs/1704.00572
[ "Convenience", "method", "that", "extracts", "the", "metadata", "from", "RTDCBase" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L312-L346
ZELLMECHANIK-DRESDEN/dclab
dclab/isoelastics/__init__.py
Isoelastics.load_data
def load_data(self, path): """Load isoelastics from a text file The text file is loaded with `numpy.loadtxt` and must have three columns, representing the two data columns and the elastic modulus with units defined in `definitions.py`. The file header must have a section defining meta data of the content like so: # [...] # # - column 1: area_um # - column 2: deform # - column 3: emodulus # - channel width [um]: 20 # - flow rate [ul/s]: 0.04 # - viscosity [mPa*s]: 15 # - method: analytical # # [...] Parameters ---------- path: str Path to a isoelastics text file """ path = pathlib.Path(path).resolve() # Get metadata meta = {} with path.open() as fd: while True: line = fd.readline().strip() if line.startswith("# - "): line = line.strip("#- ") var, val = line.split(":") if val.strip().replace(".", "").isdigit(): # channel width, flow rate, viscosity val = float(val) else: # columns, calculation val = val.strip().lower() meta[var.strip()] = val elif line and not line.startswith("#"): break assert meta["column 1"] in dfn.scalar_feature_names assert meta["column 2"] in dfn.scalar_feature_names assert meta["column 3"] == "emodulus" assert meta["method"] in VALID_METHODS # Load isoelasics with path.open("rb") as isfd: isodata = np.loadtxt(isfd) # Slice out individual isoelastics emoduli = np.unique(isodata[:, 2]) isoel = [] for emod in emoduli: where = isodata[:, 2] == emod isoel.append(isodata[where]) # Add isoelastics to instance self.add(isoel=isoel, col1=meta["column 1"], col2=meta["column 2"], channel_width=meta["channel width [um]"], flow_rate=meta["flow rate [ul/s]"], viscosity=meta["viscosity [mPa*s]"], method=meta["method"])
python
def load_data(self, path): """Load isoelastics from a text file The text file is loaded with `numpy.loadtxt` and must have three columns, representing the two data columns and the elastic modulus with units defined in `definitions.py`. The file header must have a section defining meta data of the content like so: # [...] # # - column 1: area_um # - column 2: deform # - column 3: emodulus # - channel width [um]: 20 # - flow rate [ul/s]: 0.04 # - viscosity [mPa*s]: 15 # - method: analytical # # [...] Parameters ---------- path: str Path to a isoelastics text file """ path = pathlib.Path(path).resolve() # Get metadata meta = {} with path.open() as fd: while True: line = fd.readline().strip() if line.startswith("# - "): line = line.strip("#- ") var, val = line.split(":") if val.strip().replace(".", "").isdigit(): # channel width, flow rate, viscosity val = float(val) else: # columns, calculation val = val.strip().lower() meta[var.strip()] = val elif line and not line.startswith("#"): break assert meta["column 1"] in dfn.scalar_feature_names assert meta["column 2"] in dfn.scalar_feature_names assert meta["column 3"] == "emodulus" assert meta["method"] in VALID_METHODS # Load isoelasics with path.open("rb") as isfd: isodata = np.loadtxt(isfd) # Slice out individual isoelastics emoduli = np.unique(isodata[:, 2]) isoel = [] for emod in emoduli: where = isodata[:, 2] == emod isoel.append(isodata[where]) # Add isoelastics to instance self.add(isoel=isoel, col1=meta["column 1"], col2=meta["column 2"], channel_width=meta["channel width [um]"], flow_rate=meta["flow rate [ul/s]"], viscosity=meta["viscosity [mPa*s]"], method=meta["method"])
[ "def", "load_data", "(", "self", ",", "path", ")", ":", "path", "=", "pathlib", ".", "Path", "(", "path", ")", ".", "resolve", "(", ")", "# Get metadata", "meta", "=", "{", "}", "with", "path", ".", "open", "(", ")", "as", "fd", ":", "while", "Tr...
Load isoelastics from a text file

    The text file is loaded with `numpy.loadtxt` and must have
    three columns, representing the two data columns and the
    elastic modulus with units defined in `definitions.py`.
    The file header must have a section defining meta data of the
    content like so:

        # [...]
        #
        # - column 1: area_um
        # - column 2: deform
        # - column 3: emodulus
        # - channel width [um]: 20
        # - flow rate [ul/s]: 0.04
        # - viscosity [mPa*s]: 15
        # - method: analytical
        #
        # [...]

    Parameters
    ----------
    path: str
        Path to an isoelastics text file
[ "Load", "isoelastics", "from", "a", "text", "file" ]
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L348-L417
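Editor's note: a sketch of a minimal text file that the parser above would accept (hypothetical values), written out with pathlib; the actual load call is commented because the Isoelastics constructor arguments may differ between dclab versions:

import pathlib

header = """# isoelastics example
#
# - column 1: area_um
# - column 2: deform
# - column 3: emodulus
# - channel width [um]: 20
# - flow rate [ul/s]: 0.04
# - viscosity [mPa*s]: 15
# - method: analytical
#
"""
rows = "50.0 0.010 1.0\n60.0 0.015 1.0\n50.0 0.020 2.0\n60.0 0.030 2.0\n"
path = pathlib.Path("isoel_example.txt")
path.write_text(header + rows)
# Isoelastics().load_data(path)  # hypothetical; yields two lines, E=1.0, 2.0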
openstax/cnx-archive
cnxarchive/views/robots.py
robots
def robots(request): """Return a simple "don't index me" robots.txt file.""" resp = request.response resp.status = '200 OK' resp.content_type = 'text/plain' resp.body = """ User-Agent: * Disallow: / """ return resp
python
def robots(request): """Return a simple "don't index me" robots.txt file.""" resp = request.response resp.status = '200 OK' resp.content_type = 'text/plain' resp.body = """ User-Agent: * Disallow: / """ return resp
[ "def", "robots", "(", "request", ")", ":", "resp", "=", "request", ".", "response", "resp", ".", "status", "=", "'200 OK'", "resp", ".", "content_type", "=", "'text/plain'", "resp", ".", "body", "=", "\"\"\"\nUser-Agent: *\nDisallow: /\n\"\"\"", "return", "resp"...
Return a simple "don't index me" robots.txt file.
[ "Return", "a", "simple", "don", "t", "index", "me", "robots", ".", "txt", "file", "." ]
train
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/robots.py#L14-L24