repository_name
stringlengths
7
55
func_path_in_repository
stringlengths
4
223
func_name
stringlengths
1
134
whole_func_string
stringlengths
75
104k
language
stringclasses
1 value
func_code_string
stringlengths
75
104k
func_code_tokens
listlengths
19
28.4k
func_documentation_string
stringlengths
1
46.9k
func_documentation_tokens
listlengths
1
1.97k
split_name
stringclasses
1 value
func_code_url
stringlengths
87
315
proteanhq/protean
src/protean/core/provider/__init__.py
Providers._initialize_providers
def _initialize_providers(self):
    """Build provider objects from the ``DATABASES`` section of the config.

    Returns a dict mapping provider name to an instantiated provider.

    :raises ConfigurationError: when ``DATABASES`` is not a non-empty dict,
        or when no ``'default'`` provider is configured.
    """
    db_config = active_config.DATABASES

    # The config must be a non-empty mapping with a mandatory 'default' entry.
    if not (isinstance(db_config, dict) and db_config):
        raise ConfigurationError(
            "'DATABASES' config must be a dict and at least one "
            "provider must be defined")
    if 'default' not in db_config:
        raise ConfigurationError(
            "You must define a 'default' provider")

    providers = {}
    for name, conn_info in db_config.items():
        # 'PROVIDER' holds a dotted path like ``pkg.module.ClassName``;
        # split off the class name, import the module, and instantiate.
        module_path, class_name = conn_info['PROVIDER'].rsplit('.', maxsplit=1)
        provider_cls = getattr(importlib.import_module(module_path), class_name)
        providers[name] = provider_cls(conn_info)
    return providers
python
def _initialize_providers(self): """Read config file and initialize providers""" configured_providers = active_config.DATABASES provider_objects = {} if not isinstance(configured_providers, dict) or configured_providers == {}: raise ConfigurationError( "'DATABASES' config must be a dict and at least one " "provider must be defined") if 'default' not in configured_providers: raise ConfigurationError( "You must define a 'default' provider") for provider_name, conn_info in configured_providers.items(): provider_full_path = conn_info['PROVIDER'] provider_module, provider_class = provider_full_path.rsplit('.', maxsplit=1) provider_cls = getattr(importlib.import_module(provider_module), provider_class) provider_objects[provider_name] = provider_cls(conn_info) return provider_objects
[ "def", "_initialize_providers", "(", "self", ")", ":", "configured_providers", "=", "active_config", ".", "DATABASES", "provider_objects", "=", "{", "}", "if", "not", "isinstance", "(", "configured_providers", ",", "dict", ")", "or", "configured_providers", "==", ...
Read config file and initialize providers
[ "Read", "config", "file", "and", "initialize", "providers" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/provider/__init__.py#L14-L35
proteanhq/protean
src/protean/core/provider/__init__.py
Providers.get_provider
def get_provider(self, provider_name='default'):
    """Fetch provider with the name specified in Configuration file.

    Providers are initialized lazily on first access.

    :param provider_name: name of a configured provider (default ``'default'``)
    :raises AssertionError: if no provider is registered under *provider_name*
    """
    if self._providers is None:
        # Lazy initialization, kept OUTSIDE the try below: a KeyError
        # raised while building providers (e.g. a conn_info missing its
        # 'PROVIDER' key) must propagate as a config problem instead of
        # being masked as "No Provider registered".
        self._providers = self._initialize_providers()
    try:
        return self._providers[provider_name]
    except KeyError:
        raise AssertionError(f'No Provider registered with name {provider_name}')
python
def get_provider(self, provider_name='default'): """Fetch provider with the name specified in Configuration file""" try: if self._providers is None: self._providers = self._initialize_providers() return self._providers[provider_name] except KeyError: raise AssertionError(f'No Provider registered with name {provider_name}')
[ "def", "get_provider", "(", "self", ",", "provider_name", "=", "'default'", ")", ":", "try", ":", "if", "self", ".", "_providers", "is", "None", ":", "self", ".", "_providers", "=", "self", ".", "_initialize_providers", "(", ")", "return", "self", ".", "...
Fetch provider with the name specified in Configuration file
[ "Fetch", "provider", "with", "the", "name", "specified", "in", "Configuration", "file" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/provider/__init__.py#L37-L44
proteanhq/protean
src/protean/core/provider/__init__.py
Providers.get_connection
def get_connection(self, provider_name='default'):
    """Fetch connection from Provider.

    :param provider_name: name of a configured provider (default ``'default'``)
    :raises AssertionError: if no provider is registered under *provider_name*
    """
    # Delegate to get_provider() so the provider registry is lazily
    # initialized here too. Previously this subscripted self._providers
    # directly and crashed with TypeError ('NoneType' is not subscriptable)
    # when called before get_provider() had ever run.
    return self.get_provider(provider_name).get_connection()
python
def get_connection(self, provider_name='default'): """Fetch connection from Provider""" try: return self._providers[provider_name].get_connection() except KeyError: raise AssertionError(f'No Provider registered with name {provider_name}')
[ "def", "get_connection", "(", "self", ",", "provider_name", "=", "'default'", ")", ":", "try", ":", "return", "self", ".", "_providers", "[", "provider_name", "]", ".", "get_connection", "(", ")", "except", "KeyError", ":", "raise", "AssertionError", "(", "f...
Fetch connection from Provider
[ "Fetch", "connection", "from", "Provider" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/provider/__init__.py#L46-L51
proteanhq/protean
src/protean/conf/__init__.py
Config.update_defaults
def update_defaults(self, ext_config):
    """Update the default settings for an extension from an object.

    Copies every UPPER_CASE attribute of *ext_config* onto this config
    object, skipping names already present (existing settings win).
    """
    new_settings = (
        name for name in dir(ext_config)
        if name.isupper() and not hasattr(self, name)
    )
    for name in new_settings:
        setattr(self, name, getattr(ext_config, name))
python
def update_defaults(self, ext_config): """ Update the default settings for an extension from an object""" for setting in dir(ext_config): if setting.isupper() and not hasattr(self, setting): setattr(self, setting, getattr(ext_config, setting))
[ "def", "update_defaults", "(", "self", ",", "ext_config", ")", ":", "for", "setting", "in", "dir", "(", "ext_config", ")", ":", "if", "setting", ".", "isupper", "(", ")", "and", "not", "hasattr", "(", "self", ",", "setting", ")", ":", "setattr", "(", ...
Update the default settings for an extension from an object
[ "Update", "the", "default", "settings", "for", "an", "extension", "from", "an", "object" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/conf/__init__.py#L59-L63
deep-compute/deeputil
deeputil/keep_running.py
keeprunning
def keeprunning(wait_secs=0, exit_on_success=False,
                on_success=None, on_error=None, on_done=None):
    '''Decorator factory that keeps invoking the wrapped function in a loop.

    The wrapped function is called repeatedly until it raises
    ``keeprunning.terminate`` (``KeepRunningTerminate``), or until the
    first clean run when *exit_on_success* is set. ``SystemExit`` and
    ``KeyboardInterrupt`` always propagate. Any other exception is handed
    to *on_error* (if given) and the loop continues, sleeping *wait_secs*
    seconds after each failure before retrying.

    Callbacks receive only the arguments their own signatures declare,
    resolved from the wrapped function's call arguments plus two magic
    names: ``__fn__`` (the wrapped function) and ``__exc__`` (the active
    exception, or None outside the error path).

    :param wait_secs: seconds to sleep after a failed call (0 = no sleep)
    :param exit_on_success: stop looping after the first successful call
    :param on_success: called after each successful call
    :param on_error: called after each failed call
    :param on_done: called once when the loop exits normally
    '''
    def wrap(fn):
        def invoke_callback(cb, available):
            if not cb:
                return
            # Resolve the argspec function once per call (getfullargspec on
            # py3, getargspec on py2) and pass the callback only the
            # arguments it actually declares; unknown names become None.
            spec_fn = getattr(inspect, 'getfullargspec',
                              getattr(inspect, 'getargspec'))
            wanted = spec_fn(cb).args
            cb(**{name: available.get(name) for name in wanted})

        def runner(*args, **kwargs):
            call_args = inspect.getcallargs(fn, *args, **kwargs)
            call_args.update(__fn__=fn, __exc__=None)

            while True:
                try:
                    fn(*args, **kwargs)
                    if exit_on_success:
                        break
                except (SystemExit, KeyboardInterrupt):
                    raise
                except KeepRunningTerminate:
                    break
                except Exception as exc:
                    call_args.update(__exc__=exc)
                    invoke_callback(on_error, call_args)
                    call_args.update(__exc__=None)
                    # Back off only on the failure path; successful runs
                    # loop again immediately.
                    if wait_secs:
                        time.sleep(wait_secs)
                    continue

                invoke_callback(on_success, call_args)

            invoke_callback(on_done, call_args)

        return runner
    return wrap
python
def keeprunning(wait_secs=0, exit_on_success=False, on_success=None, on_error=None, on_done=None): ''' Example 1: dosomething needs to run until completion condition without needing to have a loop in its code. Also, when error happens, we should NOT terminate execution >>> from deeputil import AttrDict >>> @keeprunning(wait_secs=1) ... def dosomething(state): ... state.i += 1 ... print (state) ... if state.i % 2 == 0: ... print("Error happened") ... 1 / 0 # create an error condition ... if state.i >= 7: ... print ("Done") ... raise keeprunning.terminate ... >>> state = AttrDict(i=0) >>> dosomething(state) AttrDict({'i': 1}) AttrDict({'i': 2}) Error happened AttrDict({'i': 3}) AttrDict({'i': 4}) Error happened AttrDict({'i': 5}) AttrDict({'i': 6}) Error happened AttrDict({'i': 7}) Done Example 2: In case you want to log exceptions while dosomething keeps running, or perform any other action when an exceptions arise >>> def some_error(__exc__): ... print (__exc__) ... >>> @keeprunning(on_error=some_error) ... def dosomething(state): ... state.i += 1 ... print (state) ... if state.i % 2 == 0: ... print("Error happened") ... 1 / 0 # create an error condition ... if state.i >= 7: ... print ("Done") ... raise keeprunning.terminate ... >>> state = AttrDict(i=0) >>> dosomething(state) AttrDict({'i': 1}) AttrDict({'i': 2}) Error happened division by zero AttrDict({'i': 3}) AttrDict({'i': 4}) Error happened division by zero AttrDict({'i': 5}) AttrDict({'i': 6}) Error happened division by zero AttrDict({'i': 7}) Done Example 3: Full set of arguments that can be passed in @keeprunning() with class implementations >>> # Class that has some class variables ... class Demo(object): ... SUCCESS_MSG = 'Yay!!' ... DONE_MSG = 'STOPPED AT NOTHING!' ... ERROR_MSG = 'Error' ... ... # Functions to be called by @keeprunning ... def success(self): ... print((self.SUCCESS_MSG)) ... ... def failure(self, __exc__): ... print((self.ERROR_MSG, __exc__)) ... ... def task_done(self): ... 
print((self.DONE_MSG)) ... ... #Actual use of keeprunning with all arguments passed ... @keeprunning(wait_secs=1, exit_on_success=False, ... on_success=success, on_error=failure, on_done=task_done) ... def dosomething(self, state): ... state.i += 1 ... print (state) ... if state.i % 2 == 0: ... print("Error happened") ... # create an error condition ... 1 / 0 ... if state.i >= 7: ... print ("Done") ... raise keeprunning.terminate ... >>> demo = Demo() >>> state = AttrDict(i=0) >>> demo.dosomething(state) AttrDict({'i': 1}) Yay!! AttrDict({'i': 2}) Error happened ('Error', ZeroDivisionError('division by zero')) AttrDict({'i': 3}) Yay!! AttrDict({'i': 4}) Error happened ('Error', ZeroDivisionError('division by zero')) AttrDict({'i': 5}) Yay!! AttrDict({'i': 6}) Error happened ('Error', ZeroDivisionError('division by zero')) AttrDict({'i': 7}) Done STOPPED AT NOTHING! ''' def decfn(fn): def _call_callback(cb, fargs): if not cb: return # get the getargspec fn in inspect module (python 2/3 support) G = getattr(inspect, 'getfullargspec', getattr(inspect, 'getargspec')) cb_args = G(cb).args cb_args = dict([(a, fargs.get(a, None)) for a in cb_args]) cb(**cb_args) def _fn(*args, **kwargs): fargs = inspect.getcallargs(fn, *args, **kwargs) fargs.update(dict(__fn__=fn, __exc__=None)) while 1: try: fn(*args, **kwargs) if exit_on_success: break except (SystemExit, KeyboardInterrupt): raise except KeepRunningTerminate: break except Exception as exc: fargs.update(dict(__exc__=exc)) _call_callback(on_error, fargs) fargs.update(dict(__exc__=None)) if wait_secs: time.sleep(wait_secs) continue _call_callback(on_success, fargs) _call_callback(on_done, fargs) return _fn return decfn
[ "def", "keeprunning", "(", "wait_secs", "=", "0", ",", "exit_on_success", "=", "False", ",", "on_success", "=", "None", ",", "on_error", "=", "None", ",", "on_done", "=", "None", ")", ":", "def", "decfn", "(", "fn", ")", ":", "def", "_call_callback", "...
Example 1: dosomething needs to run until completion condition without needing to have a loop in its code. Also, when error happens, we should NOT terminate execution >>> from deeputil import AttrDict >>> @keeprunning(wait_secs=1) ... def dosomething(state): ... state.i += 1 ... print (state) ... if state.i % 2 == 0: ... print("Error happened") ... 1 / 0 # create an error condition ... if state.i >= 7: ... print ("Done") ... raise keeprunning.terminate ... >>> state = AttrDict(i=0) >>> dosomething(state) AttrDict({'i': 1}) AttrDict({'i': 2}) Error happened AttrDict({'i': 3}) AttrDict({'i': 4}) Error happened AttrDict({'i': 5}) AttrDict({'i': 6}) Error happened AttrDict({'i': 7}) Done Example 2: In case you want to log exceptions while dosomething keeps running, or perform any other action when an exceptions arise >>> def some_error(__exc__): ... print (__exc__) ... >>> @keeprunning(on_error=some_error) ... def dosomething(state): ... state.i += 1 ... print (state) ... if state.i % 2 == 0: ... print("Error happened") ... 1 / 0 # create an error condition ... if state.i >= 7: ... print ("Done") ... raise keeprunning.terminate ... >>> state = AttrDict(i=0) >>> dosomething(state) AttrDict({'i': 1}) AttrDict({'i': 2}) Error happened division by zero AttrDict({'i': 3}) AttrDict({'i': 4}) Error happened division by zero AttrDict({'i': 5}) AttrDict({'i': 6}) Error happened division by zero AttrDict({'i': 7}) Done Example 3: Full set of arguments that can be passed in @keeprunning() with class implementations >>> # Class that has some class variables ... class Demo(object): ... SUCCESS_MSG = 'Yay!!' ... DONE_MSG = 'STOPPED AT NOTHING!' ... ERROR_MSG = 'Error' ... ... # Functions to be called by @keeprunning ... def success(self): ... print((self.SUCCESS_MSG)) ... ... def failure(self, __exc__): ... print((self.ERROR_MSG, __exc__)) ... ... def task_done(self): ... print((self.DONE_MSG)) ... ... #Actual use of keeprunning with all arguments passed ... 
@keeprunning(wait_secs=1, exit_on_success=False, ... on_success=success, on_error=failure, on_done=task_done) ... def dosomething(self, state): ... state.i += 1 ... print (state) ... if state.i % 2 == 0: ... print("Error happened") ... # create an error condition ... 1 / 0 ... if state.i >= 7: ... print ("Done") ... raise keeprunning.terminate ... >>> demo = Demo() >>> state = AttrDict(i=0) >>> demo.dosomething(state) AttrDict({'i': 1}) Yay!! AttrDict({'i': 2}) Error happened ('Error', ZeroDivisionError('division by zero')) AttrDict({'i': 3}) Yay!! AttrDict({'i': 4}) Error happened ('Error', ZeroDivisionError('division by zero')) AttrDict({'i': 5}) Yay!! AttrDict({'i': 6}) Error happened ('Error', ZeroDivisionError('division by zero')) AttrDict({'i': 7}) Done STOPPED AT NOTHING!
[ "Example", "1", ":", "dosomething", "needs", "to", "run", "until", "completion", "condition", "without", "needing", "to", "have", "a", "loop", "in", "its", "code", ".", "Also", "when", "error", "happens", "we", "should", "NOT", "terminate", "execution" ]
train
https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/keep_running.py#L10-L171
proteanhq/protean
src/protean/core/field/utils.py
fetch_entity_cls_from_registry
def fetch_entity_cls_from_registry(entity):
    """Util Method to fetch an Entity class from an entity's name.

    Anything that is not a string is assumed to already be an Entity
    class and is returned untouched.
    """
    # Guard clause: only string names need a registry lookup.
    if not isinstance(entity, str):
        return entity

    try:
        return repo_factory.get_entity(entity)
    except AssertionError:
        # Entity has not been registered (yet)
        # FIXME print a helpful debug message
        raise
python
def fetch_entity_cls_from_registry(entity): """Util Method to fetch an Entity class from an entity's name""" # Defensive check to ensure we only process if `to_cls` is a string if isinstance(entity, str): try: return repo_factory.get_entity(entity) except AssertionError: # Entity has not been registered (yet) # FIXME print a helpful debug message raise else: return entity
[ "def", "fetch_entity_cls_from_registry", "(", "entity", ")", ":", "# Defensive check to ensure we only process if `to_cls` is a string", "if", "isinstance", "(", "entity", ",", "str", ")", ":", "try", ":", "return", "repo_factory", ".", "get_entity", "(", "entity", ")",...
Util Method to fetch an Entity class from an entity's name
[ "Util", "Method", "to", "fetch", "an", "Entity", "class", "from", "an", "entity", "s", "name" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/field/utils.py#L5-L16
proteanhq/protean
src/protean/core/repository/factory.py
RepositoryFactory.register
def register(self, entity_cls, provider_name=None):
    """ Register the given model with the factory
    :param entity_cls: Entity class to be registered
    :param provider: Optional provider to associate with Entity class
    """
    self._validate_entity_cls(entity_cls)

    qualname = fully_qualified_name(entity_cls)
    # Provider resolution order: explicit argument, entity meta, 'default'.
    provider_name = provider_name or entity_cls.meta_.provider or 'default'

    try:
        already_registered = self._get_entity_by_class(entity_cls)
    except AssertionError:
        # Entity has not been registered yet. Let's go ahead and add it
        # to the registry.
        self._registry[qualname] = RepositoryFactory.EntityRecord(
            name=entity_cls.__name__,
            qualname=qualname,
            entity_cls=entity_cls,
            provider_name=provider_name,
            model_cls=None)
        logger.debug(
            f'Registered entity {qualname} with provider {provider_name}')
    else:
        if already_registered:
            # This probably is an accidental re-registration of the entity
            # and we should warn the user of a possible repository confusion
            raise ConfigurationError(
                f'Entity {qualname} has already been registered')
python
def register(self, entity_cls, provider_name=None): """ Register the given model with the factory :param entity_cls: Entity class to be registered :param provider: Optional provider to associate with Entity class """ self._validate_entity_cls(entity_cls) # Register the entity if not registered already entity_name = fully_qualified_name(entity_cls) provider_name = provider_name or entity_cls.meta_.provider or 'default' try: entity = self._get_entity_by_class(entity_cls) if entity: # This probably is an accidental re-registration of the entity # and we should warn the user of a possible repository confusion raise ConfigurationError( f'Entity {entity_name} has already been registered') except AssertionError: # Entity has not been registered yet. Let's go ahead and add it to the registry. entity_record = RepositoryFactory.EntityRecord( name=entity_cls.__name__, qualname=entity_name, entity_cls=entity_cls, provider_name=provider_name, model_cls=None ) self._registry[entity_name] = entity_record logger.debug( f'Registered entity {entity_name} with provider {provider_name}')
[ "def", "register", "(", "self", ",", "entity_cls", ",", "provider_name", "=", "None", ")", ":", "self", ".", "_validate_entity_cls", "(", "entity_cls", ")", "# Register the entity if not registered already", "entity_name", "=", "fully_qualified_name", "(", "entity_cls",...
Register the given model with the factory :param entity_cls: Entity class to be registered :param provider: Optional provider to associate with Entity class
[ "Register", "the", "given", "model", "with", "the", "factory", ":", "param", "entity_cls", ":", "Entity", "class", "to", "be", "registered", ":", "param", "provider", ":", "Optional", "provider", "to", "associate", "with", "Entity", "class" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/repository/factory.py#L34-L64
proteanhq/protean
src/protean/core/repository/factory.py
RepositoryFactory._find_entity_in_records_by_class_name
def _find_entity_in_records_by_class_name(self, entity_name): """Fetch by Entity Name in values""" records = { key: value for (key, value) in self._registry.items() if value.name == entity_name } # If more than one record was found, we are dealing with the case of # an Entity name present in multiple places (packages or plugins). Throw an error # and ask for a fully qualified Entity name to be specified if len(records) > 1: raise ConfigurationError( f'Entity with name {entity_name} has been registered twice. ' f'Please use fully qualified Entity name to specify the exact Entity.') elif len(records) == 1: return next(iter(records.values())) else: raise AssertionError(f'No Entity registered with name {entity_name}')
python
def _find_entity_in_records_by_class_name(self, entity_name): """Fetch by Entity Name in values""" records = { key: value for (key, value) in self._registry.items() if value.name == entity_name } # If more than one record was found, we are dealing with the case of # an Entity name present in multiple places (packages or plugins). Throw an error # and ask for a fully qualified Entity name to be specified if len(records) > 1: raise ConfigurationError( f'Entity with name {entity_name} has been registered twice. ' f'Please use fully qualified Entity name to specify the exact Entity.') elif len(records) == 1: return next(iter(records.values())) else: raise AssertionError(f'No Entity registered with name {entity_name}')
[ "def", "_find_entity_in_records_by_class_name", "(", "self", ",", "entity_name", ")", ":", "records", "=", "{", "key", ":", "value", "for", "(", "key", ",", "value", ")", "in", "self", ".", "_registry", ".", "items", "(", ")", "if", "value", ".", "name",...
Fetch by Entity Name in values
[ "Fetch", "by", "Entity", "Name", "in", "values" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/repository/factory.py#L66-L83
proteanhq/protean
src/protean/core/repository/factory.py
RepositoryFactory._get_entity_by_class
def _get_entity_by_class(self, entity_cls):
    """Fetch Entity record with Entity class details"""
    qualname = fully_qualified_name(entity_cls)
    # Fast path: the fully qualified name is a direct registry key.
    if qualname in self._registry:
        return self._registry[qualname]
    # Fall back to a (possibly ambiguous) lookup by bare class name.
    return self._find_entity_in_records_by_class_name(entity_cls.__name__)
python
def _get_entity_by_class(self, entity_cls): """Fetch Entity record with Entity class details""" entity_qualname = fully_qualified_name(entity_cls) if entity_qualname in self._registry: return self._registry[entity_qualname] else: return self._find_entity_in_records_by_class_name(entity_cls.__name__)
[ "def", "_get_entity_by_class", "(", "self", ",", "entity_cls", ")", ":", "entity_qualname", "=", "fully_qualified_name", "(", "entity_cls", ")", "if", "entity_qualname", "in", "self", ".", "_registry", ":", "return", "self", ".", "_registry", "[", "entity_qualname...
Fetch Entity record with Entity class details
[ "Fetch", "Entity", "record", "with", "Entity", "class", "details" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/repository/factory.py#L85-L91
proteanhq/protean
src/protean/core/repository/factory.py
RepositoryFactory._get_entity_by_name
def _get_entity_by_name(self, entity_name): """Fetch Entity record with an Entity name""" if entity_name in self._registry: return self._registry[entity_name] else: return self._find_entity_in_records_by_class_name(entity_name)
python
def _get_entity_by_name(self, entity_name): """Fetch Entity record with an Entity name""" if entity_name in self._registry: return self._registry[entity_name] else: return self._find_entity_in_records_by_class_name(entity_name)
[ "def", "_get_entity_by_name", "(", "self", ",", "entity_name", ")", ":", "if", "entity_name", "in", "self", ".", "_registry", ":", "return", "self", ".", "_registry", "[", "entity_name", "]", "else", ":", "return", "self", ".", "_find_entity_in_records_by_class_...
Fetch Entity record with an Entity name
[ "Fetch", "Entity", "record", "with", "an", "Entity", "name" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/repository/factory.py#L93-L98
proteanhq/protean
src/protean/core/repository/factory.py
RepositoryFactory._validate_entity_cls
def _validate_entity_cls(self, entity_cls):
    """Validate that Entity is a valid class.

    :raises AssertionError: if *entity_cls* is not an ``Entity`` subclass
    :raises NotSupportedError: if *entity_cls* is marked abstract
    """
    # Import here to avoid cyclic dependency
    from protean.core.entity import Entity

    if not issubclass(entity_cls, Entity):
        raise AssertionError(
            f'Entity {entity_cls.__name__} must be subclass of `Entity`')

    # Abstract entities serve only as base classes and must never be
    # registered for instantiation.
    if entity_cls.meta_.abstract is True:
        raise NotSupportedError(
            f'{entity_cls.__name__} class has been marked abstract'
            f' and cannot be instantiated')
python
def _validate_entity_cls(self, entity_cls): """Validate that Entity is a valid class""" # Import here to avoid cyclic dependency from protean.core.entity import Entity if not issubclass(entity_cls, Entity): raise AssertionError( f'Entity {entity_cls.__name__} must be subclass of `Entity`') if entity_cls.meta_.abstract is True: raise NotSupportedError( f'{entity_cls.__name__} class has been marked abstract' f' and cannot be instantiated')
[ "def", "_validate_entity_cls", "(", "self", ",", "entity_cls", ")", ":", "# Import here to avoid cyclic dependency", "from", "protean", ".", "core", ".", "entity", "import", "Entity", "if", "not", "issubclass", "(", "entity_cls", ",", "Entity", ")", ":", "raise", ...
Validate that Entity is a valid class
[ "Validate", "that", "Entity", "is", "a", "valid", "class" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/repository/factory.py#L100-L112
proteanhq/protean
src/protean/core/repository/factory.py
RepositoryFactory.get_model
def get_model(self, entity_cls):
    """Retrieve Model class connected to Entity"""
    record = self._get_entity_by_class(entity_cls)

    if record.model_cls:
        # Already baked and cached on the registry record.
        return record.model_cls

    # First request for this entity: ask the provider to bake a fully
    # initialized model class, then cache it on the record for future use.
    provider = self.get_provider(record.provider_name)
    model_cls = provider.get_model(record.entity_cls)
    self._registry[record.qualname] = record._replace(model_cls=model_cls)
    return model_cls
python
def get_model(self, entity_cls): """Retrieve Model class connected to Entity""" entity_record = self._get_entity_by_class(entity_cls) model_cls = None if entity_record.model_cls: model_cls = entity_record.model_cls else: # We should ask the Provider to give a fully baked model the first time # that has been initialized properly for this entity provider = self.get_provider(entity_record.provider_name) baked_model_cls = provider.get_model(entity_record.entity_cls) # Record for future reference new_entity_record = entity_record._replace(model_cls=baked_model_cls) self._registry[entity_record.qualname] = new_entity_record model_cls = baked_model_cls return model_cls
[ "def", "get_model", "(", "self", ",", "entity_cls", ")", ":", "entity_record", "=", "self", ".", "_get_entity_by_class", "(", "entity_cls", ")", "model_cls", "=", "None", "if", "entity_record", ".", "model_cls", ":", "model_cls", "=", "entity_record", ".", "mo...
Retrieve Model class connected to Entity
[ "Retrieve", "Model", "class", "connected", "to", "Entity" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/repository/factory.py#L114-L133
proteanhq/protean
src/protean/core/repository/factory.py
RepositoryFactory.get_repository
def get_repository(self, entity_cls):
    """Retrieve a Repository for the Model with a live connection"""
    record = self._get_entity_by_class(entity_cls)
    # The provider associated with the entity hands out the repository.
    return self.get_provider(record.provider_name).get_repository(record.entity_cls)
python
def get_repository(self, entity_cls): """Retrieve a Repository for the Model with a live connection""" entity_record = self._get_entity_by_class(entity_cls) provider = self.get_provider(entity_record.provider_name) return provider.get_repository(entity_record.entity_cls)
[ "def", "get_repository", "(", "self", ",", "entity_cls", ")", ":", "entity_record", "=", "self", ".", "_get_entity_by_class", "(", "entity_cls", ")", "provider", "=", "self", ".", "get_provider", "(", "entity_record", ".", "provider_name", ")", "return", "provid...
Retrieve a Repository for the Model with a live connection
[ "Retrieve", "a", "Repository", "for", "the", "Model", "with", "a", "live", "connection" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/repository/factory.py#L143-L148
Danielhiversen/pymill
mill/__init__.py
set_heater_values
async def set_heater_values(heater_data, heater): """Set heater values from heater data""" heater.current_temp = heater_data.get('currentTemp') heater.device_status = heater_data.get('deviceStatus') heater.available = heater.device_status == 0 heater.name = heater_data.get('deviceName') heater.fan_status = heater_data.get('fanStatus') heater.is_holiday = heater_data.get('isHoliday') # Room assigned devices don't report canChangeTemp # in selectDevice response. if heater.room is None: heater.can_change_temp = heater_data.get('canChangeTemp') # Independent devices report their target temperature via # holidayTemp value. But isHoliday is still set to 0. # Room assigned devices may have set "Control Device individually" # which effectively set their isHoliday value to 1. # In this mode they behave similar to independent devices # reporting their target temperature also via holidayTemp. if heater.independent_device or heater.is_holiday == 1: heater.set_temp = heater_data.get('holidayTemp') elif heater.room is not None: if heater.room.current_mode == 1: heater.set_temp = heater.room.comfort_temp elif heater.room.current_mode == 2: heater.set_temp = heater.room.sleep_temp elif heater.room.current_mode == 3: heater.set_temp = heater.room.away_temp heater.power_status = heater_data.get('powerStatus') heater.tibber_control = heater_data.get('tibberControl') heater.open_window = heater_data.get('open_window', heater_data.get('open') ) heater.is_heating = heater_data.get('heatStatus', heater_data.get('heaterFlag') ) try: heater.sub_domain = int(float(heater_data.get('subDomain', heater_data.get('subDomainId', heater.sub_domain) ))) except ValueError: pass
python
async def set_heater_values(heater_data, heater): """Set heater values from heater data""" heater.current_temp = heater_data.get('currentTemp') heater.device_status = heater_data.get('deviceStatus') heater.available = heater.device_status == 0 heater.name = heater_data.get('deviceName') heater.fan_status = heater_data.get('fanStatus') heater.is_holiday = heater_data.get('isHoliday') # Room assigned devices don't report canChangeTemp # in selectDevice response. if heater.room is None: heater.can_change_temp = heater_data.get('canChangeTemp') # Independent devices report their target temperature via # holidayTemp value. But isHoliday is still set to 0. # Room assigned devices may have set "Control Device individually" # which effectively set their isHoliday value to 1. # In this mode they behave similar to independent devices # reporting their target temperature also via holidayTemp. if heater.independent_device or heater.is_holiday == 1: heater.set_temp = heater_data.get('holidayTemp') elif heater.room is not None: if heater.room.current_mode == 1: heater.set_temp = heater.room.comfort_temp elif heater.room.current_mode == 2: heater.set_temp = heater.room.sleep_temp elif heater.room.current_mode == 3: heater.set_temp = heater.room.away_temp heater.power_status = heater_data.get('powerStatus') heater.tibber_control = heater_data.get('tibberControl') heater.open_window = heater_data.get('open_window', heater_data.get('open') ) heater.is_heating = heater_data.get('heatStatus', heater_data.get('heaterFlag') ) try: heater.sub_domain = int(float(heater_data.get('subDomain', heater_data.get('subDomainId', heater.sub_domain) ))) except ValueError: pass
[ "async", "def", "set_heater_values", "(", "heater_data", ",", "heater", ")", ":", "heater", ".", "current_temp", "=", "heater_data", ".", "get", "(", "'currentTemp'", ")", "heater", ".", "device_status", "=", "heater_data", ".", "get", "(", "'deviceStatus'", "...
Set heater values from heater data
[ "Set", "heater", "values", "from", "heater", "data" ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L445-L488
Danielhiversen/pymill
mill/__init__.py
Mill.connect
async def connect(self, retry=2): """Connect to Mill.""" # pylint: disable=too-many-return-statements url = API_ENDPOINT_1 + 'login' headers = { "Content-Type": "application/x-zc-object", "Connection": "Keep-Alive", "X-Zc-Major-Domain": "seanywell", "X-Zc-Msg-Name": "millService", "X-Zc-Sub-Domain": "milltype", "X-Zc-Seq-Id": "1", "X-Zc-Version": "1", } payload = {"account": self._username, "password": self._password} try: with async_timeout.timeout(self._timeout): resp = await self.websession.post(url, data=json.dumps(payload), headers=headers) except (asyncio.TimeoutError, aiohttp.ClientError): if retry < 1: _LOGGER.error("Error connecting to Mill", exc_info=True) return False return await self.connect(retry - 1) result = await resp.text() if '"errorCode":3504' in result: _LOGGER.error('Wrong password') return False if '"errorCode":3501' in result: _LOGGER.error('Account does not exist') return False data = json.loads(result) token = data.get('token') if token is None: _LOGGER.error('No token') return False user_id = data.get('userId') if user_id is None: _LOGGER.error('No user id') return False self._token = token self._user_id = user_id return True
python
async def connect(self, retry=2): """Connect to Mill.""" # pylint: disable=too-many-return-statements url = API_ENDPOINT_1 + 'login' headers = { "Content-Type": "application/x-zc-object", "Connection": "Keep-Alive", "X-Zc-Major-Domain": "seanywell", "X-Zc-Msg-Name": "millService", "X-Zc-Sub-Domain": "milltype", "X-Zc-Seq-Id": "1", "X-Zc-Version": "1", } payload = {"account": self._username, "password": self._password} try: with async_timeout.timeout(self._timeout): resp = await self.websession.post(url, data=json.dumps(payload), headers=headers) except (asyncio.TimeoutError, aiohttp.ClientError): if retry < 1: _LOGGER.error("Error connecting to Mill", exc_info=True) return False return await self.connect(retry - 1) result = await resp.text() if '"errorCode":3504' in result: _LOGGER.error('Wrong password') return False if '"errorCode":3501' in result: _LOGGER.error('Account does not exist') return False data = json.loads(result) token = data.get('token') if token is None: _LOGGER.error('No token') return False user_id = data.get('userId') if user_id is None: _LOGGER.error('No user id') return False self._token = token self._user_id = user_id return True
[ "async", "def", "connect", "(", "self", ",", "retry", "=", "2", ")", ":", "# pylint: disable=too-many-return-statements", "url", "=", "API_ENDPOINT_1", "+", "'login'", "headers", "=", "{", "\"Content-Type\"", ":", "\"application/x-zc-object\"", ",", "\"Connection\"", ...
Connect to Mill.
[ "Connect", "to", "Mill", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L52-L100
Danielhiversen/pymill
mill/__init__.py
Mill.sync_connect
def sync_connect(self): """Close the Mill connection.""" loop = asyncio.get_event_loop() task = loop.create_task(self.connect()) loop.run_until_complete(task)
python
def sync_connect(self): """Close the Mill connection.""" loop = asyncio.get_event_loop() task = loop.create_task(self.connect()) loop.run_until_complete(task)
[ "def", "sync_connect", "(", "self", ")", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "task", "=", "loop", ".", "create_task", "(", "self", ".", "connect", "(", ")", ")", "loop", ".", "run_until_complete", "(", "task", ")" ]
Close the Mill connection.
[ "Close", "the", "Mill", "connection", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L102-L106
Danielhiversen/pymill
mill/__init__.py
Mill.sync_close_connection
def sync_close_connection(self): """Close the Mill connection.""" loop = asyncio.get_event_loop() task = loop.create_task(self.close_connection()) loop.run_until_complete(task)
python
def sync_close_connection(self): """Close the Mill connection.""" loop = asyncio.get_event_loop() task = loop.create_task(self.close_connection()) loop.run_until_complete(task)
[ "def", "sync_close_connection", "(", "self", ")", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "task", "=", "loop", ".", "create_task", "(", "self", ".", "close_connection", "(", ")", ")", "loop", ".", "run_until_complete", "(", "task", ")...
Close the Mill connection.
[ "Close", "the", "Mill", "connection", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L112-L116
Danielhiversen/pymill
mill/__init__.py
Mill.request
async def request(self, command, payload, retry=3): """Request data.""" # pylint: disable=too-many-return-statements if self._token is None: _LOGGER.error("No token") return None _LOGGER.debug(command, payload) nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16)) url = API_ENDPOINT_2 + command timestamp = int(time.time()) signature = hashlib.sha1(str(REQUEST_TIMEOUT + str(timestamp) + nonce + self._token).encode("utf-8")).hexdigest() headers = { "Content-Type": "application/x-zc-object", "Connection": "Keep-Alive", "X-Zc-Major-Domain": "seanywell", "X-Zc-Msg-Name": "millService", "X-Zc-Sub-Domain": "milltype", "X-Zc-Seq-Id": "1", "X-Zc-Version": "1", "X-Zc-Timestamp": str(timestamp), "X-Zc-Timeout": REQUEST_TIMEOUT, "X-Zc-Nonce": nonce, "X-Zc-User-Id": str(self._user_id), "X-Zc-User-Signature": signature, "X-Zc-Content-Length": str(len(payload)), } try: with async_timeout.timeout(self._timeout): resp = await self.websession.post(url, data=json.dumps(payload), headers=headers) except asyncio.TimeoutError: if retry < 1: _LOGGER.error("Timed out sending command to Mill: %s", command) return None return await self.request(command, payload, retry - 1) except aiohttp.ClientError: _LOGGER.error("Error sending command to Mill: %s", command, exc_info=True) return None result = await resp.text() _LOGGER.debug(result) if not result or result == '{"errorCode":0}': return None if 'access token expire' in result or 'invalid signature' in result: if retry < 1: return None if not await self.connect(): return None return await self.request(command, payload, retry - 1) if '"error":"device offline"' in result: if retry < 1: _LOGGER.error("Failed to send request, %s", result) return None _LOGGER.debug("Failed to send request, %s. 
Retrying...", result) await asyncio.sleep(3) return await self.request(command, payload, retry - 1) if 'errorCode' in result: _LOGGER.error("Failed to send request, %s", result) return None data = json.loads(result) return data
python
async def request(self, command, payload, retry=3): """Request data.""" # pylint: disable=too-many-return-statements if self._token is None: _LOGGER.error("No token") return None _LOGGER.debug(command, payload) nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16)) url = API_ENDPOINT_2 + command timestamp = int(time.time()) signature = hashlib.sha1(str(REQUEST_TIMEOUT + str(timestamp) + nonce + self._token).encode("utf-8")).hexdigest() headers = { "Content-Type": "application/x-zc-object", "Connection": "Keep-Alive", "X-Zc-Major-Domain": "seanywell", "X-Zc-Msg-Name": "millService", "X-Zc-Sub-Domain": "milltype", "X-Zc-Seq-Id": "1", "X-Zc-Version": "1", "X-Zc-Timestamp": str(timestamp), "X-Zc-Timeout": REQUEST_TIMEOUT, "X-Zc-Nonce": nonce, "X-Zc-User-Id": str(self._user_id), "X-Zc-User-Signature": signature, "X-Zc-Content-Length": str(len(payload)), } try: with async_timeout.timeout(self._timeout): resp = await self.websession.post(url, data=json.dumps(payload), headers=headers) except asyncio.TimeoutError: if retry < 1: _LOGGER.error("Timed out sending command to Mill: %s", command) return None return await self.request(command, payload, retry - 1) except aiohttp.ClientError: _LOGGER.error("Error sending command to Mill: %s", command, exc_info=True) return None result = await resp.text() _LOGGER.debug(result) if not result or result == '{"errorCode":0}': return None if 'access token expire' in result or 'invalid signature' in result: if retry < 1: return None if not await self.connect(): return None return await self.request(command, payload, retry - 1) if '"error":"device offline"' in result: if retry < 1: _LOGGER.error("Failed to send request, %s", result) return None _LOGGER.debug("Failed to send request, %s. 
Retrying...", result) await asyncio.sleep(3) return await self.request(command, payload, retry - 1) if 'errorCode' in result: _LOGGER.error("Failed to send request, %s", result) return None data = json.loads(result) return data
[ "async", "def", "request", "(", "self", ",", "command", ",", "payload", ",", "retry", "=", "3", ")", ":", "# pylint: disable=too-many-return-statements", "if", "self", ".", "_token", "is", "None", ":", "_LOGGER", ".", "error", "(", "\"No token\"", ")", "retu...
Request data.
[ "Request", "data", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L118-L191
Danielhiversen/pymill
mill/__init__.py
Mill.sync_request
def sync_request(self, command, payload, retry=2): """Request data.""" loop = asyncio.get_event_loop() task = loop.create_task(self.request(command, payload, retry)) return loop.run_until_complete(task)
python
def sync_request(self, command, payload, retry=2): """Request data.""" loop = asyncio.get_event_loop() task = loop.create_task(self.request(command, payload, retry)) return loop.run_until_complete(task)
[ "def", "sync_request", "(", "self", ",", "command", ",", "payload", ",", "retry", "=", "2", ")", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "task", "=", "loop", ".", "create_task", "(", "self", ".", "request", "(", "command", ",", ...
Request data.
[ "Request", "data", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L193-L197
Danielhiversen/pymill
mill/__init__.py
Mill.update_rooms
async def update_rooms(self): """Request data.""" homes = await self.get_home_list() for home in homes: payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"} data = await self.request("selectRoombyHome", payload) rooms = data.get('roomInfo', []) for _room in rooms: _id = _room.get('roomId') room = self.rooms.get(_id, Room()) room.room_id = _id room.comfort_temp = _room.get("comfortTemp") room.away_temp = _room.get("awayTemp") room.sleep_temp = _room.get("sleepTemp") room.name = _room.get("roomName") room.current_mode = _room.get("currentMode") room.heat_status = _room.get("heatStatus") room.home_name = data.get("homeName") room.avg_temp = _room.get("avgTemp") self.rooms[_id] = room payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"} room_device = await self.request("selectDevicebyRoom", payload) if room_device is None: continue heater_info = room_device.get('deviceInfo', []) for _heater in heater_info: _id = _heater.get('deviceId') heater = self.heaters.get(_id, Heater()) heater.device_id = _id heater.independent_device = False heater.can_change_temp = _heater.get('canChangeTemp') heater.name = _heater.get('deviceName') heater.room = room self.heaters[_id] = heater
python
async def update_rooms(self): """Request data.""" homes = await self.get_home_list() for home in homes: payload = {"homeId": home.get("homeId"), "timeZoneNum": "+01:00"} data = await self.request("selectRoombyHome", payload) rooms = data.get('roomInfo', []) for _room in rooms: _id = _room.get('roomId') room = self.rooms.get(_id, Room()) room.room_id = _id room.comfort_temp = _room.get("comfortTemp") room.away_temp = _room.get("awayTemp") room.sleep_temp = _room.get("sleepTemp") room.name = _room.get("roomName") room.current_mode = _room.get("currentMode") room.heat_status = _room.get("heatStatus") room.home_name = data.get("homeName") room.avg_temp = _room.get("avgTemp") self.rooms[_id] = room payload = {"roomId": _room.get("roomId"), "timeZoneNum": "+01:00"} room_device = await self.request("selectDevicebyRoom", payload) if room_device is None: continue heater_info = room_device.get('deviceInfo', []) for _heater in heater_info: _id = _heater.get('deviceId') heater = self.heaters.get(_id, Heater()) heater.device_id = _id heater.independent_device = False heater.can_change_temp = _heater.get('canChangeTemp') heater.name = _heater.get('deviceName') heater.room = room self.heaters[_id] = heater
[ "async", "def", "update_rooms", "(", "self", ")", ":", "homes", "=", "await", "self", ".", "get_home_list", "(", ")", "for", "home", "in", "homes", ":", "payload", "=", "{", "\"homeId\"", ":", "home", ".", "get", "(", "\"homeId\"", ")", ",", "\"timeZon...
Request data.
[ "Request", "data", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L206-L241
Danielhiversen/pymill
mill/__init__.py
Mill.sync_update_rooms
def sync_update_rooms(self): """Request data.""" loop = asyncio.get_event_loop() task = loop.create_task(self.update_rooms()) return loop.run_until_complete(task)
python
def sync_update_rooms(self): """Request data.""" loop = asyncio.get_event_loop() task = loop.create_task(self.update_rooms()) return loop.run_until_complete(task)
[ "def", "sync_update_rooms", "(", "self", ")", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "task", "=", "loop", ".", "create_task", "(", "self", ".", "update_rooms", "(", ")", ")", "return", "loop", ".", "run_until_complete", "(", "task", ...
Request data.
[ "Request", "data", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L243-L247
Danielhiversen/pymill
mill/__init__.py
Mill.set_room_temperatures_by_name
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None, comfort_temp=None, away_temp=None): """Set room temps by name.""" if sleep_temp is None and comfort_temp is None and away_temp is None: return for room_id, _room in self.rooms.items(): if _room.name == room_name: await self.set_room_temperatures(room_id, sleep_temp, comfort_temp, away_temp) return _LOGGER.error("Could not find a room with name %s", room_name)
python
async def set_room_temperatures_by_name(self, room_name, sleep_temp=None, comfort_temp=None, away_temp=None): """Set room temps by name.""" if sleep_temp is None and comfort_temp is None and away_temp is None: return for room_id, _room in self.rooms.items(): if _room.name == room_name: await self.set_room_temperatures(room_id, sleep_temp, comfort_temp, away_temp) return _LOGGER.error("Could not find a room with name %s", room_name)
[ "async", "def", "set_room_temperatures_by_name", "(", "self", ",", "room_name", ",", "sleep_temp", "=", "None", ",", "comfort_temp", "=", "None", ",", "away_temp", "=", "None", ")", ":", "if", "sleep_temp", "is", "None", "and", "comfort_temp", "is", "None", ...
Set room temps by name.
[ "Set", "room", "temps", "by", "name", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L249-L259
Danielhiversen/pymill
mill/__init__.py
Mill.set_room_temperatures
async def set_room_temperatures(self, room_id, sleep_temp=None, comfort_temp=None, away_temp=None): """Set room temps.""" if sleep_temp is None and comfort_temp is None and away_temp is None: return room = self.rooms.get(room_id) if room is None: _LOGGER.error("No such device") return room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp room.away_temp = away_temp if away_temp else room.away_temp room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp payload = {"roomId": room_id, "sleepTemp": room.sleep_temp, "comfortTemp": room.comfort_temp, "awayTemp": room.away_temp, "homeType": 0} await self.request("changeRoomModeTempInfo", payload) self.rooms[room_id] = room
python
async def set_room_temperatures(self, room_id, sleep_temp=None, comfort_temp=None, away_temp=None): """Set room temps.""" if sleep_temp is None and comfort_temp is None and away_temp is None: return room = self.rooms.get(room_id) if room is None: _LOGGER.error("No such device") return room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp room.away_temp = away_temp if away_temp else room.away_temp room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp payload = {"roomId": room_id, "sleepTemp": room.sleep_temp, "comfortTemp": room.comfort_temp, "awayTemp": room.away_temp, "homeType": 0} await self.request("changeRoomModeTempInfo", payload) self.rooms[room_id] = room
[ "async", "def", "set_room_temperatures", "(", "self", ",", "room_id", ",", "sleep_temp", "=", "None", ",", "comfort_temp", "=", "None", ",", "away_temp", "=", "None", ")", ":", "if", "sleep_temp", "is", "None", "and", "comfort_temp", "is", "None", "and", "...
Set room temps.
[ "Set", "room", "temps", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L261-L279
Danielhiversen/pymill
mill/__init__.py
Mill.update_heaters
async def update_heaters(self): """Request data.""" homes = await self.get_home_list() for home in homes: payload = {"homeId": home.get("homeId")} data = await self.request("getIndependentDevices", payload) if data is None: continue heater_data = data.get('deviceInfo', []) if not heater_data: continue for _heater in heater_data: _id = _heater.get('deviceId') heater = self.heaters.get(_id, Heater()) heater.device_id = _id await set_heater_values(_heater, heater) self.heaters[_id] = heater for _id, heater in self.heaters.items(): if heater.independent_device: continue payload = {"deviceId": _id} _heater = await self.request("selectDevice", payload) if _heater is None: self.heaters[_id].available = False continue await set_heater_values(_heater, heater) self.heaters[_id] = heater
python
async def update_heaters(self): """Request data.""" homes = await self.get_home_list() for home in homes: payload = {"homeId": home.get("homeId")} data = await self.request("getIndependentDevices", payload) if data is None: continue heater_data = data.get('deviceInfo', []) if not heater_data: continue for _heater in heater_data: _id = _heater.get('deviceId') heater = self.heaters.get(_id, Heater()) heater.device_id = _id await set_heater_values(_heater, heater) self.heaters[_id] = heater for _id, heater in self.heaters.items(): if heater.independent_device: continue payload = {"deviceId": _id} _heater = await self.request("selectDevice", payload) if _heater is None: self.heaters[_id].available = False continue await set_heater_values(_heater, heater) self.heaters[_id] = heater
[ "async", "def", "update_heaters", "(", "self", ")", ":", "homes", "=", "await", "self", ".", "get_home_list", "(", ")", "for", "home", "in", "homes", ":", "payload", "=", "{", "\"homeId\"", ":", "home", ".", "get", "(", "\"homeId\"", ")", "}", "data", ...
Request data.
[ "Request", "data", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L281-L308
Danielhiversen/pymill
mill/__init__.py
Mill.sync_update_heaters
def sync_update_heaters(self): """Request data.""" loop = asyncio.get_event_loop() task = loop.create_task(self.update_heaters()) loop.run_until_complete(task)
python
def sync_update_heaters(self): """Request data.""" loop = asyncio.get_event_loop() task = loop.create_task(self.update_heaters()) loop.run_until_complete(task)
[ "def", "sync_update_heaters", "(", "self", ")", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "task", "=", "loop", ".", "create_task", "(", "self", ".", "update_heaters", "(", ")", ")", "loop", ".", "run_until_complete", "(", "task", ")" ]
Request data.
[ "Request", "data", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L310-L314
Danielhiversen/pymill
mill/__init__.py
Mill.throttle_update_heaters
async def throttle_update_heaters(self): """Throttle update device.""" if (self._throttle_time is not None and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES): return self._throttle_time = dt.datetime.now() await self.update_heaters()
python
async def throttle_update_heaters(self): """Throttle update device.""" if (self._throttle_time is not None and dt.datetime.now() - self._throttle_time < MIN_TIME_BETWEEN_UPDATES): return self._throttle_time = dt.datetime.now() await self.update_heaters()
[ "async", "def", "throttle_update_heaters", "(", "self", ")", ":", "if", "(", "self", ".", "_throttle_time", "is", "not", "None", "and", "dt", ".", "datetime", ".", "now", "(", ")", "-", "self", ".", "_throttle_time", "<", "MIN_TIME_BETWEEN_UPDATES", ")", "...
Throttle update device.
[ "Throttle", "update", "device", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L316-L322
Danielhiversen/pymill
mill/__init__.py
Mill.throttle_update_all_heaters
async def throttle_update_all_heaters(self): """Throttle update all devices and rooms.""" if (self._throttle_all_time is not None and dt.datetime.now() - self._throttle_all_time < MIN_TIME_BETWEEN_UPDATES): return self._throttle_all_time = dt.datetime.now() await self.find_all_heaters()
python
async def throttle_update_all_heaters(self): """Throttle update all devices and rooms.""" if (self._throttle_all_time is not None and dt.datetime.now() - self._throttle_all_time < MIN_TIME_BETWEEN_UPDATES): return self._throttle_all_time = dt.datetime.now() await self.find_all_heaters()
[ "async", "def", "throttle_update_all_heaters", "(", "self", ")", ":", "if", "(", "self", ".", "_throttle_all_time", "is", "not", "None", "and", "dt", ".", "datetime", ".", "now", "(", ")", "-", "self", ".", "_throttle_all_time", "<", "MIN_TIME_BETWEEN_UPDATES"...
Throttle update all devices and rooms.
[ "Throttle", "update", "all", "devices", "and", "rooms", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L324-L331
Danielhiversen/pymill
mill/__init__.py
Mill.heater_control
async def heater_control(self, device_id, fan_status=None, power_status=None): """Set heater temps.""" heater = self.heaters.get(device_id) if heater is None: _LOGGER.error("No such device") return if fan_status is None: fan_status = heater.fan_status if power_status is None: power_status = heater.power_status operation = 0 if fan_status == heater.fan_status else 4 payload = {"subDomain": heater.sub_domain, "deviceId": device_id, "testStatus": 1, "operation": operation, "status": power_status, "windStatus": fan_status, "holdTemp": heater.set_temp, "tempType": 0, "powerLevel": 0} await self.request("deviceControl", payload)
python
async def heater_control(self, device_id, fan_status=None, power_status=None): """Set heater temps.""" heater = self.heaters.get(device_id) if heater is None: _LOGGER.error("No such device") return if fan_status is None: fan_status = heater.fan_status if power_status is None: power_status = heater.power_status operation = 0 if fan_status == heater.fan_status else 4 payload = {"subDomain": heater.sub_domain, "deviceId": device_id, "testStatus": 1, "operation": operation, "status": power_status, "windStatus": fan_status, "holdTemp": heater.set_temp, "tempType": 0, "powerLevel": 0} await self.request("deviceControl", payload)
[ "async", "def", "heater_control", "(", "self", ",", "device_id", ",", "fan_status", "=", "None", ",", "power_status", "=", "None", ")", ":", "heater", "=", "self", ".", "heaters", ".", "get", "(", "device_id", ")", "if", "heater", "is", "None", ":", "_...
Set heater temps.
[ "Set", "heater", "temps", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L343-L364
Danielhiversen/pymill
mill/__init__.py
Mill.sync_heater_control
def sync_heater_control(self, device_id, fan_status=None, power_status=None): """Set heater temps.""" loop = asyncio.get_event_loop() task = loop.create_task(self.heater_control(device_id, fan_status, power_status)) loop.run_until_complete(task)
python
def sync_heater_control(self, device_id, fan_status=None, power_status=None): """Set heater temps.""" loop = asyncio.get_event_loop() task = loop.create_task(self.heater_control(device_id, fan_status, power_status)) loop.run_until_complete(task)
[ "def", "sync_heater_control", "(", "self", ",", "device_id", ",", "fan_status", "=", "None", ",", "power_status", "=", "None", ")", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "task", "=", "loop", ".", "create_task", "(", "self", ".", "...
Set heater temps.
[ "Set", "heater", "temps", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L366-L373
Danielhiversen/pymill
mill/__init__.py
Mill.set_heater_temp
async def set_heater_temp(self, device_id, set_temp): """Set heater temp.""" payload = {"homeType": 0, "timeZoneNum": "+02:00", "deviceId": device_id, "value": int(set_temp), "key": "holidayTemp"} await self.request("changeDeviceInfo", payload)
python
async def set_heater_temp(self, device_id, set_temp): """Set heater temp.""" payload = {"homeType": 0, "timeZoneNum": "+02:00", "deviceId": device_id, "value": int(set_temp), "key": "holidayTemp"} await self.request("changeDeviceInfo", payload)
[ "async", "def", "set_heater_temp", "(", "self", ",", "device_id", ",", "set_temp", ")", ":", "payload", "=", "{", "\"homeType\"", ":", "0", ",", "\"timeZoneNum\"", ":", "\"+02:00\"", ",", "\"deviceId\"", ":", "device_id", ",", "\"value\"", ":", "int", "(", ...
Set heater temp.
[ "Set", "heater", "temp", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L375-L382
Danielhiversen/pymill
mill/__init__.py
Mill.sync_set_heater_temp
def sync_set_heater_temp(self, device_id, set_temp): """Set heater temps.""" loop = asyncio.get_event_loop() task = loop.create_task(self.set_heater_temp(device_id, set_temp)) loop.run_until_complete(task)
python
def sync_set_heater_temp(self, device_id, set_temp): """Set heater temps.""" loop = asyncio.get_event_loop() task = loop.create_task(self.set_heater_temp(device_id, set_temp)) loop.run_until_complete(task)
[ "def", "sync_set_heater_temp", "(", "self", ",", "device_id", ",", "set_temp", ")", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "task", "=", "loop", ".", "create_task", "(", "self", ".", "set_heater_temp", "(", "device_id", ",", "set_temp",...
Set heater temps.
[ "Set", "heater", "temps", "." ]
train
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L384-L388
tadashi-aikawa/jumeaux
jumeaux/addons/log2reqs/csv.py
Executor.exec
def exec(self, payload: Log2ReqsAddOnPayload) -> TList[Request]: """Transform csv as below. "title1","/path1","a=1&b=2","header1=1&header2=2" "title2","/path2","c=1" "title3","/path3",,"header1=1&header2=2" "title4","/path4" Exception: ValueError: If fomat is invalid. """ outputs = [] with open(payload.file, encoding=self.config.encoding) as f: rs = csv.DictReader(f, ('name', 'path', 'qs', 'headers'), restval={}, dialect=self.config.dialect) for r in rs: if len(r) > 4: raise ValueError r['qs'] = urlparser.parse_qs(r['qs'], keep_blank_values=self.config.keep_blank) # XXX: This is bad implementation but looks simple... r['headers'] = urlparser.parse_qs(r['headers'], keep_blank_values=self.config.keep_blank) for k, v in r['headers'].items(): r['headers'][k] = v[0] outputs.append(r) return Request.from_dicts(outputs)
python
def exec(self, payload: Log2ReqsAddOnPayload) -> TList[Request]: """Transform csv as below. "title1","/path1","a=1&b=2","header1=1&header2=2" "title2","/path2","c=1" "title3","/path3",,"header1=1&header2=2" "title4","/path4" Exception: ValueError: If fomat is invalid. """ outputs = [] with open(payload.file, encoding=self.config.encoding) as f: rs = csv.DictReader(f, ('name', 'path', 'qs', 'headers'), restval={}, dialect=self.config.dialect) for r in rs: if len(r) > 4: raise ValueError r['qs'] = urlparser.parse_qs(r['qs'], keep_blank_values=self.config.keep_blank) # XXX: This is bad implementation but looks simple... r['headers'] = urlparser.parse_qs(r['headers'], keep_blank_values=self.config.keep_blank) for k, v in r['headers'].items(): r['headers'][k] = v[0] outputs.append(r) return Request.from_dicts(outputs)
[ "def", "exec", "(", "self", ",", "payload", ":", "Log2ReqsAddOnPayload", ")", "->", "TList", "[", "Request", "]", ":", "outputs", "=", "[", "]", "with", "open", "(", "payload", ".", "file", ",", "encoding", "=", "self", ".", "config", ".", "encoding", ...
Transform csv as below. "title1","/path1","a=1&b=2","header1=1&header2=2" "title2","/path2","c=1" "title3","/path3",,"header1=1&header2=2" "title4","/path4" Exception: ValueError: If fomat is invalid.
[ "Transform", "csv", "as", "below", ".", "title1", "/", "path1", "a", "=", "1&b", "=", "2", "header1", "=", "1&header2", "=", "2", "title2", "/", "path2", "c", "=", "1", "title3", "/", "path3", "header1", "=", "1&header2", "=", "2", "title4", "/", "...
train
https://github.com/tadashi-aikawa/jumeaux/blob/23389bde3e9b27b3a646d99289f8b5ced411f6f0/jumeaux/addons/log2reqs/csv.py#L23-L49
wasp/waspy
waspy/app.py
Application.handle_request
async def handle_request(self, request: Request) -> Response: """ coroutine: This method is called by Transport implementation to handle the actual request. It returns a webtype.Response object. """ # Get handler try: try: self._set_ctx(request) handler = self.router.get_handler_for_request(request) request.app = self response = await handler(request) response.app = self except ResponseError as r: parser = app_parsers.get(request.content_type, None) # Content-Type of an error response will be the same as the incoming request # unless a parser for that content type is not found. if not parser: content_type = r.content_type if not content_type: content_type = self.default_content_type else: content_type = request.content_type response = Response( headers=r.headers, correlation_id=r.correlation_id, body=r.body, status=r.status, content_type=content_type ) response.app = self if r.log: exc_info = sys.exc_info() self.logger.log_exception(request, exc_info, level='warning') # invoke serialization (json) to make sure it works _ = response.body except CancelledError: # This error can happen if a client closes the connection # The response shouldnt really ever be used return None except asyncio.TimeoutError: response = Response(status=HTTPStatus.GATEWAY_TIMEOUT, body={'message': 'Gateway Timeout'}) response.app = self except NackMePleaseError: """ See message where this error is defined """ raise except Exception: exc_info = sys.exc_info() self.logger.log_exception(request, exc_info) response = Response(status=HTTPStatus.INTERNAL_SERVER_ERROR, body={'message': 'Server Error'}) response.app = self if not response.correlation_id: response.correlation_id = request.correlation_id if self._cors_handler is not None: self._cors_handler.add_cors_headers(request, response) # add default headers response.headers = {**self.default_headers, **response.headers} return response
python
async def handle_request(self, request: Request) -> Response: """ coroutine: This method is called by Transport implementation to handle the actual request. It returns a webtype.Response object. """ # Get handler try: try: self._set_ctx(request) handler = self.router.get_handler_for_request(request) request.app = self response = await handler(request) response.app = self except ResponseError as r: parser = app_parsers.get(request.content_type, None) # Content-Type of an error response will be the same as the incoming request # unless a parser for that content type is not found. if not parser: content_type = r.content_type if not content_type: content_type = self.default_content_type else: content_type = request.content_type response = Response( headers=r.headers, correlation_id=r.correlation_id, body=r.body, status=r.status, content_type=content_type ) response.app = self if r.log: exc_info = sys.exc_info() self.logger.log_exception(request, exc_info, level='warning') # invoke serialization (json) to make sure it works _ = response.body except CancelledError: # This error can happen if a client closes the connection # The response shouldnt really ever be used return None except asyncio.TimeoutError: response = Response(status=HTTPStatus.GATEWAY_TIMEOUT, body={'message': 'Gateway Timeout'}) response.app = self except NackMePleaseError: """ See message where this error is defined """ raise except Exception: exc_info = sys.exc_info() self.logger.log_exception(request, exc_info) response = Response(status=HTTPStatus.INTERNAL_SERVER_ERROR, body={'message': 'Server Error'}) response.app = self if not response.correlation_id: response.correlation_id = request.correlation_id if self._cors_handler is not None: self._cors_handler.add_cors_headers(request, response) # add default headers response.headers = {**self.default_headers, **response.headers} return response
[ "async", "def", "handle_request", "(", "self", ",", "request", ":", "Request", ")", "->", "Response", ":", "# Get handler", "try", ":", "try", ":", "self", ".", "_set_ctx", "(", "request", ")", "handler", "=", "self", ".", "router", ".", "get_handler_for_r...
coroutine: This method is called by Transport implementation to handle the actual request. It returns a webtype.Response object.
[ "coroutine", ":", "This", "method", "is", "called", "by", "Transport", "implementation", "to", "handle", "the", "actual", "request", ".", "It", "returns", "a", "webtype", ".", "Response", "object", "." ]
train
https://github.com/wasp/waspy/blob/31cc352f300a089f9607d7f13d93591d4c69d5ec/waspy/app.py#L169-L234
martinmcbride/pysound
pysound/effects.py
echo
def echo(params, source, delay, strength): ''' Create an echo :param params: :param source: :param delay: :param strength: :return: ''' source = create_buffer(params, source) delay = create_buffer(params, delay) strength = create_buffer(params, strength) output = source[:] for i in range(params.length): d = int(i - delay[i]) if 0 <= d < params.length: output[i] += source[d]*strength[i] return output
python
def echo(params, source, delay, strength): ''' Create an echo :param params: :param source: :param delay: :param strength: :return: ''' source = create_buffer(params, source) delay = create_buffer(params, delay) strength = create_buffer(params, strength) output = source[:] for i in range(params.length): d = int(i - delay[i]) if 0 <= d < params.length: output[i] += source[d]*strength[i] return output
[ "def", "echo", "(", "params", ",", "source", ",", "delay", ",", "strength", ")", ":", "source", "=", "create_buffer", "(", "params", ",", "source", ")", "delay", "=", "create_buffer", "(", "params", ",", "delay", ")", "strength", "=", "create_buffer", "(...
Create an echo :param params: :param source: :param delay: :param strength: :return:
[ "Create", "an", "echo", ":", "param", "params", ":", ":", "param", "source", ":", ":", "param", "delay", ":", ":", "param", "strength", ":", ":", "return", ":" ]
train
https://github.com/martinmcbride/pysound/blob/253c8f712ad475318350e5a8ba21f6fefd7a3de2/pysound/effects.py#L11-L28
proteanhq/protean
src/protean/core/queryset.py
QuerySet._clone
def _clone(self): """ Return a copy of the current QuerySet. """ clone = self.__class__(self._entity_cls, criteria=self._criteria, offset=self._offset, limit=self._limit, order_by=self._order_by) return clone
python
def _clone(self): """ Return a copy of the current QuerySet. """ clone = self.__class__(self._entity_cls, criteria=self._criteria, offset=self._offset, limit=self._limit, order_by=self._order_by) return clone
[ "def", "_clone", "(", "self", ")", ":", "clone", "=", "self", ".", "__class__", "(", "self", ".", "_entity_cls", ",", "criteria", "=", "self", ".", "_criteria", ",", "offset", "=", "self", ".", "_offset", ",", "limit", "=", "self", ".", "_limit", ","...
Return a copy of the current QuerySet.
[ "Return", "a", "copy", "of", "the", "current", "QuerySet", "." ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L58-L65
proteanhq/protean
src/protean/core/queryset.py
QuerySet._add_q
def _add_q(self, q_object): """Add a Q-object to the current filter.""" self._criteria = self._criteria._combine(q_object, q_object.connector)
python
def _add_q(self, q_object): """Add a Q-object to the current filter.""" self._criteria = self._criteria._combine(q_object, q_object.connector)
[ "def", "_add_q", "(", "self", ",", "q_object", ")", ":", "self", ".", "_criteria", "=", "self", ".", "_criteria", ".", "_combine", "(", "q_object", ",", "q_object", ".", "connector", ")" ]
Add a Q-object to the current filter.
[ "Add", "a", "Q", "-", "object", "to", "the", "current", "filter", "." ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L67-L69
proteanhq/protean
src/protean/core/queryset.py
QuerySet.limit
def limit(self, limit): """Limit number of records""" clone = self._clone() if isinstance(limit, int): clone._limit = limit return clone
python
def limit(self, limit): """Limit number of records""" clone = self._clone() if isinstance(limit, int): clone._limit = limit return clone
[ "def", "limit", "(", "self", ",", "limit", ")", ":", "clone", "=", "self", ".", "_clone", "(", ")", "if", "isinstance", "(", "limit", ",", "int", ")", ":", "clone", ".", "_limit", "=", "limit", "return", "clone" ]
Limit number of records
[ "Limit", "number", "of", "records" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L93-L100
proteanhq/protean
src/protean/core/queryset.py
QuerySet.offset
def offset(self, offset): """Fetch results after `offset` value""" clone = self._clone() if isinstance(offset, int): clone._offset = offset return clone
python
def offset(self, offset): """Fetch results after `offset` value""" clone = self._clone() if isinstance(offset, int): clone._offset = offset return clone
[ "def", "offset", "(", "self", ",", "offset", ")", ":", "clone", "=", "self", ".", "_clone", "(", ")", "if", "isinstance", "(", "offset", ",", "int", ")", ":", "clone", ".", "_offset", "=", "offset", "return", "clone" ]
Fetch results after `offset` value
[ "Fetch", "results", "after", "offset", "value" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L102-L109
proteanhq/protean
src/protean/core/queryset.py
QuerySet.order_by
def order_by(self, order_by: Union[set, str]): """Update order_by setting for filter set""" clone = self._clone() if isinstance(order_by, str): order_by = {order_by} clone._order_by = clone._order_by.union(order_by) return clone
python
def order_by(self, order_by: Union[set, str]): """Update order_by setting for filter set""" clone = self._clone() if isinstance(order_by, str): order_by = {order_by} clone._order_by = clone._order_by.union(order_by) return clone
[ "def", "order_by", "(", "self", ",", "order_by", ":", "Union", "[", "set", ",", "str", "]", ")", ":", "clone", "=", "self", ".", "_clone", "(", ")", "if", "isinstance", "(", "order_by", ",", "str", ")", ":", "order_by", "=", "{", "order_by", "}", ...
Update order_by setting for filter set
[ "Update", "order_by", "setting", "for", "filter", "set" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L111-L119
proteanhq/protean
src/protean/core/queryset.py
QuerySet.all
def all(self): """Primary method to fetch data based on filters Also trigged when the QuerySet is evaluated by calling one of the following methods: * len() * bool() * list() * Iteration * Slicing """ logger.debug(f'Query `{self.__class__.__name__}` objects with filters {self}') # Destroy any cached results self._result_cache = None # Fetch Model class and connected repository from Repository Factory model_cls = repo_factory.get_model(self._entity_cls) repository = repo_factory.get_repository(self._entity_cls) # order_by clause must be list of keys order_by = self._entity_cls.meta_.order_by if not self._order_by else self._order_by # Call the read method of the repository results = repository.filter(self._criteria, self._offset, self._limit, order_by) # Convert the returned results to entity and return it entity_items = [] for item in results.items: entity = model_cls.to_entity(item) entity.state_.mark_retrieved() entity_items.append(entity) results.items = entity_items # Cache results self._result_cache = results return results
python
def all(self): """Primary method to fetch data based on filters Also trigged when the QuerySet is evaluated by calling one of the following methods: * len() * bool() * list() * Iteration * Slicing """ logger.debug(f'Query `{self.__class__.__name__}` objects with filters {self}') # Destroy any cached results self._result_cache = None # Fetch Model class and connected repository from Repository Factory model_cls = repo_factory.get_model(self._entity_cls) repository = repo_factory.get_repository(self._entity_cls) # order_by clause must be list of keys order_by = self._entity_cls.meta_.order_by if not self._order_by else self._order_by # Call the read method of the repository results = repository.filter(self._criteria, self._offset, self._limit, order_by) # Convert the returned results to entity and return it entity_items = [] for item in results.items: entity = model_cls.to_entity(item) entity.state_.mark_retrieved() entity_items.append(entity) results.items = entity_items # Cache results self._result_cache = results return results
[ "def", "all", "(", "self", ")", ":", "logger", ".", "debug", "(", "f'Query `{self.__class__.__name__}` objects with filters {self}'", ")", "# Destroy any cached results", "self", ".", "_result_cache", "=", "None", "# Fetch Model class and connected repository from Repository Fact...
Primary method to fetch data based on filters Also trigged when the QuerySet is evaluated by calling one of the following methods: * len() * bool() * list() * Iteration * Slicing
[ "Primary", "method", "to", "fetch", "data", "based", "on", "filters" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L121-L157
proteanhq/protean
src/protean/core/queryset.py
QuerySet.update
def update(self, *data, **kwargs): """Updates all objects with details given if they match a set of conditions supplied. This method updates each object individually, to fire callback methods and ensure validations are run. Returns the number of objects matched (which may not be equal to the number of objects updated if objects rows already have the new value). """ updated_item_count = 0 try: items = self.all() for item in items: item.update(*data, **kwargs) updated_item_count += 1 except Exception: # FIXME Log Exception raise return updated_item_count
python
def update(self, *data, **kwargs): """Updates all objects with details given if they match a set of conditions supplied. This method updates each object individually, to fire callback methods and ensure validations are run. Returns the number of objects matched (which may not be equal to the number of objects updated if objects rows already have the new value). """ updated_item_count = 0 try: items = self.all() for item in items: item.update(*data, **kwargs) updated_item_count += 1 except Exception: # FIXME Log Exception raise return updated_item_count
[ "def", "update", "(", "self", ",", "*", "data", ",", "*", "*", "kwargs", ")", ":", "updated_item_count", "=", "0", "try", ":", "items", "=", "self", ".", "all", "(", ")", "for", "item", "in", "items", ":", "item", ".", "update", "(", "*", "data",...
Updates all objects with details given if they match a set of conditions supplied. This method updates each object individually, to fire callback methods and ensure validations are run. Returns the number of objects matched (which may not be equal to the number of objects updated if objects rows already have the new value).
[ "Updates", "all", "objects", "with", "details", "given", "if", "they", "match", "a", "set", "of", "conditions", "supplied", "." ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L159-L179
proteanhq/protean
src/protean/core/queryset.py
QuerySet.raw
def raw(self, query: Any, data: Any = None): """Runs raw query directly on the database and returns Entity objects Note that this method will raise an exception if the returned objects are not of the Entity type. `query` is not checked for correctness or validity, and any errors thrown by the plugin or database are passed as-is. Data passed will be transferred as-is to the plugin. All other query options like `order_by`, `offset` and `limit` are ignored for this action. """ logger.debug(f'Query `{self.__class__.__name__}` objects with raw query {query}') # Destroy any cached results self._result_cache = None # Fetch Model class and connected repository from Repository Factory model_cls = repo_factory.get_model(self._entity_cls) repository = repo_factory.get_repository(self._entity_cls) try: # Call the raw method of the repository results = repository.raw(query, data) # Convert the returned results to entity and return it entity_items = [] for item in results.items: entity = model_cls.to_entity(item) entity.state_.mark_retrieved() entity_items.append(entity) results.items = entity_items # Cache results self._result_cache = results except Exception: # FIXME Log Exception raise return results
python
def raw(self, query: Any, data: Any = None): """Runs raw query directly on the database and returns Entity objects Note that this method will raise an exception if the returned objects are not of the Entity type. `query` is not checked for correctness or validity, and any errors thrown by the plugin or database are passed as-is. Data passed will be transferred as-is to the plugin. All other query options like `order_by`, `offset` and `limit` are ignored for this action. """ logger.debug(f'Query `{self.__class__.__name__}` objects with raw query {query}') # Destroy any cached results self._result_cache = None # Fetch Model class and connected repository from Repository Factory model_cls = repo_factory.get_model(self._entity_cls) repository = repo_factory.get_repository(self._entity_cls) try: # Call the raw method of the repository results = repository.raw(query, data) # Convert the returned results to entity and return it entity_items = [] for item in results.items: entity = model_cls.to_entity(item) entity.state_.mark_retrieved() entity_items.append(entity) results.items = entity_items # Cache results self._result_cache = results except Exception: # FIXME Log Exception raise return results
[ "def", "raw", "(", "self", ",", "query", ":", "Any", ",", "data", ":", "Any", "=", "None", ")", ":", "logger", ".", "debug", "(", "f'Query `{self.__class__.__name__}` objects with raw query {query}'", ")", "# Destroy any cached results", "self", ".", "_result_cache"...
Runs raw query directly on the database and returns Entity objects Note that this method will raise an exception if the returned objects are not of the Entity type. `query` is not checked for correctness or validity, and any errors thrown by the plugin or database are passed as-is. Data passed will be transferred as-is to the plugin. All other query options like `order_by`, `offset` and `limit` are ignored for this action.
[ "Runs", "raw", "query", "directly", "on", "the", "database", "and", "returns", "Entity", "objects" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L181-L219
proteanhq/protean
src/protean/core/queryset.py
QuerySet.delete
def delete(self): """Deletes matching objects from the Repository Does not throw error if no objects are matched. Returns the number of objects matched (which may not be equal to the number of objects deleted if objects rows already have the new value). """ # Fetch Model class and connected repository from Repository Factory deleted_item_count = 0 try: items = self.all() for item in items: item.delete() deleted_item_count += 1 except Exception: # FIXME Log Exception raise return deleted_item_count
python
def delete(self): """Deletes matching objects from the Repository Does not throw error if no objects are matched. Returns the number of objects matched (which may not be equal to the number of objects deleted if objects rows already have the new value). """ # Fetch Model class and connected repository from Repository Factory deleted_item_count = 0 try: items = self.all() for item in items: item.delete() deleted_item_count += 1 except Exception: # FIXME Log Exception raise return deleted_item_count
[ "def", "delete", "(", "self", ")", ":", "# Fetch Model class and connected repository from Repository Factory", "deleted_item_count", "=", "0", "try", ":", "items", "=", "self", ".", "all", "(", ")", "for", "item", "in", "items", ":", "item", ".", "delete", "(",...
Deletes matching objects from the Repository Does not throw error if no objects are matched. Returns the number of objects matched (which may not be equal to the number of objects deleted if objects rows already have the new value).
[ "Deletes", "matching", "objects", "from", "the", "Repository" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L221-L241
proteanhq/protean
src/protean/core/queryset.py
QuerySet.update_all
def update_all(self, *args, **kwargs): """Updates all objects with details given if they match a set of conditions supplied. This method forwards filters and updates directly to the repository. It does not instantiate entities and it does not trigger Entity callbacks or validations. Update values can be specified either as a dict, or keyword arguments. Returns the number of objects matched (which may not be equal to the number of objects updated if objects rows already have the new value). """ updated_item_count = 0 repository = repo_factory.get_repository(self._entity_cls) try: updated_item_count = repository.update_all(self._criteria, *args, **kwargs) except Exception: # FIXME Log Exception raise return updated_item_count
python
def update_all(self, *args, **kwargs): """Updates all objects with details given if they match a set of conditions supplied. This method forwards filters and updates directly to the repository. It does not instantiate entities and it does not trigger Entity callbacks or validations. Update values can be specified either as a dict, or keyword arguments. Returns the number of objects matched (which may not be equal to the number of objects updated if objects rows already have the new value). """ updated_item_count = 0 repository = repo_factory.get_repository(self._entity_cls) try: updated_item_count = repository.update_all(self._criteria, *args, **kwargs) except Exception: # FIXME Log Exception raise return updated_item_count
[ "def", "update_all", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "updated_item_count", "=", "0", "repository", "=", "repo_factory", ".", "get_repository", "(", "self", ".", "_entity_cls", ")", "try", ":", "updated_item_count", "=", "r...
Updates all objects with details given if they match a set of conditions supplied. This method forwards filters and updates directly to the repository. It does not instantiate entities and it does not trigger Entity callbacks or validations. Update values can be specified either as a dict, or keyword arguments. Returns the number of objects matched (which may not be equal to the number of objects updated if objects rows already have the new value).
[ "Updates", "all", "objects", "with", "details", "given", "if", "they", "match", "a", "set", "of", "conditions", "supplied", "." ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L243-L264
proteanhq/protean
src/protean/core/queryset.py
QuerySet.delete_all
def delete_all(self, *args, **kwargs): """Deletes objects that match a set of conditions supplied. This method forwards filters directly to the repository. It does not instantiate entities and it does not trigger Entity callbacks or validations. Returns the number of objects matched and deleted. """ deleted_item_count = 0 repository = repo_factory.get_repository(self._entity_cls) try: deleted_item_count = repository.delete_all(self._criteria) except Exception: # FIXME Log Exception raise return deleted_item_count
python
def delete_all(self, *args, **kwargs): """Deletes objects that match a set of conditions supplied. This method forwards filters directly to the repository. It does not instantiate entities and it does not trigger Entity callbacks or validations. Returns the number of objects matched and deleted. """ deleted_item_count = 0 repository = repo_factory.get_repository(self._entity_cls) try: deleted_item_count = repository.delete_all(self._criteria) except Exception: # FIXME Log Exception raise return deleted_item_count
[ "def", "delete_all", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "deleted_item_count", "=", "0", "repository", "=", "repo_factory", ".", "get_repository", "(", "self", ".", "_entity_cls", ")", "try", ":", "deleted_item_count", "=", "r...
Deletes objects that match a set of conditions supplied. This method forwards filters directly to the repository. It does not instantiate entities and it does not trigger Entity callbacks or validations. Returns the number of objects matched and deleted.
[ "Deletes", "objects", "that", "match", "a", "set", "of", "conditions", "supplied", "." ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L266-L282
proteanhq/protean
src/protean/core/queryset.py
QuerySet.total
def total(self): """Return the total number of records""" if self._result_cache: return self._result_cache.total return self.all().total
python
def total(self): """Return the total number of records""" if self._result_cache: return self._result_cache.total return self.all().total
[ "def", "total", "(", "self", ")", ":", "if", "self", ".", "_result_cache", ":", "return", "self", ".", "_result_cache", ".", "total", "return", "self", ".", "all", "(", ")", ".", "total" ]
Return the total number of records
[ "Return", "the", "total", "number", "of", "records" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L328-L333
proteanhq/protean
src/protean/core/queryset.py
QuerySet.items
def items(self): """Return result values""" if self._result_cache: return self._result_cache.items return self.all().items
python
def items(self): """Return result values""" if self._result_cache: return self._result_cache.items return self.all().items
[ "def", "items", "(", "self", ")", ":", "if", "self", ".", "_result_cache", ":", "return", "self", ".", "_result_cache", ".", "items", "return", "self", ".", "all", "(", ")", ".", "items" ]
Return result values
[ "Return", "result", "values" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L336-L341
proteanhq/protean
src/protean/core/queryset.py
QuerySet.first
def first(self): """Return the first result""" if self._result_cache: return self._result_cache.first return self.all().first
python
def first(self): """Return the first result""" if self._result_cache: return self._result_cache.first return self.all().first
[ "def", "first", "(", "self", ")", ":", "if", "self", ".", "_result_cache", ":", "return", "self", ".", "_result_cache", ".", "first", "return", "self", ".", "all", "(", ")", ".", "first" ]
Return the first result
[ "Return", "the", "first", "result" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L344-L349
proteanhq/protean
src/protean/core/queryset.py
QuerySet.has_next
def has_next(self): """Return True if there are more values present""" if self._result_cache: return self._result_cache.has_next return self.all().has_next
python
def has_next(self): """Return True if there are more values present""" if self._result_cache: return self._result_cache.has_next return self.all().has_next
[ "def", "has_next", "(", "self", ")", ":", "if", "self", ".", "_result_cache", ":", "return", "self", ".", "_result_cache", ".", "has_next", "return", "self", ".", "all", "(", ")", ".", "has_next" ]
Return True if there are more values present
[ "Return", "True", "if", "there", "are", "more", "values", "present" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L352-L357
proteanhq/protean
src/protean/core/queryset.py
QuerySet.has_prev
def has_prev(self): """Return True if there are previous values present""" if self._result_cache: return self._result_cache.has_prev return self.all().has_prev
python
def has_prev(self): """Return True if there are previous values present""" if self._result_cache: return self._result_cache.has_prev return self.all().has_prev
[ "def", "has_prev", "(", "self", ")", ":", "if", "self", ".", "_result_cache", ":", "return", "self", ".", "_result_cache", ".", "has_prev", "return", "self", ".", "all", "(", ")", ".", "has_prev" ]
Return True if there are previous values present
[ "Return", "True", "if", "there", "are", "previous", "values", "present" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L360-L365
proteanhq/protean
src/protean/services/email/utils.py
get_connection
def get_connection(backend=None, fail_silently=False, **kwargs): """Load an email backend and return an instance of it. If backend is None (default), use settings.EMAIL_BACKEND. Both fail_silently and other keyword arguments are used in the constructor of the backend. """ klass = perform_import(backend or active_config.EMAIL_BACKEND) return klass(fail_silently=fail_silently, **kwargs)
python
def get_connection(backend=None, fail_silently=False, **kwargs): """Load an email backend and return an instance of it. If backend is None (default), use settings.EMAIL_BACKEND. Both fail_silently and other keyword arguments are used in the constructor of the backend. """ klass = perform_import(backend or active_config.EMAIL_BACKEND) return klass(fail_silently=fail_silently, **kwargs)
[ "def", "get_connection", "(", "backend", "=", "None", ",", "fail_silently", "=", "False", ",", "*", "*", "kwargs", ")", ":", "klass", "=", "perform_import", "(", "backend", "or", "active_config", ".", "EMAIL_BACKEND", ")", "return", "klass", "(", "fail_silen...
Load an email backend and return an instance of it. If backend is None (default), use settings.EMAIL_BACKEND. Both fail_silently and other keyword arguments are used in the constructor of the backend.
[ "Load", "an", "email", "backend", "and", "return", "an", "instance", "of", "it", ".", "If", "backend", "is", "None", "(", "default", ")", "use", "settings", ".", "EMAIL_BACKEND", ".", "Both", "fail_silently", "and", "other", "keyword", "arguments", "are", ...
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/services/email/utils.py#L8-L15
proteanhq/protean
src/protean/services/email/utils.py
send_mail
def send_mail(subject, message, recipient_list, from_email=None, fail_silently=False, auth_user=None, auth_password=None, connection=None, **kwargs): """ Easy wrapper for sending a single message to a recipient list. All members of the recipient list will see the other recipients in the 'To' field. """ connection = connection or get_connection( username=auth_user, password=auth_password, fail_silently=fail_silently, ) mail_message = EmailMessage(subject, message, from_email, recipient_list, **kwargs) return connection.send_messages([mail_message])
python
def send_mail(subject, message, recipient_list, from_email=None, fail_silently=False, auth_user=None, auth_password=None, connection=None, **kwargs): """ Easy wrapper for sending a single message to a recipient list. All members of the recipient list will see the other recipients in the 'To' field. """ connection = connection or get_connection( username=auth_user, password=auth_password, fail_silently=fail_silently, ) mail_message = EmailMessage(subject, message, from_email, recipient_list, **kwargs) return connection.send_messages([mail_message])
[ "def", "send_mail", "(", "subject", ",", "message", ",", "recipient_list", ",", "from_email", "=", "None", ",", "fail_silently", "=", "False", ",", "auth_user", "=", "None", ",", "auth_password", "=", "None", ",", "connection", "=", "None", ",", "*", "*", ...
Easy wrapper for sending a single message to a recipient list. All members of the recipient list will see the other recipients in the 'To' field.
[ "Easy", "wrapper", "for", "sending", "a", "single", "message", "to", "a", "recipient", "list", ".", "All", "members", "of", "the", "recipient", "list", "will", "see", "the", "other", "recipients", "in", "the", "To", "field", "." ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/services/email/utils.py#L18-L34
proteanhq/protean
src/protean/services/email/utils.py
send_mass_mail
def send_mass_mail(data_tuple, fail_silently=False, auth_user=None, auth_password=None, connection=None): """ Given a data_tuple of (subject, message, from_email, recipient_list), send each message to each recipient list. Return the number of emails sent. If from_email is None, use the DEFAULT_FROM_EMAIL setting. """ connection = connection or get_connection( username=auth_user, password=auth_password, fail_silently=fail_silently, ) messages = [ EmailMessage(subject, message, sender, recipient) for subject, message, sender, recipient in data_tuple ] return connection.send_messages(messages)
python
def send_mass_mail(data_tuple, fail_silently=False, auth_user=None, auth_password=None, connection=None): """ Given a data_tuple of (subject, message, from_email, recipient_list), send each message to each recipient list. Return the number of emails sent. If from_email is None, use the DEFAULT_FROM_EMAIL setting. """ connection = connection or get_connection( username=auth_user, password=auth_password, fail_silently=fail_silently, ) messages = [ EmailMessage(subject, message, sender, recipient) for subject, message, sender, recipient in data_tuple ] return connection.send_messages(messages)
[ "def", "send_mass_mail", "(", "data_tuple", ",", "fail_silently", "=", "False", ",", "auth_user", "=", "None", ",", "auth_password", "=", "None", ",", "connection", "=", "None", ")", ":", "connection", "=", "connection", "or", "get_connection", "(", "username"...
Given a data_tuple of (subject, message, from_email, recipient_list), send each message to each recipient list. Return the number of emails sent. If from_email is None, use the DEFAULT_FROM_EMAIL setting.
[ "Given", "a", "data_tuple", "of", "(", "subject", "message", "from_email", "recipient_list", ")", "send", "each", "message", "to", "each", "recipient", "list", ".", "Return", "the", "number", "of", "emails", "sent", ".", "If", "from_email", "is", "None", "us...
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/services/email/utils.py#L37-L54
proteanhq/protean
src/protean/core/transport/response.py
ResponseFailure.value
def value(self): """Utility method to retrieve Response Object information""" # Set the code to the status value if isinstance(self.code, Status): code = self.code.value else: code = self.code return {'code': code, 'errors': self.errors}
python
def value(self): """Utility method to retrieve Response Object information""" # Set the code to the status value if isinstance(self.code, Status): code = self.code.value else: code = self.code return {'code': code, 'errors': self.errors}
[ "def", "value", "(", "self", ")", ":", "# Set the code to the status value", "if", "isinstance", "(", "self", ".", "code", ",", "Status", ")", ":", "code", "=", "self", ".", "code", ".", "value", "else", ":", "code", "=", "self", ".", "code", "return", ...
Utility method to retrieve Response Object information
[ "Utility", "method", "to", "retrieve", "Response", "Object", "information" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/transport/response.py#L62-L69
proteanhq/protean
src/protean/core/transport/response.py
ResponseFailure.build_response
def build_response(cls, code=Status.SYSTEM_ERROR, errors=None): """Utility method to build a new Resource Error object. Can be used to build all kinds of error messages. """ errors = [errors] if not isinstance(errors, list) else errors return cls(code, errors)
python
def build_response(cls, code=Status.SYSTEM_ERROR, errors=None): """Utility method to build a new Resource Error object. Can be used to build all kinds of error messages. """ errors = [errors] if not isinstance(errors, list) else errors return cls(code, errors)
[ "def", "build_response", "(", "cls", ",", "code", "=", "Status", ".", "SYSTEM_ERROR", ",", "errors", "=", "None", ")", ":", "errors", "=", "[", "errors", "]", "if", "not", "isinstance", "(", "errors", ",", "list", ")", "else", "errors", "return", "cls"...
Utility method to build a new Resource Error object. Can be used to build all kinds of error messages.
[ "Utility", "method", "to", "build", "a", "new", "Resource", "Error", "object", ".", "Can", "be", "used", "to", "build", "all", "kinds", "of", "error", "messages", "." ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/transport/response.py#L72-L77
proteanhq/protean
src/protean/core/transport/response.py
ResponseFailure.build_from_invalid_request
def build_from_invalid_request(cls, invalid_request_object): """Utility method to build a new Error object from parameters. Typically used to build HTTP 422 error response.""" errors = [{err['parameter']: err['message']} for err in invalid_request_object.errors] return cls.build_response(Status.UNPROCESSABLE_ENTITY, errors)
python
def build_from_invalid_request(cls, invalid_request_object): """Utility method to build a new Error object from parameters. Typically used to build HTTP 422 error response.""" errors = [{err['parameter']: err['message']} for err in invalid_request_object.errors] return cls.build_response(Status.UNPROCESSABLE_ENTITY, errors)
[ "def", "build_from_invalid_request", "(", "cls", ",", "invalid_request_object", ")", ":", "errors", "=", "[", "{", "err", "[", "'parameter'", "]", ":", "err", "[", "'message'", "]", "}", "for", "err", "in", "invalid_request_object", ".", "errors", "]", "retu...
Utility method to build a new Error object from parameters. Typically used to build HTTP 422 error response.
[ "Utility", "method", "to", "build", "a", "new", "Error", "object", "from", "parameters", ".", "Typically", "used", "to", "build", "HTTP", "422", "error", "response", "." ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/transport/response.py#L80-L84
proteanhq/protean
src/protean/core/transport/response.py
ResponseFailure.build_not_found
def build_not_found(cls, errors=None): """Utility method to build a HTTP 404 Resource Error response""" errors = [errors] if not isinstance(errors, list) else errors return cls(Status.NOT_FOUND, errors)
python
def build_not_found(cls, errors=None): """Utility method to build a HTTP 404 Resource Error response""" errors = [errors] if not isinstance(errors, list) else errors return cls(Status.NOT_FOUND, errors)
[ "def", "build_not_found", "(", "cls", ",", "errors", "=", "None", ")", ":", "errors", "=", "[", "errors", "]", "if", "not", "isinstance", "(", "errors", ",", "list", ")", "else", "errors", "return", "cls", "(", "Status", ".", "NOT_FOUND", ",", "errors"...
Utility method to build a HTTP 404 Resource Error response
[ "Utility", "method", "to", "build", "a", "HTTP", "404", "Resource", "Error", "response" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/transport/response.py#L87-L90
proteanhq/protean
src/protean/core/transport/response.py
ResponseFailure.build_system_error
def build_system_error(cls, errors=None): """Utility method to build a HTTP 500 System Error response""" errors = [errors] if not isinstance(errors, list) else errors return cls(Status.SYSTEM_ERROR, errors)
python
def build_system_error(cls, errors=None): """Utility method to build a HTTP 500 System Error response""" errors = [errors] if not isinstance(errors, list) else errors return cls(Status.SYSTEM_ERROR, errors)
[ "def", "build_system_error", "(", "cls", ",", "errors", "=", "None", ")", ":", "errors", "=", "[", "errors", "]", "if", "not", "isinstance", "(", "errors", ",", "list", ")", "else", "errors", "return", "cls", "(", "Status", ".", "SYSTEM_ERROR", ",", "e...
Utility method to build a HTTP 500 System Error response
[ "Utility", "method", "to", "build", "a", "HTTP", "500", "System", "Error", "response" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/transport/response.py#L93-L96
proteanhq/protean
src/protean/core/transport/response.py
ResponseFailure.build_parameters_error
def build_parameters_error(cls, errors=None): """Utility method to build a HTTP 400 Parameter Error response""" errors = [errors] if not isinstance(errors, list) else errors return cls(Status.PARAMETERS_ERROR, errors)
python
def build_parameters_error(cls, errors=None): """Utility method to build a HTTP 400 Parameter Error response""" errors = [errors] if not isinstance(errors, list) else errors return cls(Status.PARAMETERS_ERROR, errors)
[ "def", "build_parameters_error", "(", "cls", ",", "errors", "=", "None", ")", ":", "errors", "=", "[", "errors", "]", "if", "not", "isinstance", "(", "errors", ",", "list", ")", "else", "errors", "return", "cls", "(", "Status", ".", "PARAMETERS_ERROR", "...
Utility method to build a HTTP 400 Parameter Error response
[ "Utility", "method", "to", "build", "a", "HTTP", "400", "Parameter", "Error", "response" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/transport/response.py#L99-L102
proteanhq/protean
src/protean/core/transport/response.py
ResponseFailure.build_unprocessable_error
def build_unprocessable_error(cls, errors=None): """Utility method to build a HTTP 422 Parameter Error object""" errors = [errors] if not isinstance(errors, list) else errors return cls(Status.UNPROCESSABLE_ENTITY, errors)
python
def build_unprocessable_error(cls, errors=None): """Utility method to build a HTTP 422 Parameter Error object""" errors = [errors] if not isinstance(errors, list) else errors return cls(Status.UNPROCESSABLE_ENTITY, errors)
[ "def", "build_unprocessable_error", "(", "cls", ",", "errors", "=", "None", ")", ":", "errors", "=", "[", "errors", "]", "if", "not", "isinstance", "(", "errors", ",", "list", ")", "else", "errors", "return", "cls", "(", "Status", ".", "UNPROCESSABLE_ENTIT...
Utility method to build a HTTP 422 Parameter Error object
[ "Utility", "method", "to", "build", "a", "HTTP", "422", "Parameter", "Error", "object" ]
train
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/transport/response.py#L105-L108
martinmcbride/pysound
pysound/envelopes.py
linseg
def linseg(params, start=0, end=1): ''' Signal starts at start value, ramps linearly up to end value :param params: buffer parameters, controls length of signal created :param start: start value (number) :param end: end value (number) :return: array of resulting signal ''' return np.linspace(start, end, num=params.length, endpoint=True)
python
def linseg(params, start=0, end=1): ''' Signal starts at start value, ramps linearly up to end value :param params: buffer parameters, controls length of signal created :param start: start value (number) :param end: end value (number) :return: array of resulting signal ''' return np.linspace(start, end, num=params.length, endpoint=True)
[ "def", "linseg", "(", "params", ",", "start", "=", "0", ",", "end", "=", "1", ")", ":", "return", "np", ".", "linspace", "(", "start", ",", "end", ",", "num", "=", "params", ".", "length", ",", "endpoint", "=", "True", ")" ]
Signal starts at start value, ramps linearly up to end value :param params: buffer parameters, controls length of signal created :param start: start value (number) :param end: end value (number) :return: array of resulting signal
[ "Signal", "starts", "at", "start", "value", "ramps", "linearly", "up", "to", "end", "value", ":", "param", "params", ":", "buffer", "parameters", "controls", "length", "of", "signal", "created", ":", "param", "start", ":", "start", "value", "(", "number", ...
train
https://github.com/martinmcbride/pysound/blob/253c8f712ad475318350e5a8ba21f6fefd7a3de2/pysound/envelopes.py#L8-L16
martinmcbride/pysound
pysound/envelopes.py
attack_decay
def attack_decay(params, attack, start=0, peak=1): ''' Signal starts at min value, ramps linearly up to max value during the attack time, than ramps back down to min value over remaining time :param params: buffer parameters, controls length of signal created :param attack: attack time, in samples :param start: start value (number) :param peak: peak value (number) :return: ''' builder = GenericEnvelope(params) builder.set(start) builder.linseg(peak, attack) if attack < params.length: builder.linseg(start, params.length - attack) return builder.build()
python
def attack_decay(params, attack, start=0, peak=1): ''' Signal starts at min value, ramps linearly up to max value during the attack time, than ramps back down to min value over remaining time :param params: buffer parameters, controls length of signal created :param attack: attack time, in samples :param start: start value (number) :param peak: peak value (number) :return: ''' builder = GenericEnvelope(params) builder.set(start) builder.linseg(peak, attack) if attack < params.length: builder.linseg(start, params.length - attack) return builder.build()
[ "def", "attack_decay", "(", "params", ",", "attack", ",", "start", "=", "0", ",", "peak", "=", "1", ")", ":", "builder", "=", "GenericEnvelope", "(", "params", ")", "builder", ".", "set", "(", "start", ")", "builder", ".", "linseg", "(", "peak", ",",...
Signal starts at min value, ramps linearly up to max value during the attack time, than ramps back down to min value over remaining time :param params: buffer parameters, controls length of signal created :param attack: attack time, in samples :param start: start value (number) :param peak: peak value (number) :return:
[ "Signal", "starts", "at", "min", "value", "ramps", "linearly", "up", "to", "max", "value", "during", "the", "attack", "time", "than", "ramps", "back", "down", "to", "min", "value", "over", "remaining", "time", ":", "param", "params", ":", "buffer", "parame...
train
https://github.com/martinmcbride/pysound/blob/253c8f712ad475318350e5a8ba21f6fefd7a3de2/pysound/envelopes.py#L79-L94
martinmcbride/pysound
pysound/envelopes.py
GenericEnvelope.set
def set(self, value, samples=0): ''' Set the current value and optionally maintain it for a period :param value: New current value :param samples: Add current value for this number of samples (if not zero) :return: ''' if self.params.length > self.pos and samples > 0: l = min(samples, self.params.length-self.pos) self.data[self.pos:self.pos+l] = np.full(l, value, dtype=np.float) self.pos += l self.latest = value return self
python
def set(self, value, samples=0): ''' Set the current value and optionally maintain it for a period :param value: New current value :param samples: Add current value for this number of samples (if not zero) :return: ''' if self.params.length > self.pos and samples > 0: l = min(samples, self.params.length-self.pos) self.data[self.pos:self.pos+l] = np.full(l, value, dtype=np.float) self.pos += l self.latest = value return self
[ "def", "set", "(", "self", ",", "value", ",", "samples", "=", "0", ")", ":", "if", "self", ".", "params", ".", "length", ">", "self", ".", "pos", "and", "samples", ">", "0", ":", "l", "=", "min", "(", "samples", ",", "self", ".", "params", ".",...
Set the current value and optionally maintain it for a period :param value: New current value :param samples: Add current value for this number of samples (if not zero) :return:
[ "Set", "the", "current", "value", "and", "optionally", "maintain", "it", "for", "a", "period", ":", "param", "value", ":", "New", "current", "value", ":", "param", "samples", ":", "Add", "current", "value", "for", "this", "number", "of", "samples", "(", ...
train
https://github.com/martinmcbride/pysound/blob/253c8f712ad475318350e5a8ba21f6fefd7a3de2/pysound/envelopes.py#L30-L42
martinmcbride/pysound
pysound/envelopes.py
GenericEnvelope.linseg
def linseg(self, value, samples): ''' Create a linear section moving from current value to new value over acertain number of samples. :param value: New value :param samples: Length of segment in samples :return: ''' if self.params.length > self.pos and samples > 0: len = min(samples, self.params.length - self.pos) end = value if len == samples else self.latest + (value - self.latest)*len/samples self.data[self.pos:self.pos + len] = np.linspace(self.latest, end, num=len, endpoint=False, dtype=np.float) self.pos += len self.latest = value return self
python
def linseg(self, value, samples): ''' Create a linear section moving from current value to new value over acertain number of samples. :param value: New value :param samples: Length of segment in samples :return: ''' if self.params.length > self.pos and samples > 0: len = min(samples, self.params.length - self.pos) end = value if len == samples else self.latest + (value - self.latest)*len/samples self.data[self.pos:self.pos + len] = np.linspace(self.latest, end, num=len, endpoint=False, dtype=np.float) self.pos += len self.latest = value return self
[ "def", "linseg", "(", "self", ",", "value", ",", "samples", ")", ":", "if", "self", ".", "params", ".", "length", ">", "self", ".", "pos", "and", "samples", ">", "0", ":", "len", "=", "min", "(", "samples", ",", "self", ".", "params", ".", "lengt...
Create a linear section moving from current value to new value over acertain number of samples. :param value: New value :param samples: Length of segment in samples :return:
[ "Create", "a", "linear", "section", "moving", "from", "current", "value", "to", "new", "value", "over", "acertain", "number", "of", "samples", ".", ":", "param", "value", ":", "New", "value", ":", "param", "samples", ":", "Length", "of", "segment", "in", ...
train
https://github.com/martinmcbride/pysound/blob/253c8f712ad475318350e5a8ba21f6fefd7a3de2/pysound/envelopes.py#L56-L70
heroku/salesforce-oauth-request
salesforce_oauth_request/utils.py
oauth_flow
def oauth_flow(s, oauth_url, username=None, password=None, sandbox=False): """s should be a requests session""" r = s.get(oauth_url) if r.status_code >= 300: raise RuntimeError(r.text) params = urlparse.parse_qs(urlparse.urlparse(r.url).query) data = {"un":username, "width":2560, "height":1440, "hasRememberUn":True, "startURL":params['startURL'], "loginURL":"", "loginType":6, "useSecure":True, "local":"", "lt":"OAUTH", "qs":"r=https%3A%2F%2Flocalhost%3A8443%2Fsalesforce%2F21", "locale":"", "oauth_token":"", "oauth_callback":"", "login":"", "serverid":"", "display":"popup", "username":username, "pw":password, "Login":""} base = "https://login.salesforce.com" if not sandbox else "https://test.salesforce.com" r2 = s.post(base, data) m = re.search("window.location.href\s*='(.[^']+)'", r2.text) assert m is not None, "Couldn't find location.href expression in page %s (Username or password is wrong)" % r2.url u3 = "https://" + urlparse.urlparse(r2.url).hostname + m.group(1) r3 = s.get(u3) m = re.search("window.location.href\s*='(.[^']+)'", r3.text) assert m is not None, "Couldn't find location.href expression in page %s:\n%s" % (r3.url, r3.text) return m.group(1)
python
def oauth_flow(s, oauth_url, username=None, password=None, sandbox=False): """s should be a requests session""" r = s.get(oauth_url) if r.status_code >= 300: raise RuntimeError(r.text) params = urlparse.parse_qs(urlparse.urlparse(r.url).query) data = {"un":username, "width":2560, "height":1440, "hasRememberUn":True, "startURL":params['startURL'], "loginURL":"", "loginType":6, "useSecure":True, "local":"", "lt":"OAUTH", "qs":"r=https%3A%2F%2Flocalhost%3A8443%2Fsalesforce%2F21", "locale":"", "oauth_token":"", "oauth_callback":"", "login":"", "serverid":"", "display":"popup", "username":username, "pw":password, "Login":""} base = "https://login.salesforce.com" if not sandbox else "https://test.salesforce.com" r2 = s.post(base, data) m = re.search("window.location.href\s*='(.[^']+)'", r2.text) assert m is not None, "Couldn't find location.href expression in page %s (Username or password is wrong)" % r2.url u3 = "https://" + urlparse.urlparse(r2.url).hostname + m.group(1) r3 = s.get(u3) m = re.search("window.location.href\s*='(.[^']+)'", r3.text) assert m is not None, "Couldn't find location.href expression in page %s:\n%s" % (r3.url, r3.text) return m.group(1)
[ "def", "oauth_flow", "(", "s", ",", "oauth_url", ",", "username", "=", "None", ",", "password", "=", "None", ",", "sandbox", "=", "False", ")", ":", "r", "=", "s", ".", "get", "(", "oauth_url", ")", "if", "r", ".", "status_code", ">=", "300", ":", ...
s should be a requests session
[ "s", "should", "be", "a", "requests", "session" ]
train
https://github.com/heroku/salesforce-oauth-request/blob/16d4aa57f5bc00912d466a532c3f1d946d186da6/salesforce_oauth_request/utils.py#L113-L154
moonso/loqusdb
loqusdb/utils/update.py
update_database
def update_database(adapter, variant_file=None, sv_file=None, family_file=None, family_type='ped', skip_case_id=False, gq_treshold=None, case_id=None, max_window = 3000): """Update a case in the database Args: adapter: Connection to database variant_file(str): Path to variant file sv_file(str): Path to sv variant file family_file(str): Path to family file family_type(str): Format of family file skip_case_id(bool): If no case information should be added to variants gq_treshold(int): If only quality variants should be considered case_id(str): If different case id than the one in family file should be used max_window(int): Specify the max size for sv windows Returns: nr_inserted(int) """ vcf_files = [] nr_variants = None vcf_individuals = None if variant_file: vcf_info = check_vcf(variant_file) nr_variants = vcf_info['nr_variants'] variant_type = vcf_info['variant_type'] vcf_files.append(variant_file) # Get the indivuduals that are present in vcf file vcf_individuals = vcf_info['individuals'] nr_sv_variants = None sv_individuals = None if sv_file: vcf_info = check_vcf(sv_file, 'sv') nr_sv_variants = vcf_info['nr_variants'] vcf_files.append(sv_file) sv_individuals = vcf_info['individuals'] # If a gq treshold is used the variants needs to have GQ for _vcf_file in vcf_files: # Get a cyvcf2.VCF object vcf = get_vcf(_vcf_file) if gq_treshold: if not vcf.contains('GQ'): LOG.warning('Set gq-treshold to 0 or add info to vcf {0}'.format(_vcf_file)) raise SyntaxError('GQ is not defined in vcf header') # Get a ped_parser.Family object from family file family = None family_id = None if family_file: with open(family_file, 'r') as family_lines: family = get_case( family_lines=family_lines, family_type=family_type ) family_id = family.family_id # There has to be a case_id or a family at this stage. 
case_id = case_id or family_id # Convert infromation to a loqusdb Case object case_obj = build_case( case=family, case_id=case_id, vcf_path=variant_file, vcf_individuals=vcf_individuals, nr_variants=nr_variants, vcf_sv_path=sv_file, sv_individuals=sv_individuals, nr_sv_variants=nr_sv_variants, ) existing_case = adapter.case(case_obj) if not existing_case: raise CaseError("Case {} does not exist in database".format(case_obj['case_id'])) # Update the existing case in database case_obj = load_case( adapter=adapter, case_obj=case_obj, update=True, ) nr_inserted = 0 # If case was succesfully added we can store the variants for file_type in ['vcf_path','vcf_sv_path']: variant_type = 'snv' if file_type == 'vcf_sv_path': variant_type = 'sv' if case_obj.get(file_type) is None: continue vcf_obj = get_vcf(case_obj[file_type]) try: nr_inserted += load_variants( adapter=adapter, vcf_obj=vcf_obj, case_obj=case_obj, skip_case_id=skip_case_id, gq_treshold=gq_treshold, max_window=max_window, variant_type=variant_type, ) except Exception as err: # If something went wrong do a rollback LOG.warning(err) delete( adapter=adapter, case_obj=case_obj, update=True, existing_case=existing_case, ) raise err return nr_inserted
python
def update_database(adapter, variant_file=None, sv_file=None, family_file=None, family_type='ped', skip_case_id=False, gq_treshold=None, case_id=None, max_window = 3000): """Update a case in the database Args: adapter: Connection to database variant_file(str): Path to variant file sv_file(str): Path to sv variant file family_file(str): Path to family file family_type(str): Format of family file skip_case_id(bool): If no case information should be added to variants gq_treshold(int): If only quality variants should be considered case_id(str): If different case id than the one in family file should be used max_window(int): Specify the max size for sv windows Returns: nr_inserted(int) """ vcf_files = [] nr_variants = None vcf_individuals = None if variant_file: vcf_info = check_vcf(variant_file) nr_variants = vcf_info['nr_variants'] variant_type = vcf_info['variant_type'] vcf_files.append(variant_file) # Get the indivuduals that are present in vcf file vcf_individuals = vcf_info['individuals'] nr_sv_variants = None sv_individuals = None if sv_file: vcf_info = check_vcf(sv_file, 'sv') nr_sv_variants = vcf_info['nr_variants'] vcf_files.append(sv_file) sv_individuals = vcf_info['individuals'] # If a gq treshold is used the variants needs to have GQ for _vcf_file in vcf_files: # Get a cyvcf2.VCF object vcf = get_vcf(_vcf_file) if gq_treshold: if not vcf.contains('GQ'): LOG.warning('Set gq-treshold to 0 or add info to vcf {0}'.format(_vcf_file)) raise SyntaxError('GQ is not defined in vcf header') # Get a ped_parser.Family object from family file family = None family_id = None if family_file: with open(family_file, 'r') as family_lines: family = get_case( family_lines=family_lines, family_type=family_type ) family_id = family.family_id # There has to be a case_id or a family at this stage. 
case_id = case_id or family_id # Convert infromation to a loqusdb Case object case_obj = build_case( case=family, case_id=case_id, vcf_path=variant_file, vcf_individuals=vcf_individuals, nr_variants=nr_variants, vcf_sv_path=sv_file, sv_individuals=sv_individuals, nr_sv_variants=nr_sv_variants, ) existing_case = adapter.case(case_obj) if not existing_case: raise CaseError("Case {} does not exist in database".format(case_obj['case_id'])) # Update the existing case in database case_obj = load_case( adapter=adapter, case_obj=case_obj, update=True, ) nr_inserted = 0 # If case was succesfully added we can store the variants for file_type in ['vcf_path','vcf_sv_path']: variant_type = 'snv' if file_type == 'vcf_sv_path': variant_type = 'sv' if case_obj.get(file_type) is None: continue vcf_obj = get_vcf(case_obj[file_type]) try: nr_inserted += load_variants( adapter=adapter, vcf_obj=vcf_obj, case_obj=case_obj, skip_case_id=skip_case_id, gq_treshold=gq_treshold, max_window=max_window, variant_type=variant_type, ) except Exception as err: # If something went wrong do a rollback LOG.warning(err) delete( adapter=adapter, case_obj=case_obj, update=True, existing_case=existing_case, ) raise err return nr_inserted
[ "def", "update_database", "(", "adapter", ",", "variant_file", "=", "None", ",", "sv_file", "=", "None", ",", "family_file", "=", "None", ",", "family_type", "=", "'ped'", ",", "skip_case_id", "=", "False", ",", "gq_treshold", "=", "None", ",", "case_id", ...
Update a case in the database Args: adapter: Connection to database variant_file(str): Path to variant file sv_file(str): Path to sv variant file family_file(str): Path to family file family_type(str): Format of family file skip_case_id(bool): If no case information should be added to variants gq_treshold(int): If only quality variants should be considered case_id(str): If different case id than the one in family file should be used max_window(int): Specify the max size for sv windows Returns: nr_inserted(int)
[ "Update", "a", "case", "in", "the", "database", "Args", ":", "adapter", ":", "Connection", "to", "database", "variant_file", "(", "str", ")", ":", "Path", "to", "variant", "file", "sv_file", "(", "str", ")", ":", "Path", "to", "sv", "variant", "file", ...
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/update.py#L23-L137
MainRo/cyclotron-py
cyclotron/router.py
make_crossroad_router
def make_crossroad_router(source, drain=False): ''' legacy crossroad implementation. deprecated ''' sink_observer = None def on_sink_subscribe(observer): nonlocal sink_observer sink_observer = observer def dispose(): nonlocal sink_observer sink_observer = None return dispose def route_crossroad(request): def on_response_subscribe(observer): def on_next_source(i): if type(i) is cyclotron.Drain: observer.on_completed() else: observer.on_next(i) source_disposable = source.subscribe( on_next=on_next_source, on_error=lambda e: observer.on_error(e), on_completed=lambda: observer.on_completed() ) def on_next_request(i): if sink_observer is not None: sink_observer.on_next(i) def on_request_completed(): if sink_observer is not None: if drain is True: sink_observer.on_next(cyclotron.Drain()) else: sink_observer.on_completed() request_disposable = request.subscribe( on_next=on_next_request, on_error=observer.on_error, on_completed=on_request_completed ) def dispose(): source_disposable.dispose() request_disposable.dispose() return dispose return Observable.create(on_response_subscribe) return Observable.create(on_sink_subscribe), route_crossroad
python
def make_crossroad_router(source, drain=False): ''' legacy crossroad implementation. deprecated ''' sink_observer = None def on_sink_subscribe(observer): nonlocal sink_observer sink_observer = observer def dispose(): nonlocal sink_observer sink_observer = None return dispose def route_crossroad(request): def on_response_subscribe(observer): def on_next_source(i): if type(i) is cyclotron.Drain: observer.on_completed() else: observer.on_next(i) source_disposable = source.subscribe( on_next=on_next_source, on_error=lambda e: observer.on_error(e), on_completed=lambda: observer.on_completed() ) def on_next_request(i): if sink_observer is not None: sink_observer.on_next(i) def on_request_completed(): if sink_observer is not None: if drain is True: sink_observer.on_next(cyclotron.Drain()) else: sink_observer.on_completed() request_disposable = request.subscribe( on_next=on_next_request, on_error=observer.on_error, on_completed=on_request_completed ) def dispose(): source_disposable.dispose() request_disposable.dispose() return dispose return Observable.create(on_response_subscribe) return Observable.create(on_sink_subscribe), route_crossroad
[ "def", "make_crossroad_router", "(", "source", ",", "drain", "=", "False", ")", ":", "sink_observer", "=", "None", "def", "on_sink_subscribe", "(", "observer", ")", ":", "nonlocal", "sink_observer", "sink_observer", "=", "observer", "def", "dispose", "(", ")", ...
legacy crossroad implementation. deprecated
[ "legacy", "crossroad", "implementation", ".", "deprecated" ]
train
https://github.com/MainRo/cyclotron-py/blob/4530f65173aa4b9e27c3d4a2f5d33900fc19f754/cyclotron/router.py#L5-L59
MainRo/cyclotron-py
cyclotron/router.py
make_error_router
def make_error_router(): """ Creates an error router An error router takes a higher order observable a input and returns two observables: One containing the flattened items of the input observable and another one containing the flattened errors of the input observable. .. image:: ../docs/asset/error_router.png :scale: 60% :align: center Returns ------- error_observable: observable An observable emitting errors remapped. route_error: function A lettable function routing errors and taking three parameters: * source: Observable (higher order). Observable with errors to route. * error_map: function. Function used to map errors before routing them. * source_map: function. A function used to select the observable from each item is source. Examples -------- >>> sink, route_error = make_error_router() my_observable.let(route_error, error_map=lambda e: e) """ sink_observer = None def on_subscribe(observer): nonlocal sink_observer sink_observer = observer def dispose(): nonlocal sink_observer sink_observer = None return dispose def route_error(obs, convert): """ Handles error raised by obs observable catches any error raised by obs, maps it to anther object with the convert function, and emits in on the error observer. """ def catch_error(e): sink_observer.on_next(convert(e)) return Observable.empty() return obs.catch_exception(catch_error) def catch_or_flat_map(source, error_map, source_map=lambda i: i): return source.flat_map(lambda i: route_error(source_map(i), error_map)) return Observable.create(on_subscribe), catch_or_flat_map
python
def make_error_router(): """ Creates an error router An error router takes a higher order observable a input and returns two observables: One containing the flattened items of the input observable and another one containing the flattened errors of the input observable. .. image:: ../docs/asset/error_router.png :scale: 60% :align: center Returns ------- error_observable: observable An observable emitting errors remapped. route_error: function A lettable function routing errors and taking three parameters: * source: Observable (higher order). Observable with errors to route. * error_map: function. Function used to map errors before routing them. * source_map: function. A function used to select the observable from each item is source. Examples -------- >>> sink, route_error = make_error_router() my_observable.let(route_error, error_map=lambda e: e) """ sink_observer = None def on_subscribe(observer): nonlocal sink_observer sink_observer = observer def dispose(): nonlocal sink_observer sink_observer = None return dispose def route_error(obs, convert): """ Handles error raised by obs observable catches any error raised by obs, maps it to anther object with the convert function, and emits in on the error observer. """ def catch_error(e): sink_observer.on_next(convert(e)) return Observable.empty() return obs.catch_exception(catch_error) def catch_or_flat_map(source, error_map, source_map=lambda i: i): return source.flat_map(lambda i: route_error(source_map(i), error_map)) return Observable.create(on_subscribe), catch_or_flat_map
[ "def", "make_error_router", "(", ")", ":", "sink_observer", "=", "None", "def", "on_subscribe", "(", "observer", ")", ":", "nonlocal", "sink_observer", "sink_observer", "=", "observer", "def", "dispose", "(", ")", ":", "nonlocal", "sink_observer", "sink_observer",...
Creates an error router An error router takes a higher order observable a input and returns two observables: One containing the flattened items of the input observable and another one containing the flattened errors of the input observable. .. image:: ../docs/asset/error_router.png :scale: 60% :align: center Returns ------- error_observable: observable An observable emitting errors remapped. route_error: function A lettable function routing errors and taking three parameters: * source: Observable (higher order). Observable with errors to route. * error_map: function. Function used to map errors before routing them. * source_map: function. A function used to select the observable from each item is source. Examples -------- >>> sink, route_error = make_error_router() my_observable.let(route_error, error_map=lambda e: e)
[ "Creates", "an", "error", "router" ]
train
https://github.com/MainRo/cyclotron-py/blob/4530f65173aa4b9e27c3d4a2f5d33900fc19f754/cyclotron/router.py#L145-L202
moonso/loqusdb
loqusdb/commands/load.py
load
def load(ctx, variant_file, sv_variants, family_file, family_type, skip_case_id, gq_treshold, case_id, ensure_index, max_window, check_profile, hard_threshold, soft_threshold): """Load the variants of a case A variant is loaded if it is observed in any individual of a case If no family file is provided all individuals in vcf file will be considered. """ if not (family_file or case_id): LOG.warning("Please provide a family file or a case id") ctx.abort() if not (variant_file or sv_variants): LOG.warning("Please provide a VCF file") ctx.abort() variant_path = None if variant_file: variant_path = os.path.abspath(variant_file) variant_sv_path = None if sv_variants: variant_sv_path = os.path.abspath(sv_variants) variant_profile_path = None if check_profile: variant_profile_path = os.path.abspath(check_profile) adapter = ctx.obj['adapter'] start_inserting = datetime.now() try: nr_inserted = load_database( adapter=adapter, variant_file=variant_path, sv_file=variant_sv_path, family_file=family_file, family_type=family_type, skip_case_id=skip_case_id, case_id=case_id, gq_treshold=gq_treshold, max_window=max_window, profile_file=variant_profile_path, hard_threshold=hard_threshold, soft_threshold=soft_threshold ) except (SyntaxError, CaseError, IOError) as error: LOG.warning(error) ctx.abort() LOG.info("Nr variants inserted: %s", nr_inserted) LOG.info("Time to insert variants: {0}".format( datetime.now() - start_inserting)) if ensure_index: adapter.ensure_indexes() else: adapter.check_indexes()
python
def load(ctx, variant_file, sv_variants, family_file, family_type, skip_case_id, gq_treshold, case_id, ensure_index, max_window, check_profile, hard_threshold, soft_threshold): """Load the variants of a case A variant is loaded if it is observed in any individual of a case If no family file is provided all individuals in vcf file will be considered. """ if not (family_file or case_id): LOG.warning("Please provide a family file or a case id") ctx.abort() if not (variant_file or sv_variants): LOG.warning("Please provide a VCF file") ctx.abort() variant_path = None if variant_file: variant_path = os.path.abspath(variant_file) variant_sv_path = None if sv_variants: variant_sv_path = os.path.abspath(sv_variants) variant_profile_path = None if check_profile: variant_profile_path = os.path.abspath(check_profile) adapter = ctx.obj['adapter'] start_inserting = datetime.now() try: nr_inserted = load_database( adapter=adapter, variant_file=variant_path, sv_file=variant_sv_path, family_file=family_file, family_type=family_type, skip_case_id=skip_case_id, case_id=case_id, gq_treshold=gq_treshold, max_window=max_window, profile_file=variant_profile_path, hard_threshold=hard_threshold, soft_threshold=soft_threshold ) except (SyntaxError, CaseError, IOError) as error: LOG.warning(error) ctx.abort() LOG.info("Nr variants inserted: %s", nr_inserted) LOG.info("Time to insert variants: {0}".format( datetime.now() - start_inserting)) if ensure_index: adapter.ensure_indexes() else: adapter.check_indexes()
[ "def", "load", "(", "ctx", ",", "variant_file", ",", "sv_variants", ",", "family_file", ",", "family_type", ",", "skip_case_id", ",", "gq_treshold", ",", "case_id", ",", "ensure_index", ",", "max_window", ",", "check_profile", ",", "hard_threshold", ",", "soft_t...
Load the variants of a case A variant is loaded if it is observed in any individual of a case If no family file is provided all individuals in vcf file will be considered.
[ "Load", "the", "variants", "of", "a", "case" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/load.py#L84-L141
moonso/loqusdb
loqusdb/plugins/mongo/adapter.py
MongoAdapter.wipe_db
def wipe_db(self): """Wipe the whole database""" logger.warning("Wiping the whole database") self.client.drop_database(self.db_name) logger.debug("Database wiped")
python
def wipe_db(self): """Wipe the whole database""" logger.warning("Wiping the whole database") self.client.drop_database(self.db_name) logger.debug("Database wiped")
[ "def", "wipe_db", "(", "self", ")", ":", "logger", ".", "warning", "(", "\"Wiping the whole database\"", ")", "self", ".", "client", ".", "drop_database", "(", "self", ".", "db_name", ")", "logger", ".", "debug", "(", "\"Database wiped\"", ")" ]
Wipe the whole database
[ "Wipe", "the", "whole", "database" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/adapter.py#L17-L21
moonso/loqusdb
loqusdb/plugins/mongo/adapter.py
MongoAdapter.check_indexes
def check_indexes(self): """Check if the indexes exists""" for collection_name in INDEXES: existing_indexes = self.indexes(collection_name) indexes = INDEXES[collection_name] for index in indexes: index_name = index.document.get('name') if not index_name in existing_indexes: logger.warning("Index {0} missing. Run command `loqusdb index`".format(index_name)) return logger.info("All indexes exists")
python
def check_indexes(self): """Check if the indexes exists""" for collection_name in INDEXES: existing_indexes = self.indexes(collection_name) indexes = INDEXES[collection_name] for index in indexes: index_name = index.document.get('name') if not index_name in existing_indexes: logger.warning("Index {0} missing. Run command `loqusdb index`".format(index_name)) return logger.info("All indexes exists")
[ "def", "check_indexes", "(", "self", ")", ":", "for", "collection_name", "in", "INDEXES", ":", "existing_indexes", "=", "self", ".", "indexes", "(", "collection_name", ")", "indexes", "=", "INDEXES", "[", "collection_name", "]", "for", "index", "in", "indexes"...
Check if the indexes exists
[ "Check", "if", "the", "indexes", "exists" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/adapter.py#L43-L53
moonso/loqusdb
loqusdb/plugins/mongo/adapter.py
MongoAdapter.ensure_indexes
def ensure_indexes(self): """Update the indexes""" for collection_name in INDEXES: existing_indexes = self.indexes(collection_name) indexes = INDEXES[collection_name] for index in indexes: index_name = index.document.get('name') if index_name in existing_indexes: logger.debug("Index exists: %s" % index_name) self.db[collection_name].drop_index(index_name) logger.info("creating indexes for collection {0}: {1}".format( collection_name, ', '.join([index.document.get('name') for index in indexes]), ) ) self.db[collection_name].create_indexes(indexes)
python
def ensure_indexes(self): """Update the indexes""" for collection_name in INDEXES: existing_indexes = self.indexes(collection_name) indexes = INDEXES[collection_name] for index in indexes: index_name = index.document.get('name') if index_name in existing_indexes: logger.debug("Index exists: %s" % index_name) self.db[collection_name].drop_index(index_name) logger.info("creating indexes for collection {0}: {1}".format( collection_name, ', '.join([index.document.get('name') for index in indexes]), ) ) self.db[collection_name].create_indexes(indexes)
[ "def", "ensure_indexes", "(", "self", ")", ":", "for", "collection_name", "in", "INDEXES", ":", "existing_indexes", "=", "self", ".", "indexes", "(", "collection_name", ")", "indexes", "=", "INDEXES", "[", "collection_name", "]", "for", "index", "in", "indexes...
Update the indexes
[ "Update", "the", "indexes" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/adapter.py#L55-L70
moonso/loqusdb
loqusdb/utils/variant.py
format_info
def format_info(variant, variant_type='snv'): """Format the info field for SNV variants Args: variant(dict) variant_type(str): snv or sv Returns: vcf_info(str): A VCF formated info field """ observations = variant.get('observations',0) homozygotes = variant.get('homozygote') hemizygotes = variant.get('hemizygote') vcf_info = f"Obs={observations}" if homozygotes: vcf_info += f";Hom={homozygotes}" if hemizygotes: vcf_info += f";Hem={hemizygotes}" # This is SV specific if variant_type == 'sv': end = int((variant['end_left'] + variant['end_right'])/2) vcf_info += f";SVTYPE={variant['sv_type']};END={end};SVLEN={variant['length']}" return vcf_info
python
def format_info(variant, variant_type='snv'): """Format the info field for SNV variants Args: variant(dict) variant_type(str): snv or sv Returns: vcf_info(str): A VCF formated info field """ observations = variant.get('observations',0) homozygotes = variant.get('homozygote') hemizygotes = variant.get('hemizygote') vcf_info = f"Obs={observations}" if homozygotes: vcf_info += f";Hom={homozygotes}" if hemizygotes: vcf_info += f";Hem={hemizygotes}" # This is SV specific if variant_type == 'sv': end = int((variant['end_left'] + variant['end_right'])/2) vcf_info += f";SVTYPE={variant['sv_type']};END={end};SVLEN={variant['length']}" return vcf_info
[ "def", "format_info", "(", "variant", ",", "variant_type", "=", "'snv'", ")", ":", "observations", "=", "variant", ".", "get", "(", "'observations'", ",", "0", ")", "homozygotes", "=", "variant", ".", "get", "(", "'homozygote'", ")", "hemizygotes", "=", "v...
Format the info field for SNV variants Args: variant(dict) variant_type(str): snv or sv Returns: vcf_info(str): A VCF formated info field
[ "Format", "the", "info", "field", "for", "SNV", "variants", "Args", ":", "variant", "(", "dict", ")", "variant_type", "(", "str", ")", ":", "snv", "or", "sv", "Returns", ":", "vcf_info", "(", "str", ")", ":", "A", "VCF", "formated", "info", "field" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/variant.py#L6-L35
moonso/loqusdb
loqusdb/utils/variant.py
format_variant
def format_variant(variant, variant_type='snv'): """Convert variant information to a VCF formated string Args: variant(dict) variant_type(str) Returns: vcf_variant(str) """ chrom = variant.get('chrom') pos = variant.get('start') ref = variant.get('ref') alt = variant.get('alt') if variant_type == 'sv': pos = int((variant['pos_left'] + variant['pos_right'])/2) ref = 'N' alt = f"<{variant['sv_type']}>" info = None info = format_info(variant, variant_type=variant_type) variant_line = f"{chrom}\t{pos}\t.\t{ref}\t{alt}\t.\t.\t{info}" return variant_line
python
def format_variant(variant, variant_type='snv'): """Convert variant information to a VCF formated string Args: variant(dict) variant_type(str) Returns: vcf_variant(str) """ chrom = variant.get('chrom') pos = variant.get('start') ref = variant.get('ref') alt = variant.get('alt') if variant_type == 'sv': pos = int((variant['pos_left'] + variant['pos_right'])/2) ref = 'N' alt = f"<{variant['sv_type']}>" info = None info = format_info(variant, variant_type=variant_type) variant_line = f"{chrom}\t{pos}\t.\t{ref}\t{alt}\t.\t.\t{info}" return variant_line
[ "def", "format_variant", "(", "variant", ",", "variant_type", "=", "'snv'", ")", ":", "chrom", "=", "variant", ".", "get", "(", "'chrom'", ")", "pos", "=", "variant", ".", "get", "(", "'start'", ")", "ref", "=", "variant", ".", "get", "(", "'ref'", "...
Convert variant information to a VCF formated string Args: variant(dict) variant_type(str) Returns: vcf_variant(str)
[ "Convert", "variant", "information", "to", "a", "VCF", "formated", "string", "Args", ":", "variant", "(", "dict", ")", "variant_type", "(", "str", ")", "Returns", ":", "vcf_variant", "(", "str", ")" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/variant.py#L37-L64
yjzhang/uncurl_python
uncurl/state_estimation.py
_create_m_objective
def _create_m_objective(w, X): """ Creates an objective function and its derivative for M, given W and X Args: w (array): clusters x cells X (array): genes x cells """ clusters, cells = w.shape genes = X.shape[0] w_sum = w.sum(1) def objective(m): m = m.reshape((X.shape[0], w.shape[0])) d = m.dot(w)+eps temp = X/d w2 = w.dot(temp.T) deriv = w_sum - w2.T return np.sum(d - X*np.log(d))/genes, deriv.flatten()/genes return objective
python
def _create_m_objective(w, X): """ Creates an objective function and its derivative for M, given W and X Args: w (array): clusters x cells X (array): genes x cells """ clusters, cells = w.shape genes = X.shape[0] w_sum = w.sum(1) def objective(m): m = m.reshape((X.shape[0], w.shape[0])) d = m.dot(w)+eps temp = X/d w2 = w.dot(temp.T) deriv = w_sum - w2.T return np.sum(d - X*np.log(d))/genes, deriv.flatten()/genes return objective
[ "def", "_create_m_objective", "(", "w", ",", "X", ")", ":", "clusters", ",", "cells", "=", "w", ".", "shape", "genes", "=", "X", ".", "shape", "[", "0", "]", "w_sum", "=", "w", ".", "sum", "(", "1", ")", "def", "objective", "(", "m", ")", ":", ...
Creates an objective function and its derivative for M, given W and X Args: w (array): clusters x cells X (array): genes x cells
[ "Creates", "an", "objective", "function", "and", "its", "derivative", "for", "M", "given", "W", "and", "X" ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/state_estimation.py#L50-L68
yjzhang/uncurl_python
uncurl/state_estimation.py
initialize_from_assignments
def initialize_from_assignments(assignments, k, max_assign_weight=0.75): """ Creates a weight initialization matrix from Poisson clustering assignments. Args: assignments (array): 1D array of integers, of length cells k (int): number of states/clusters max_assign_weight (float, optional): between 0 and 1 - how much weight to assign to the highest cluster. Default: 0.75 Returns: init_W (array): k x cells """ cells = len(assignments) init_W = np.zeros((k, cells)) for i, a in enumerate(assignments): # entirely arbitrary... maybe it would be better to scale # the weights based on k? init_W[a, i] = max_assign_weight for a2 in range(k): if a2!=a: init_W[a2, i] = (1-max_assign_weight)/(k-1) return init_W/init_W.sum(0)
python
def initialize_from_assignments(assignments, k, max_assign_weight=0.75): """ Creates a weight initialization matrix from Poisson clustering assignments. Args: assignments (array): 1D array of integers, of length cells k (int): number of states/clusters max_assign_weight (float, optional): between 0 and 1 - how much weight to assign to the highest cluster. Default: 0.75 Returns: init_W (array): k x cells """ cells = len(assignments) init_W = np.zeros((k, cells)) for i, a in enumerate(assignments): # entirely arbitrary... maybe it would be better to scale # the weights based on k? init_W[a, i] = max_assign_weight for a2 in range(k): if a2!=a: init_W[a2, i] = (1-max_assign_weight)/(k-1) return init_W/init_W.sum(0)
[ "def", "initialize_from_assignments", "(", "assignments", ",", "k", ",", "max_assign_weight", "=", "0.75", ")", ":", "cells", "=", "len", "(", "assignments", ")", "init_W", "=", "np", ".", "zeros", "(", "(", "k", ",", "cells", ")", ")", "for", "i", ","...
Creates a weight initialization matrix from Poisson clustering assignments. Args: assignments (array): 1D array of integers, of length cells k (int): number of states/clusters max_assign_weight (float, optional): between 0 and 1 - how much weight to assign to the highest cluster. Default: 0.75 Returns: init_W (array): k x cells
[ "Creates", "a", "weight", "initialization", "matrix", "from", "Poisson", "clustering", "assignments", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/state_estimation.py#L70-L91
yjzhang/uncurl_python
uncurl/state_estimation.py
initialize_means
def initialize_means(data, clusters, k): """ Initializes the M matrix given the data and a set of cluster labels. Cluster centers are set to the mean of each cluster. Args: data (array): genes x cells clusters (array): 1d array of ints (0...k-1) k (int): number of clusters """ init_w = np.zeros((data.shape[0], k)) if sparse.issparse(data): for i in range(k): if data[:,clusters==i].shape[1]==0: point = np.random.randint(0, data.shape[1]) init_w[:,i] = data[:,point].toarray().flatten() else: # memory usage might be a problem here? init_w[:,i] = np.array(data[:,clusters==i].mean(1)).flatten() + eps else: for i in range(k): if data[:,clusters==i].shape[1]==0: point = np.random.randint(0, data.shape[1]) init_w[:,i] = data[:,point].flatten() else: init_w[:,i] = data[:,clusters==i].mean(1) + eps return init_w
python
def initialize_means(data, clusters, k): """ Initializes the M matrix given the data and a set of cluster labels. Cluster centers are set to the mean of each cluster. Args: data (array): genes x cells clusters (array): 1d array of ints (0...k-1) k (int): number of clusters """ init_w = np.zeros((data.shape[0], k)) if sparse.issparse(data): for i in range(k): if data[:,clusters==i].shape[1]==0: point = np.random.randint(0, data.shape[1]) init_w[:,i] = data[:,point].toarray().flatten() else: # memory usage might be a problem here? init_w[:,i] = np.array(data[:,clusters==i].mean(1)).flatten() + eps else: for i in range(k): if data[:,clusters==i].shape[1]==0: point = np.random.randint(0, data.shape[1]) init_w[:,i] = data[:,point].flatten() else: init_w[:,i] = data[:,clusters==i].mean(1) + eps return init_w
[ "def", "initialize_means", "(", "data", ",", "clusters", ",", "k", ")", ":", "init_w", "=", "np", ".", "zeros", "(", "(", "data", ".", "shape", "[", "0", "]", ",", "k", ")", ")", "if", "sparse", ".", "issparse", "(", "data", ")", ":", "for", "i...
Initializes the M matrix given the data and a set of cluster labels. Cluster centers are set to the mean of each cluster. Args: data (array): genes x cells clusters (array): 1d array of ints (0...k-1) k (int): number of clusters
[ "Initializes", "the", "M", "matrix", "given", "the", "data", "and", "a", "set", "of", "cluster", "labels", ".", "Cluster", "centers", "are", "set", "to", "the", "mean", "of", "each", "cluster", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/state_estimation.py#L93-L119
yjzhang/uncurl_python
uncurl/state_estimation.py
initialize_weights_nn
def initialize_weights_nn(data, means, lognorm=True): """ Initializes the weights with a nearest-neighbor approach using the means. """ # TODO genes, cells = data.shape k = means.shape[1] if lognorm: data = log1p(cell_normalize(data)) for i in range(cells): for j in range(k): pass
python
def initialize_weights_nn(data, means, lognorm=True): """ Initializes the weights with a nearest-neighbor approach using the means. """ # TODO genes, cells = data.shape k = means.shape[1] if lognorm: data = log1p(cell_normalize(data)) for i in range(cells): for j in range(k): pass
[ "def", "initialize_weights_nn", "(", "data", ",", "means", ",", "lognorm", "=", "True", ")", ":", "# TODO", "genes", ",", "cells", "=", "data", ".", "shape", "k", "=", "means", ".", "shape", "[", "1", "]", "if", "lognorm", ":", "data", "=", "log1p", ...
Initializes the weights with a nearest-neighbor approach using the means.
[ "Initializes", "the", "weights", "with", "a", "nearest", "-", "neighbor", "approach", "using", "the", "means", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/state_estimation.py#L121-L132
yjzhang/uncurl_python
uncurl/state_estimation.py
initialize_means_weights
def initialize_means_weights(data, clusters, init_means=None, init_weights=None, initialization='tsvd', max_assign_weight=0.75): """ Generates initial means and weights for state estimation. """ genes, cells = data.shape if init_means is None: if init_weights is not None: if len(init_weights.shape)==1: means = initialize_means(data, init_weights, clusters) else: means = initialize_means(data, init_weights.argmax(0), clusters, max_assign_weight=max_assign_weight) elif initialization=='cluster': assignments, means = poisson_cluster(data, clusters) if init_weights is None: init_weights = initialize_from_assignments(assignments, clusters, max_assign_weight=max_assign_weight) elif initialization=='kmpp': means, assignments = kmeans_pp(data, clusters) elif initialization=='km': km = KMeans(clusters) assignments = km.fit_predict(log1p(cell_normalize(data)).T) init_weights = initialize_from_assignments(assignments, clusters, max_assign_weight) means = initialize_means(data, assignments, clusters) elif initialization=='tsvd': n_components = min(50, genes-1) #tsvd = TruncatedSVD(min(50, genes-1)) km = KMeans(clusters) # remove dependence on sklearn tsvd b/c it has a bug that # prevents it from working properly on long inputs # if num elements > 2**31 #data_reduced = tsvd.fit_transform(log1p(cell_normalize(data)).T) U, Sigma, VT = randomized_svd(log1p(cell_normalize(data)).T, n_components) data_reduced = U*Sigma assignments = km.fit_predict(data_reduced) init_weights = initialize_from_assignments(assignments, clusters, max_assign_weight) means = initialize_means(data, assignments, clusters) elif initialization == 'random' or initialization == 'rand': # choose k random cells and set means to those selected_cells = np.random.choice(range(cells), size=clusters, replace=False) means = data[:, selected_cells] if sparse.issparse(means): means = means.toarray() else: means = init_means.copy() means = means.astype(float) if init_weights is None: if init_means is not None: if 
initialization == 'cluster': assignments, means = poisson_cluster(data, clusters, init=init_means, max_iters=1) w_init = initialize_from_assignments(assignments, clusters, max_assign_weight) elif initialization == 'km': km = KMeans(clusters, init=log1p(init_means.T), max_iter=1) assignments = km.fit_predict(log1p(cell_normalize(data)).T) w_init = initialize_from_assignments(assignments, clusters, max_assign_weight) else: w_init = np.random.random((clusters, cells)) w_init = w_init/w_init.sum(0) else: w_init = np.random.random((clusters, cells)) w_init = w_init/w_init.sum(0) else: if len(init_weights.shape)==1: init_weights = initialize_from_assignments(init_weights, clusters, max_assign_weight) w_init = init_weights.copy() return means, w_init
python
def initialize_means_weights(data, clusters, init_means=None, init_weights=None, initialization='tsvd', max_assign_weight=0.75): """ Generates initial means and weights for state estimation. """ genes, cells = data.shape if init_means is None: if init_weights is not None: if len(init_weights.shape)==1: means = initialize_means(data, init_weights, clusters) else: means = initialize_means(data, init_weights.argmax(0), clusters, max_assign_weight=max_assign_weight) elif initialization=='cluster': assignments, means = poisson_cluster(data, clusters) if init_weights is None: init_weights = initialize_from_assignments(assignments, clusters, max_assign_weight=max_assign_weight) elif initialization=='kmpp': means, assignments = kmeans_pp(data, clusters) elif initialization=='km': km = KMeans(clusters) assignments = km.fit_predict(log1p(cell_normalize(data)).T) init_weights = initialize_from_assignments(assignments, clusters, max_assign_weight) means = initialize_means(data, assignments, clusters) elif initialization=='tsvd': n_components = min(50, genes-1) #tsvd = TruncatedSVD(min(50, genes-1)) km = KMeans(clusters) # remove dependence on sklearn tsvd b/c it has a bug that # prevents it from working properly on long inputs # if num elements > 2**31 #data_reduced = tsvd.fit_transform(log1p(cell_normalize(data)).T) U, Sigma, VT = randomized_svd(log1p(cell_normalize(data)).T, n_components) data_reduced = U*Sigma assignments = km.fit_predict(data_reduced) init_weights = initialize_from_assignments(assignments, clusters, max_assign_weight) means = initialize_means(data, assignments, clusters) elif initialization == 'random' or initialization == 'rand': # choose k random cells and set means to those selected_cells = np.random.choice(range(cells), size=clusters, replace=False) means = data[:, selected_cells] if sparse.issparse(means): means = means.toarray() else: means = init_means.copy() means = means.astype(float) if init_weights is None: if init_means is not None: if 
initialization == 'cluster': assignments, means = poisson_cluster(data, clusters, init=init_means, max_iters=1) w_init = initialize_from_assignments(assignments, clusters, max_assign_weight) elif initialization == 'km': km = KMeans(clusters, init=log1p(init_means.T), max_iter=1) assignments = km.fit_predict(log1p(cell_normalize(data)).T) w_init = initialize_from_assignments(assignments, clusters, max_assign_weight) else: w_init = np.random.random((clusters, cells)) w_init = w_init/w_init.sum(0) else: w_init = np.random.random((clusters, cells)) w_init = w_init/w_init.sum(0) else: if len(init_weights.shape)==1: init_weights = initialize_from_assignments(init_weights, clusters, max_assign_weight) w_init = init_weights.copy() return means, w_init
[ "def", "initialize_means_weights", "(", "data", ",", "clusters", ",", "init_means", "=", "None", ",", "init_weights", "=", "None", ",", "initialization", "=", "'tsvd'", ",", "max_assign_weight", "=", "0.75", ")", ":", "genes", ",", "cells", "=", "data", ".",...
Generates initial means and weights for state estimation.
[ "Generates", "initial", "means", "and", "weights", "for", "state", "estimation", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/state_estimation.py#L170-L243
yjzhang/uncurl_python
uncurl/state_estimation.py
poisson_estimate_state
def poisson_estimate_state(data, clusters, init_means=None, init_weights=None, method='NoLips', max_iters=30, tol=0, disp=False, inner_max_iters=100, normalize=True, initialization='tsvd', parallel=True, threads=4, max_assign_weight=0.75, run_w_first=True, constrain_w=False, regularization=0.0, write_progress_file=None, **kwargs): """ Uses a Poisson Covex Mixture model to estimate cell states and cell state mixing weights. To lower computational costs, use a sparse matrix, set disp to False, and set tol to 0. Args: data (array): genes x cells array or sparse matrix. clusters (int): number of mixture components init_means (array, optional): initial centers - genes x clusters. Default: from Poisson kmeans init_weights (array, optional): initial weights - clusters x cells, or assignments as produced by clustering. Default: from Poisson kmeans method (str, optional): optimization method. Current options are 'NoLips' and 'L-BFGS-B'. Default: 'NoLips'. max_iters (int, optional): maximum number of iterations. Default: 30 tol (float, optional): if both M and W change by less than tol (RMSE), then the iteration is stopped. Default: 1e-10 disp (bool, optional): whether or not to display optimization progress. Default: False inner_max_iters (int, optional): Number of iterations to run in the optimization subroutine for M and W. Default: 100 normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True. initialization (str, optional): If initial means and weights are not provided, this describes how they are initialized. Options: 'cluster' (poisson cluster for means and weights), 'kmpp' (kmeans++ for means, random weights), 'km' (regular k-means), 'tsvd' (tsvd(50) + k-means). Default: tsvd. parallel (bool, optional): Whether to use parallel updates (sparse NoLips only). Default: True threads (int, optional): How many threads to use in the parallel computation. 
Default: 4 max_assign_weight (float, optional): If using a clustering-based initialization, how much weight to assign to the max weight cluster. Default: 0.75 run_w_first (bool, optional): Whether or not to optimize W first (if false, M will be optimized first). Default: True constrain_w (bool, optional): If True, then W is normalized after every iteration. Default: False regularization (float, optional): Regularization coefficient for M and W. Default: 0 (no regularization). write_progress_file (str, optional): filename to write progress updates to. Returns: M (array): genes x clusters - state means W (array): clusters x cells - state mixing components for each cell ll (float): final log-likelihood """ genes, cells = data.shape means, w_init = initialize_means_weights(data, clusters, init_means, init_weights, initialization, max_assign_weight) X = data.astype(float) XT = X.T is_sparse = False if sparse.issparse(X): is_sparse = True update_fn = sparse_nolips_update_w # convert to csc X = sparse.csc_matrix(X) XT = sparse.csc_matrix(XT) if parallel: update_fn = parallel_sparse_nolips_update_w Xsum = np.asarray(X.sum(0)).flatten() Xsum_m = np.asarray(X.sum(1)).flatten() # L-BFGS-B won't work right now for sparse matrices method = 'NoLips' objective_fn = _call_sparse_obj else: objective_fn = objective update_fn = nolips_update_w Xsum = X.sum(0) Xsum_m = X.sum(1) # If method is NoLips, converting to a sparse matrix # will always improve the performance (?) and never lower accuracy... 
if method == 'NoLips': is_sparse = True X = sparse.csc_matrix(X) XT = sparse.csc_matrix(XT) update_fn = sparse_nolips_update_w if parallel: update_fn = parallel_sparse_nolips_update_w objective_fn = _call_sparse_obj w_new = w_init for i in range(max_iters): if disp: print('iter: {0}'.format(i)) if run_w_first: # step 1: given M, estimate W w_new = _estimate_w(X, w_new, means, Xsum, update_fn, objective_fn, is_sparse, parallel, threads, method, tol, disp, inner_max_iters, 'W', regularization) if constrain_w: w_new = w_new/w_new.sum(0) if disp: w_ll = objective_fn(X, means, w_new) print('Finished updating W. Objective value: {0}'.format(w_ll)) # step 2: given W, update M means = _estimate_w(XT, means.T, w_new.T, Xsum_m, update_fn, objective_fn, is_sparse, parallel, threads, method, tol, disp, inner_max_iters, 'M', regularization) means = means.T if disp: w_ll = objective_fn(X, means, w_new) print('Finished updating M. Objective value: {0}'.format(w_ll)) else: # step 1: given W, update M means = _estimate_w(XT, means.T, w_new.T, Xsum_m, update_fn, objective_fn, is_sparse, parallel, threads, method, tol, disp, inner_max_iters, 'M', regularization) means = means.T if disp: w_ll = objective_fn(X, means, w_new) print('Finished updating M. Objective value: {0}'.format(w_ll)) # step 2: given M, estimate W w_new = _estimate_w(X, w_new, means, Xsum, update_fn, objective_fn, is_sparse, parallel, threads, method, tol, disp, inner_max_iters, 'W', regularization) if constrain_w: w_new = w_new/w_new.sum(0) if disp: w_ll = objective_fn(X, means, w_new) print('Finished updating W. Objective value: {0}'.format(w_ll)) # write progress to progress file if write_progress_file is not None: progress = open(write_progress_file, 'w') progress.write(str(i)) progress.close() if normalize: w_new = w_new/w_new.sum(0) m_ll = objective_fn(X, means, w_new) return means, w_new, m_ll
python
def poisson_estimate_state(data, clusters, init_means=None, init_weights=None, method='NoLips', max_iters=30, tol=0, disp=False, inner_max_iters=100, normalize=True, initialization='tsvd', parallel=True, threads=4, max_assign_weight=0.75, run_w_first=True, constrain_w=False, regularization=0.0, write_progress_file=None, **kwargs): """ Uses a Poisson Covex Mixture model to estimate cell states and cell state mixing weights. To lower computational costs, use a sparse matrix, set disp to False, and set tol to 0. Args: data (array): genes x cells array or sparse matrix. clusters (int): number of mixture components init_means (array, optional): initial centers - genes x clusters. Default: from Poisson kmeans init_weights (array, optional): initial weights - clusters x cells, or assignments as produced by clustering. Default: from Poisson kmeans method (str, optional): optimization method. Current options are 'NoLips' and 'L-BFGS-B'. Default: 'NoLips'. max_iters (int, optional): maximum number of iterations. Default: 30 tol (float, optional): if both M and W change by less than tol (RMSE), then the iteration is stopped. Default: 1e-10 disp (bool, optional): whether or not to display optimization progress. Default: False inner_max_iters (int, optional): Number of iterations to run in the optimization subroutine for M and W. Default: 100 normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True. initialization (str, optional): If initial means and weights are not provided, this describes how they are initialized. Options: 'cluster' (poisson cluster for means and weights), 'kmpp' (kmeans++ for means, random weights), 'km' (regular k-means), 'tsvd' (tsvd(50) + k-means). Default: tsvd. parallel (bool, optional): Whether to use parallel updates (sparse NoLips only). Default: True threads (int, optional): How many threads to use in the parallel computation. 
Default: 4 max_assign_weight (float, optional): If using a clustering-based initialization, how much weight to assign to the max weight cluster. Default: 0.75 run_w_first (bool, optional): Whether or not to optimize W first (if false, M will be optimized first). Default: True constrain_w (bool, optional): If True, then W is normalized after every iteration. Default: False regularization (float, optional): Regularization coefficient for M and W. Default: 0 (no regularization). write_progress_file (str, optional): filename to write progress updates to. Returns: M (array): genes x clusters - state means W (array): clusters x cells - state mixing components for each cell ll (float): final log-likelihood """ genes, cells = data.shape means, w_init = initialize_means_weights(data, clusters, init_means, init_weights, initialization, max_assign_weight) X = data.astype(float) XT = X.T is_sparse = False if sparse.issparse(X): is_sparse = True update_fn = sparse_nolips_update_w # convert to csc X = sparse.csc_matrix(X) XT = sparse.csc_matrix(XT) if parallel: update_fn = parallel_sparse_nolips_update_w Xsum = np.asarray(X.sum(0)).flatten() Xsum_m = np.asarray(X.sum(1)).flatten() # L-BFGS-B won't work right now for sparse matrices method = 'NoLips' objective_fn = _call_sparse_obj else: objective_fn = objective update_fn = nolips_update_w Xsum = X.sum(0) Xsum_m = X.sum(1) # If method is NoLips, converting to a sparse matrix # will always improve the performance (?) and never lower accuracy... 
if method == 'NoLips': is_sparse = True X = sparse.csc_matrix(X) XT = sparse.csc_matrix(XT) update_fn = sparse_nolips_update_w if parallel: update_fn = parallel_sparse_nolips_update_w objective_fn = _call_sparse_obj w_new = w_init for i in range(max_iters): if disp: print('iter: {0}'.format(i)) if run_w_first: # step 1: given M, estimate W w_new = _estimate_w(X, w_new, means, Xsum, update_fn, objective_fn, is_sparse, parallel, threads, method, tol, disp, inner_max_iters, 'W', regularization) if constrain_w: w_new = w_new/w_new.sum(0) if disp: w_ll = objective_fn(X, means, w_new) print('Finished updating W. Objective value: {0}'.format(w_ll)) # step 2: given W, update M means = _estimate_w(XT, means.T, w_new.T, Xsum_m, update_fn, objective_fn, is_sparse, parallel, threads, method, tol, disp, inner_max_iters, 'M', regularization) means = means.T if disp: w_ll = objective_fn(X, means, w_new) print('Finished updating M. Objective value: {0}'.format(w_ll)) else: # step 1: given W, update M means = _estimate_w(XT, means.T, w_new.T, Xsum_m, update_fn, objective_fn, is_sparse, parallel, threads, method, tol, disp, inner_max_iters, 'M', regularization) means = means.T if disp: w_ll = objective_fn(X, means, w_new) print('Finished updating M. Objective value: {0}'.format(w_ll)) # step 2: given M, estimate W w_new = _estimate_w(X, w_new, means, Xsum, update_fn, objective_fn, is_sparse, parallel, threads, method, tol, disp, inner_max_iters, 'W', regularization) if constrain_w: w_new = w_new/w_new.sum(0) if disp: w_ll = objective_fn(X, means, w_new) print('Finished updating W. Objective value: {0}'.format(w_ll)) # write progress to progress file if write_progress_file is not None: progress = open(write_progress_file, 'w') progress.write(str(i)) progress.close() if normalize: w_new = w_new/w_new.sum(0) m_ll = objective_fn(X, means, w_new) return means, w_new, m_ll
[ "def", "poisson_estimate_state", "(", "data", ",", "clusters", ",", "init_means", "=", "None", ",", "init_weights", "=", "None", ",", "method", "=", "'NoLips'", ",", "max_iters", "=", "30", ",", "tol", "=", "0", ",", "disp", "=", "False", ",", "inner_max...
Uses a Poisson Covex Mixture model to estimate cell states and cell state mixing weights. To lower computational costs, use a sparse matrix, set disp to False, and set tol to 0. Args: data (array): genes x cells array or sparse matrix. clusters (int): number of mixture components init_means (array, optional): initial centers - genes x clusters. Default: from Poisson kmeans init_weights (array, optional): initial weights - clusters x cells, or assignments as produced by clustering. Default: from Poisson kmeans method (str, optional): optimization method. Current options are 'NoLips' and 'L-BFGS-B'. Default: 'NoLips'. max_iters (int, optional): maximum number of iterations. Default: 30 tol (float, optional): if both M and W change by less than tol (RMSE), then the iteration is stopped. Default: 1e-10 disp (bool, optional): whether or not to display optimization progress. Default: False inner_max_iters (int, optional): Number of iterations to run in the optimization subroutine for M and W. Default: 100 normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True. initialization (str, optional): If initial means and weights are not provided, this describes how they are initialized. Options: 'cluster' (poisson cluster for means and weights), 'kmpp' (kmeans++ for means, random weights), 'km' (regular k-means), 'tsvd' (tsvd(50) + k-means). Default: tsvd. parallel (bool, optional): Whether to use parallel updates (sparse NoLips only). Default: True threads (int, optional): How many threads to use in the parallel computation. Default: 4 max_assign_weight (float, optional): If using a clustering-based initialization, how much weight to assign to the max weight cluster. Default: 0.75 run_w_first (bool, optional): Whether or not to optimize W first (if false, M will be optimized first). Default: True constrain_w (bool, optional): If True, then W is normalized after every iteration. 
Default: False regularization (float, optional): Regularization coefficient for M and W. Default: 0 (no regularization). write_progress_file (str, optional): filename to write progress updates to. Returns: M (array): genes x clusters - state means W (array): clusters x cells - state mixing components for each cell ll (float): final log-likelihood
[ "Uses", "a", "Poisson", "Covex", "Mixture", "model", "to", "estimate", "cell", "states", "and", "cell", "state", "mixing", "weights", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/state_estimation.py#L245-L350
yjzhang/uncurl_python
uncurl/state_estimation.py
update_m
def update_m(data, old_M, old_W, selected_genes, disp=False, inner_max_iters=100, parallel=True, threads=4, write_progress_file=None, tol=0.0, regularization=0.0, **kwargs): """ This returns a new M matrix that contains all genes, given an M that was created from running state estimation with a subset of genes. Args: data (sparse matrix or dense array): data matrix of shape (genes, cells), containing all genes old_M (array): shape is (selected_genes, k) old_W (array): shape is (k, cells) selected_genes (list): list of selected gene indices Rest of the args are as in poisson_estimate_state Returns: new_M: array of shape (all_genes, k) """ genes, cells = data.shape k = old_M.shape[1] non_selected_genes = [x for x in range(genes) if x not in set(selected_genes)] # 1. initialize new M new_M = np.zeros((genes, k)) new_M[selected_genes, :] = old_M # TODO: how to initialize rest of genes? # data*w? if disp: print('computing initial guess for M by data*W.T') new_M_non_selected = data[non_selected_genes, :] * sparse.csc_matrix(old_W.T) new_M[non_selected_genes, :] = new_M_non_selected.toarray() X = data.astype(float) XT = X.T is_sparse = False if sparse.issparse(X): is_sparse = True update_fn = sparse_nolips_update_w # convert to csc X = sparse.csc_matrix(X) XT = sparse.csc_matrix(XT) if parallel: update_fn = parallel_sparse_nolips_update_w Xsum = np.asarray(X.sum(0)).flatten() Xsum_m = np.asarray(X.sum(1)).flatten() # L-BFGS-B won't work right now for sparse matrices method = 'NoLips' objective_fn = _call_sparse_obj else: objective_fn = objective update_fn = nolips_update_w Xsum = X.sum(0) Xsum_m = X.sum(1) # If method is NoLips, converting to a sparse matrix # will always improve the performance (?) and never lower accuracy... # will almost always improve performance? # if sparsity is below 40%? 
if method == 'NoLips': is_sparse = True X = sparse.csc_matrix(X) XT = sparse.csc_matrix(XT) update_fn = sparse_nolips_update_w if parallel: update_fn = parallel_sparse_nolips_update_w objective_fn = _call_sparse_obj if disp: print('starting estimating M') new_M = _estimate_w(XT, new_M.T, old_W.T, Xsum_m, update_fn, objective_fn, is_sparse, parallel, threads, method, tol, disp, inner_max_iters, 'M', regularization) if write_progress_file is not None: progress = open(write_progress_file, 'w') progress.write('0') progress.close() return new_M.T
python
def update_m(data, old_M, old_W, selected_genes, disp=False, inner_max_iters=100, parallel=True, threads=4, write_progress_file=None, tol=0.0, regularization=0.0, **kwargs): """ This returns a new M matrix that contains all genes, given an M that was created from running state estimation with a subset of genes. Args: data (sparse matrix or dense array): data matrix of shape (genes, cells), containing all genes old_M (array): shape is (selected_genes, k) old_W (array): shape is (k, cells) selected_genes (list): list of selected gene indices Rest of the args are as in poisson_estimate_state Returns: new_M: array of shape (all_genes, k) """ genes, cells = data.shape k = old_M.shape[1] non_selected_genes = [x for x in range(genes) if x not in set(selected_genes)] # 1. initialize new M new_M = np.zeros((genes, k)) new_M[selected_genes, :] = old_M # TODO: how to initialize rest of genes? # data*w? if disp: print('computing initial guess for M by data*W.T') new_M_non_selected = data[non_selected_genes, :] * sparse.csc_matrix(old_W.T) new_M[non_selected_genes, :] = new_M_non_selected.toarray() X = data.astype(float) XT = X.T is_sparse = False if sparse.issparse(X): is_sparse = True update_fn = sparse_nolips_update_w # convert to csc X = sparse.csc_matrix(X) XT = sparse.csc_matrix(XT) if parallel: update_fn = parallel_sparse_nolips_update_w Xsum = np.asarray(X.sum(0)).flatten() Xsum_m = np.asarray(X.sum(1)).flatten() # L-BFGS-B won't work right now for sparse matrices method = 'NoLips' objective_fn = _call_sparse_obj else: objective_fn = objective update_fn = nolips_update_w Xsum = X.sum(0) Xsum_m = X.sum(1) # If method is NoLips, converting to a sparse matrix # will always improve the performance (?) and never lower accuracy... # will almost always improve performance? # if sparsity is below 40%? 
if method == 'NoLips': is_sparse = True X = sparse.csc_matrix(X) XT = sparse.csc_matrix(XT) update_fn = sparse_nolips_update_w if parallel: update_fn = parallel_sparse_nolips_update_w objective_fn = _call_sparse_obj if disp: print('starting estimating M') new_M = _estimate_w(XT, new_M.T, old_W.T, Xsum_m, update_fn, objective_fn, is_sparse, parallel, threads, method, tol, disp, inner_max_iters, 'M', regularization) if write_progress_file is not None: progress = open(write_progress_file, 'w') progress.write('0') progress.close() return new_M.T
[ "def", "update_m", "(", "data", ",", "old_M", ",", "old_W", ",", "selected_genes", ",", "disp", "=", "False", ",", "inner_max_iters", "=", "100", ",", "parallel", "=", "True", ",", "threads", "=", "4", ",", "write_progress_file", "=", "None", ",", "tol",...
This returns a new M matrix that contains all genes, given an M that was created from running state estimation with a subset of genes. Args: data (sparse matrix or dense array): data matrix of shape (genes, cells), containing all genes old_M (array): shape is (selected_genes, k) old_W (array): shape is (k, cells) selected_genes (list): list of selected gene indices Rest of the args are as in poisson_estimate_state Returns: new_M: array of shape (all_genes, k)
[ "This", "returns", "a", "new", "M", "matrix", "that", "contains", "all", "genes", "given", "an", "M", "that", "was", "created", "from", "running", "state", "estimation", "with", "a", "subset", "of", "genes", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/state_estimation.py#L353-L420
markperdue/pyvesync
home_assistant/custom_components/__init__.py
setup
def setup(hass, config): """Set up the VeSync component.""" from pyvesync.vesync import VeSync conf = config[DOMAIN] manager = VeSync(conf.get(CONF_USERNAME), conf.get(CONF_PASSWORD), time_zone=conf.get(CONF_TIME_ZONE)) if not manager.login(): _LOGGER.error("Unable to login to VeSync") return manager.update() hass.data[DOMAIN] = { 'manager': manager } discovery.load_platform(hass, 'switch', DOMAIN, {}, config) return True
python
def setup(hass, config): """Set up the VeSync component.""" from pyvesync.vesync import VeSync conf = config[DOMAIN] manager = VeSync(conf.get(CONF_USERNAME), conf.get(CONF_PASSWORD), time_zone=conf.get(CONF_TIME_ZONE)) if not manager.login(): _LOGGER.error("Unable to login to VeSync") return manager.update() hass.data[DOMAIN] = { 'manager': manager } discovery.load_platform(hass, 'switch', DOMAIN, {}, config) return True
[ "def", "setup", "(", "hass", ",", "config", ")", ":", "from", "pyvesync", ".", "vesync", "import", "VeSync", "conf", "=", "config", "[", "DOMAIN", "]", "manager", "=", "VeSync", "(", "conf", ".", "get", "(", "CONF_USERNAME", ")", ",", "conf", ".", "g...
Set up the VeSync component.
[ "Set", "up", "the", "VeSync", "component", "." ]
train
https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/home_assistant/custom_components/__init__.py#L22-L43
yjzhang/uncurl_python
uncurl/ensemble.py
state_estimation_ensemble
def state_estimation_ensemble(data, k, n_runs=10, M_list=[], **se_params): """ Runs an ensemble method on the list of M results... Args: data: genes x cells array k: number of classes n_runs (optional): number of random initializations of state estimation M_list (optional): list of M arrays from state estimation se_params (optional): optional poisson_estimate_state params Returns: M_new W_new ll """ if len(M_list)==0: M_list = [] for i in range(n_runs): M, W, ll = poisson_estimate_state(data, k, **se_params) M_list.append(M) M_stacked = np.hstack(M_list) M_new, W_new, ll = poisson_estimate_state(M_stacked, k, **se_params) W_new = np.dot(data.T, M_new) W_new = W_new/W_new.sum(0) return M_new, W_new, ll
python
def state_estimation_ensemble(data, k, n_runs=10, M_list=[], **se_params): """ Runs an ensemble method on the list of M results... Args: data: genes x cells array k: number of classes n_runs (optional): number of random initializations of state estimation M_list (optional): list of M arrays from state estimation se_params (optional): optional poisson_estimate_state params Returns: M_new W_new ll """ if len(M_list)==0: M_list = [] for i in range(n_runs): M, W, ll = poisson_estimate_state(data, k, **se_params) M_list.append(M) M_stacked = np.hstack(M_list) M_new, W_new, ll = poisson_estimate_state(M_stacked, k, **se_params) W_new = np.dot(data.T, M_new) W_new = W_new/W_new.sum(0) return M_new, W_new, ll
[ "def", "state_estimation_ensemble", "(", "data", ",", "k", ",", "n_runs", "=", "10", ",", "M_list", "=", "[", "]", ",", "*", "*", "se_params", ")", ":", "if", "len", "(", "M_list", ")", "==", "0", ":", "M_list", "=", "[", "]", "for", "i", "in", ...
Runs an ensemble method on the list of M results... Args: data: genes x cells array k: number of classes n_runs (optional): number of random initializations of state estimation M_list (optional): list of M arrays from state estimation se_params (optional): optional poisson_estimate_state params Returns: M_new W_new ll
[ "Runs", "an", "ensemble", "method", "on", "the", "list", "of", "M", "results", "..." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/ensemble.py#L22-L47
yjzhang/uncurl_python
uncurl/ensemble.py
nmf_ensemble
def nmf_ensemble(data, k, n_runs=10, W_list=[], **nmf_params): """ Runs an ensemble method on the list of NMF W matrices... Args: data: genes x cells array (should be log + cell-normalized) k: number of classes n_runs (optional): number of random initializations of state estimation M_list (optional): list of M arrays from state estimation se_params (optional): optional poisson_estimate_state params Returns: W_new H_new """ nmf = NMF(k) if len(W_list)==0: W_list = [] for i in range(n_runs): W = nmf.fit_transform(data) W_list.append(W) W_stacked = np.hstack(W_list) nmf_w = nmf.fit_transform(W_stacked) nmf_h = nmf.components_ H_new = data.T.dot(nmf_w).T nmf2 = NMF(k, init='custom') nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new) H_new = nmf2.components_ #W_new = W_new/W_new.sum(0) # alternatively, use nmf_w and h_new as initializations for another NMF round? return nmf_w, H_new
python
def nmf_ensemble(data, k, n_runs=10, W_list=[], **nmf_params): """ Runs an ensemble method on the list of NMF W matrices... Args: data: genes x cells array (should be log + cell-normalized) k: number of classes n_runs (optional): number of random initializations of state estimation M_list (optional): list of M arrays from state estimation se_params (optional): optional poisson_estimate_state params Returns: W_new H_new """ nmf = NMF(k) if len(W_list)==0: W_list = [] for i in range(n_runs): W = nmf.fit_transform(data) W_list.append(W) W_stacked = np.hstack(W_list) nmf_w = nmf.fit_transform(W_stacked) nmf_h = nmf.components_ H_new = data.T.dot(nmf_w).T nmf2 = NMF(k, init='custom') nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new) H_new = nmf2.components_ #W_new = W_new/W_new.sum(0) # alternatively, use nmf_w and h_new as initializations for another NMF round? return nmf_w, H_new
[ "def", "nmf_ensemble", "(", "data", ",", "k", ",", "n_runs", "=", "10", ",", "W_list", "=", "[", "]", ",", "*", "*", "nmf_params", ")", ":", "nmf", "=", "NMF", "(", "k", ")", "if", "len", "(", "W_list", ")", "==", "0", ":", "W_list", "=", "["...
Runs an ensemble method on the list of NMF W matrices... Args: data: genes x cells array (should be log + cell-normalized) k: number of classes n_runs (optional): number of random initializations of state estimation M_list (optional): list of M arrays from state estimation se_params (optional): optional poisson_estimate_state params Returns: W_new H_new
[ "Runs", "an", "ensemble", "method", "on", "the", "list", "of", "NMF", "W", "matrices", "..." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/ensemble.py#L49-L79
yjzhang/uncurl_python
uncurl/ensemble.py
nmf_kfold
def nmf_kfold(data, k, n_runs=10, **nmf_params): """ Runs K-fold ensemble topic modeling (Belford et al. 2017) """ # TODO nmf = NMF(k) W_list = [] kf = KFold(n_splits=n_runs, shuffle=True) # TODO: randomly divide data into n_runs folds for train_index, test_index in kf.split(data.T): W = nmf.fit_transform(data[:,train_index]) W_list.append(W) W_stacked = np.hstack(W_list) nmf_w = nmf.fit_transform(W_stacked) nmf_h = nmf.components_ H_new = data.T.dot(nmf_w).T nmf2 = NMF(k, init='custom') nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new) H_new = nmf2.components_ #W_new = W_new/W_new.sum(0) return nmf_w, H_new
python
def nmf_kfold(data, k, n_runs=10, **nmf_params): """ Runs K-fold ensemble topic modeling (Belford et al. 2017) """ # TODO nmf = NMF(k) W_list = [] kf = KFold(n_splits=n_runs, shuffle=True) # TODO: randomly divide data into n_runs folds for train_index, test_index in kf.split(data.T): W = nmf.fit_transform(data[:,train_index]) W_list.append(W) W_stacked = np.hstack(W_list) nmf_w = nmf.fit_transform(W_stacked) nmf_h = nmf.components_ H_new = data.T.dot(nmf_w).T nmf2 = NMF(k, init='custom') nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new) H_new = nmf2.components_ #W_new = W_new/W_new.sum(0) return nmf_w, H_new
[ "def", "nmf_kfold", "(", "data", ",", "k", ",", "n_runs", "=", "10", ",", "*", "*", "nmf_params", ")", ":", "# TODO", "nmf", "=", "NMF", "(", "k", ")", "W_list", "=", "[", "]", "kf", "=", "KFold", "(", "n_splits", "=", "n_runs", ",", "shuffle", ...
Runs K-fold ensemble topic modeling (Belford et al. 2017)
[ "Runs", "K", "-", "fold", "ensemble", "topic", "modeling", "(", "Belford", "et", "al", ".", "2017", ")" ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/ensemble.py#L81-L101
yjzhang/uncurl_python
uncurl/ensemble.py
nmf_init
def nmf_init(data, clusters, k, init='enhanced'): """ runs enhanced NMF initialization from clusterings (Gong 2013) There are 3 options for init: enhanced - uses EIn-NMF from Gong 2013 basic - uses means for W, assigns H such that the chosen cluster for a given cell has value 0.75 and all others have 0.25/(k-1). nmf - uses means for W, and assigns H using the NMF objective while holding W constant. """ init_w = np.zeros((data.shape[0], k)) if sparse.issparse(data): for i in range(k): if data[:,clusters==i].shape[1]==0: point = np.random.randint(0, data.shape[1]) init_w[:,i] = data[:,point].toarray().flatten() else: init_w[:,i] = np.array(data[:,clusters==i].mean(1)).flatten() else: for i in range(k): if data[:,clusters==i].shape[1]==0: point = np.random.randint(0, data.shape[1]) init_w[:,i] = data[:,point].flatten() else: init_w[:,i] = data[:,clusters==i].mean(1) init_h = np.zeros((k, data.shape[1])) if init == 'enhanced': distances = np.zeros((k, data.shape[1])) for i in range(k): for j in range(data.shape[1]): distances[i,j] = np.sqrt(((data[:,j] - init_w[:,i])**2).sum()) for i in range(k): for j in range(data.shape[1]): init_h[i,j] = 1/((distances[:,j]/distances[i,j])**(-2)).sum() elif init == 'basic': init_h = initialize_from_assignments(clusters, k) elif init == 'nmf': init_h_, _, n_iter = non_negative_factorization(data.T, n_components=k, init='custom', update_H=False, H=init_w.T) init_h = init_h_.T return init_w, init_h
python
def nmf_init(data, clusters, k, init='enhanced'): """ runs enhanced NMF initialization from clusterings (Gong 2013) There are 3 options for init: enhanced - uses EIn-NMF from Gong 2013 basic - uses means for W, assigns H such that the chosen cluster for a given cell has value 0.75 and all others have 0.25/(k-1). nmf - uses means for W, and assigns H using the NMF objective while holding W constant. """ init_w = np.zeros((data.shape[0], k)) if sparse.issparse(data): for i in range(k): if data[:,clusters==i].shape[1]==0: point = np.random.randint(0, data.shape[1]) init_w[:,i] = data[:,point].toarray().flatten() else: init_w[:,i] = np.array(data[:,clusters==i].mean(1)).flatten() else: for i in range(k): if data[:,clusters==i].shape[1]==0: point = np.random.randint(0, data.shape[1]) init_w[:,i] = data[:,point].flatten() else: init_w[:,i] = data[:,clusters==i].mean(1) init_h = np.zeros((k, data.shape[1])) if init == 'enhanced': distances = np.zeros((k, data.shape[1])) for i in range(k): for j in range(data.shape[1]): distances[i,j] = np.sqrt(((data[:,j] - init_w[:,i])**2).sum()) for i in range(k): for j in range(data.shape[1]): init_h[i,j] = 1/((distances[:,j]/distances[i,j])**(-2)).sum() elif init == 'basic': init_h = initialize_from_assignments(clusters, k) elif init == 'nmf': init_h_, _, n_iter = non_negative_factorization(data.T, n_components=k, init='custom', update_H=False, H=init_w.T) init_h = init_h_.T return init_w, init_h
[ "def", "nmf_init", "(", "data", ",", "clusters", ",", "k", ",", "init", "=", "'enhanced'", ")", ":", "init_w", "=", "np", ".", "zeros", "(", "(", "data", ".", "shape", "[", "0", "]", ",", "k", ")", ")", "if", "sparse", ".", "issparse", "(", "da...
runs enhanced NMF initialization from clusterings (Gong 2013) There are 3 options for init: enhanced - uses EIn-NMF from Gong 2013 basic - uses means for W, assigns H such that the chosen cluster for a given cell has value 0.75 and all others have 0.25/(k-1). nmf - uses means for W, and assigns H using the NMF objective while holding W constant.
[ "runs", "enhanced", "NMF", "initialization", "from", "clusterings", "(", "Gong", "2013", ")" ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/ensemble.py#L110-L148
yjzhang/uncurl_python
uncurl/ensemble.py
nmf_tsne
def nmf_tsne(data, k, n_runs=10, init='enhanced', **params): """ runs tsne-consensus-NMF 1. run a bunch of NMFs, get W and H 2. run tsne + km on all WH matrices 3. run consensus clustering on all km results 4. use consensus clustering as initialization for a new run of NMF 5. return the W and H from the resulting NMF run """ clusters = [] nmf = NMF(k) tsne = TSNE(2) km = KMeans(k) for i in range(n_runs): w = nmf.fit_transform(data) h = nmf.components_ tsne_wh = tsne.fit_transform(w.dot(h).T) clust = km.fit_predict(tsne_wh) clusters.append(clust) clusterings = np.vstack(clusters) consensus = CE.cluster_ensembles(clusterings, verbose=False, N_clusters_max=k) nmf_new = NMF(k, init='custom') # TODO: find an initialization for the consensus W and H init_w, init_h = nmf_init(data, consensus, k, init) W = nmf_new.fit_transform(data, W=init_w, H=init_h) H = nmf_new.components_ return W, H
python
def nmf_tsne(data, k, n_runs=10, init='enhanced', **params): """ runs tsne-consensus-NMF 1. run a bunch of NMFs, get W and H 2. run tsne + km on all WH matrices 3. run consensus clustering on all km results 4. use consensus clustering as initialization for a new run of NMF 5. return the W and H from the resulting NMF run """ clusters = [] nmf = NMF(k) tsne = TSNE(2) km = KMeans(k) for i in range(n_runs): w = nmf.fit_transform(data) h = nmf.components_ tsne_wh = tsne.fit_transform(w.dot(h).T) clust = km.fit_predict(tsne_wh) clusters.append(clust) clusterings = np.vstack(clusters) consensus = CE.cluster_ensembles(clusterings, verbose=False, N_clusters_max=k) nmf_new = NMF(k, init='custom') # TODO: find an initialization for the consensus W and H init_w, init_h = nmf_init(data, consensus, k, init) W = nmf_new.fit_transform(data, W=init_w, H=init_h) H = nmf_new.components_ return W, H
[ "def", "nmf_tsne", "(", "data", ",", "k", ",", "n_runs", "=", "10", ",", "init", "=", "'enhanced'", ",", "*", "*", "params", ")", ":", "clusters", "=", "[", "]", "nmf", "=", "NMF", "(", "k", ")", "tsne", "=", "TSNE", "(", "2", ")", "km", "=",...
runs tsne-consensus-NMF 1. run a bunch of NMFs, get W and H 2. run tsne + km on all WH matrices 3. run consensus clustering on all km results 4. use consensus clustering as initialization for a new run of NMF 5. return the W and H from the resulting NMF run
[ "runs", "tsne", "-", "consensus", "-", "NMF" ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/ensemble.py#L150-L177
yjzhang/uncurl_python
uncurl/ensemble.py
poisson_se_multiclust
def poisson_se_multiclust(data, k, n_runs=10, **se_params): """ Initializes state estimation using a consensus of several fast clustering/dimensionality reduction algorithms. It does a consensus of 8 truncated SVD - k-means rounds, and uses the basic nmf_init to create starting points. """ clusters = [] norm_data = cell_normalize(data) if sparse.issparse(data): log_data = data.log1p() log_norm = norm_data.log1p() else: log_data = np.log1p(data) log_norm = np.log1p(norm_data) tsvd_50 = TruncatedSVD(50) tsvd_k = TruncatedSVD(k) km = KMeans(k) tsvd1 = tsvd_50.fit_transform(data.T) tsvd2 = tsvd_k.fit_transform(data.T) tsvd3 = tsvd_50.fit_transform(log_data.T) tsvd4 = tsvd_k.fit_transform(log_data.T) tsvd5 = tsvd_50.fit_transform(norm_data.T) tsvd6 = tsvd_k.fit_transform(norm_data.T) tsvd7 = tsvd_50.fit_transform(log_norm.T) tsvd8 = tsvd_k.fit_transform(log_norm.T) tsvd_results = [tsvd1, tsvd2, tsvd3, tsvd4, tsvd5, tsvd6, tsvd7, tsvd8] clusters = [] for t in tsvd_results: clust = km.fit_predict(t) clusters.append(clust) clusterings = np.vstack(clusters) consensus = CE.cluster_ensembles(clusterings, verbose=False, N_clusters_max=k) init_m, init_w = nmf_init(data, consensus, k, 'basic') M, W, ll = poisson_estimate_state(data, k, init_means=init_m, init_weights=init_w, **se_params) return M, W, ll
python
def poisson_se_multiclust(data, k, n_runs=10, **se_params): """ Initializes state estimation using a consensus of several fast clustering/dimensionality reduction algorithms. It does a consensus of 8 truncated SVD - k-means rounds, and uses the basic nmf_init to create starting points. """ clusters = [] norm_data = cell_normalize(data) if sparse.issparse(data): log_data = data.log1p() log_norm = norm_data.log1p() else: log_data = np.log1p(data) log_norm = np.log1p(norm_data) tsvd_50 = TruncatedSVD(50) tsvd_k = TruncatedSVD(k) km = KMeans(k) tsvd1 = tsvd_50.fit_transform(data.T) tsvd2 = tsvd_k.fit_transform(data.T) tsvd3 = tsvd_50.fit_transform(log_data.T) tsvd4 = tsvd_k.fit_transform(log_data.T) tsvd5 = tsvd_50.fit_transform(norm_data.T) tsvd6 = tsvd_k.fit_transform(norm_data.T) tsvd7 = tsvd_50.fit_transform(log_norm.T) tsvd8 = tsvd_k.fit_transform(log_norm.T) tsvd_results = [tsvd1, tsvd2, tsvd3, tsvd4, tsvd5, tsvd6, tsvd7, tsvd8] clusters = [] for t in tsvd_results: clust = km.fit_predict(t) clusters.append(clust) clusterings = np.vstack(clusters) consensus = CE.cluster_ensembles(clusterings, verbose=False, N_clusters_max=k) init_m, init_w = nmf_init(data, consensus, k, 'basic') M, W, ll = poisson_estimate_state(data, k, init_means=init_m, init_weights=init_w, **se_params) return M, W, ll
[ "def", "poisson_se_multiclust", "(", "data", ",", "k", ",", "n_runs", "=", "10", ",", "*", "*", "se_params", ")", ":", "clusters", "=", "[", "]", "norm_data", "=", "cell_normalize", "(", "data", ")", "if", "sparse", ".", "issparse", "(", "data", ")", ...
Initializes state estimation using a consensus of several fast clustering/dimensionality reduction algorithms. It does a consensus of 8 truncated SVD - k-means rounds, and uses the basic nmf_init to create starting points.
[ "Initializes", "state", "estimation", "using", "a", "consensus", "of", "several", "fast", "clustering", "/", "dimensionality", "reduction", "algorithms", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/ensemble.py#L197-L233
yjzhang/uncurl_python
uncurl/ensemble.py
poisson_consensus_se
def poisson_consensus_se(data, k, n_runs=10, **se_params): """ Initializes Poisson State Estimation using a consensus Poisson clustering. """ clusters = [] for i in range(n_runs): assignments, means = poisson_cluster(data, k) clusters.append(assignments) clusterings = np.vstack(clusters) consensus = CE.cluster_ensembles(clusterings, verbose=False, N_clusters_max=k) init_m, init_w = nmf_init(data, consensus, k, 'basic') M, W, ll = poisson_estimate_state(data, k, init_means=init_m, init_weights=init_w, **se_params) return M, W, ll
python
def poisson_consensus_se(data, k, n_runs=10, **se_params): """ Initializes Poisson State Estimation using a consensus Poisson clustering. """ clusters = [] for i in range(n_runs): assignments, means = poisson_cluster(data, k) clusters.append(assignments) clusterings = np.vstack(clusters) consensus = CE.cluster_ensembles(clusterings, verbose=False, N_clusters_max=k) init_m, init_w = nmf_init(data, consensus, k, 'basic') M, W, ll = poisson_estimate_state(data, k, init_means=init_m, init_weights=init_w, **se_params) return M, W, ll
[ "def", "poisson_consensus_se", "(", "data", ",", "k", ",", "n_runs", "=", "10", ",", "*", "*", "se_params", ")", ":", "clusters", "=", "[", "]", "for", "i", "in", "range", "(", "n_runs", ")", ":", "assignments", ",", "means", "=", "poisson_cluster", ...
Initializes Poisson State Estimation using a consensus Poisson clustering.
[ "Initializes", "Poisson", "State", "Estimation", "using", "a", "consensus", "Poisson", "clustering", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/ensemble.py#L235-L247
yjzhang/uncurl_python
uncurl/ensemble.py
lensNMF
def lensNMF(data, k, ks=1): """ Runs L-EnsNMF on the data. (Suh et al. 2016) """ # TODO: why is this not working n_rounds = k/ks R_i = data.copy() nmf = NMF(ks) nmf2 = NMF(ks, init='custom') w_is = [] h_is = [] rs = [] w_i = np.zeros((data.shape[0], ks)) h_i = np.zeros((ks, data.shape[1])) for i in range(n_rounds): R_i = R_i - w_i.dot(h_i) R_i[R_i < 0] = 0 """ P_r = R_i.sum(1)/R_i.sum() print(P_r.shape) P_c = R_i.sum(0)/R_i.sum() print(P_c.shape) row_choice = np.random.choice(range(len(P_r)), p=P_r) print(row_choice) col_choice = np.random.choice(range(len(P_c)), p=P_c) print(col_choice) D_r = cosine_similarity(data[row_choice:row_choice+1,:], data) D_c = cosine_similarity(data[:,col_choice:col_choice+1].T, data.T) D_r = np.diag(D_r.flatten()) D_c = np.diag(D_c.flatten()) R_L = D_r.dot(R_i).dot(D_c) w_i = nmf.fit_transform(R_L) """ w_i = nmf.fit_transform(R_i) h_i = nmf.components_ #nmf2.fit_transform(R_i, W=w_i, H=nmf.components_) #h_i = nmf2.components_ #h_i[h_i < 0] = 0 w_is.append(w_i) h_is.append(h_i) rs.append(R_i) return np.hstack(w_is), np.vstack(h_is), rs
python
def lensNMF(data, k, ks=1): """ Runs L-EnsNMF on the data. (Suh et al. 2016) """ # TODO: why is this not working n_rounds = k/ks R_i = data.copy() nmf = NMF(ks) nmf2 = NMF(ks, init='custom') w_is = [] h_is = [] rs = [] w_i = np.zeros((data.shape[0], ks)) h_i = np.zeros((ks, data.shape[1])) for i in range(n_rounds): R_i = R_i - w_i.dot(h_i) R_i[R_i < 0] = 0 """ P_r = R_i.sum(1)/R_i.sum() print(P_r.shape) P_c = R_i.sum(0)/R_i.sum() print(P_c.shape) row_choice = np.random.choice(range(len(P_r)), p=P_r) print(row_choice) col_choice = np.random.choice(range(len(P_c)), p=P_c) print(col_choice) D_r = cosine_similarity(data[row_choice:row_choice+1,:], data) D_c = cosine_similarity(data[:,col_choice:col_choice+1].T, data.T) D_r = np.diag(D_r.flatten()) D_c = np.diag(D_c.flatten()) R_L = D_r.dot(R_i).dot(D_c) w_i = nmf.fit_transform(R_L) """ w_i = nmf.fit_transform(R_i) h_i = nmf.components_ #nmf2.fit_transform(R_i, W=w_i, H=nmf.components_) #h_i = nmf2.components_ #h_i[h_i < 0] = 0 w_is.append(w_i) h_is.append(h_i) rs.append(R_i) return np.hstack(w_is), np.vstack(h_is), rs
[ "def", "lensNMF", "(", "data", ",", "k", ",", "ks", "=", "1", ")", ":", "# TODO: why is this not working", "n_rounds", "=", "k", "/", "ks", "R_i", "=", "data", ".", "copy", "(", ")", "nmf", "=", "NMF", "(", "ks", ")", "nmf2", "=", "NMF", "(", "ks...
Runs L-EnsNMF on the data. (Suh et al. 2016)
[ "Runs", "L", "-", "EnsNMF", "on", "the", "data", ".", "(", "Suh", "et", "al", ".", "2016", ")" ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/ensemble.py#L274-L315
moonso/loqusdb
loqusdb/utils/case.py
get_case
def get_case(family_lines, family_type='ped', vcf_path=None): """Return ped_parser case from a family file Create a dictionary with case data. If no family file is given create from VCF Args: family_lines (iterator): The family lines family_type (str): The format of the family lines vcf_path(str): Path to VCF Returns: family (Family): A ped_parser family object """ family = None LOG.info("Parsing family information") family_parser = FamilyParser(family_lines, family_type) families = list(family_parser.families.keys()) LOG.info("Found families {0}".format(', '.join(families))) if len(families) > 1: raise CaseError("Only one family per load can be used") family = family_parser.families[families[0]] return family
python
def get_case(family_lines, family_type='ped', vcf_path=None): """Return ped_parser case from a family file Create a dictionary with case data. If no family file is given create from VCF Args: family_lines (iterator): The family lines family_type (str): The format of the family lines vcf_path(str): Path to VCF Returns: family (Family): A ped_parser family object """ family = None LOG.info("Parsing family information") family_parser = FamilyParser(family_lines, family_type) families = list(family_parser.families.keys()) LOG.info("Found families {0}".format(', '.join(families))) if len(families) > 1: raise CaseError("Only one family per load can be used") family = family_parser.families[families[0]] return family
[ "def", "get_case", "(", "family_lines", ",", "family_type", "=", "'ped'", ",", "vcf_path", "=", "None", ")", ":", "family", "=", "None", "LOG", ".", "info", "(", "\"Parsing family information\"", ")", "family_parser", "=", "FamilyParser", "(", "family_lines", ...
Return ped_parser case from a family file Create a dictionary with case data. If no family file is given create from VCF Args: family_lines (iterator): The family lines family_type (str): The format of the family lines vcf_path(str): Path to VCF Returns: family (Family): A ped_parser family object
[ "Return", "ped_parser", "case", "from", "a", "family", "file", "Create", "a", "dictionary", "with", "case", "data", ".", "If", "no", "family", "file", "is", "given", "create", "from", "VCF", "Args", ":", "family_lines", "(", "iterator", ")", ":", "The", ...
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/case.py#L11-L38
moonso/loqusdb
loqusdb/utils/case.py
update_case
def update_case(case_obj, existing_case): """Update an existing case This will add paths to VCF files, individuals etc Args: case_obj(models.Case) existing_case(models.Case) Returns: updated_case(models.Case): Updated existing case """ variant_nrs = ['nr_variants', 'nr_sv_variants'] individuals = [('individuals','_inds'), ('sv_individuals','_sv_inds')] updated_case = deepcopy(existing_case) for i,file_name in enumerate(['vcf_path','vcf_sv_path']): variant_type = 'snv' if file_name == 'vcf_sv_path': variant_type = 'sv' if case_obj.get(file_name): if updated_case.get(file_name): LOG.warning("VCF of type %s already exists in case", variant_type) raise CaseError("Can not replace VCF in existing case") else: updated_case[file_name] = case_obj[file_name] updated_case[variant_nrs[i]] = case_obj[variant_nrs[i]] updated_case[individuals[i][0]] = case_obj[individuals[i][0]] updated_case[individuals[i][1]] = case_obj[individuals[i][1]] return updated_case
python
def update_case(case_obj, existing_case): """Update an existing case This will add paths to VCF files, individuals etc Args: case_obj(models.Case) existing_case(models.Case) Returns: updated_case(models.Case): Updated existing case """ variant_nrs = ['nr_variants', 'nr_sv_variants'] individuals = [('individuals','_inds'), ('sv_individuals','_sv_inds')] updated_case = deepcopy(existing_case) for i,file_name in enumerate(['vcf_path','vcf_sv_path']): variant_type = 'snv' if file_name == 'vcf_sv_path': variant_type = 'sv' if case_obj.get(file_name): if updated_case.get(file_name): LOG.warning("VCF of type %s already exists in case", variant_type) raise CaseError("Can not replace VCF in existing case") else: updated_case[file_name] = case_obj[file_name] updated_case[variant_nrs[i]] = case_obj[variant_nrs[i]] updated_case[individuals[i][0]] = case_obj[individuals[i][0]] updated_case[individuals[i][1]] = case_obj[individuals[i][1]] return updated_case
[ "def", "update_case", "(", "case_obj", ",", "existing_case", ")", ":", "variant_nrs", "=", "[", "'nr_variants'", ",", "'nr_sv_variants'", "]", "individuals", "=", "[", "(", "'individuals'", ",", "'_inds'", ")", ",", "(", "'sv_individuals'", ",", "'_sv_inds'", ...
Update an existing case This will add paths to VCF files, individuals etc Args: case_obj(models.Case) existing_case(models.Case) Returns: updated_case(models.Case): Updated existing case
[ "Update", "an", "existing", "case", "This", "will", "add", "paths", "to", "VCF", "files", "individuals", "etc", "Args", ":", "case_obj", "(", "models", ".", "Case", ")", "existing_case", "(", "models", ".", "Case", ")", "Returns", ":", "updated_case", "(",...
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/case.py#L40-L71
fbergmann/libSEDML
examples/python/print_sedml.py
main
def main (args): """Usage: print_sedml input-filename """ if len(args) != 2: print(main.__doc__) sys.exit(1) doc = libsedml.readSedML(args[1]); if ( doc.getErrorLog().getNumFailsWithSeverity(libsedml.LIBSEDML_SEV_ERROR) > 0): print doc.getErrorLog().toString(); sys.exit(2); print 'The document has {0}" simulation(s).'.format(doc.getNumSimulations()); for i in range(0, doc.getNumSimulations()): current = doc.getSimulation(i); if (current.getTypeCode() == libsedml.SEDML_SIMULATION_UNIFORMTIMECOURSE): tc = current; kisaoid="none" if tc.isSetAlgorithm(): kisaoid=tc.getAlgorithm().getKisaoID() print "\tTimecourse id=", tc.getId()," start=",tc.getOutputStartTime()," end=",tc.getOutputEndTime()," numPoints=",tc.getNumberOfPoints()," kisao=",kisaoid,"\n"; else: print "\tUncountered unknown simulation. ",current.getId(),"\n"; print "\n" print "The document has ",doc.getNumModels() , " model(s)." , "\n"; for i in range(0,doc.getNumModels()): current = doc.getModel(i); print "\tModel id=" , current.getId() , " language=" , current.getLanguage() , " source=" , current.getSource() , " numChanges=" , current.getNumChanges() , "\n"; print "\n"; print "The document has " , doc.getNumTasks() , " task(s)." , "\n"; for i in range(0,doc.getNumTasks()): current = doc.getTask(i); print "\tTask id=" , current.getId() , " model=" , current.getModelReference() , " sim=" , current.getSimulationReference() , "\n"; print "\n"; print "The document has " , doc.getNumDataGenerators() , " datagenerators(s)." , "\n"; for i in range( 0, doc.getNumDataGenerators()): current = doc.getDataGenerator(i); print "\tDG id=" , current.getId() , " math=" , libsedml.formulaToString(current.getMath()) , "\n"; print "\n"; print "The document has " , doc.getNumOutputs() , " output(s)." 
, "\n"; for i in range (0, doc.getNumOutputs()): current = doc.getOutput(i); tc = current.getTypeCode(); if tc == libsedml.SEDML_OUTPUT_REPORT: r = (current); print "\tReport id=" , current.getId() , " numDataSets=" , r.getNumDataSets() , "\n"; elif tc == libsedml.SEDML_OUTPUT_PLOT2D: p = (current); print "\tPlot2d id=" , current.getId() , " numCurves=" , p.getNumCurves() , "\n"; elif tc == libsedml.SEDML_OUTPUT_PLOT3D: p = (current); print "\tPlot3d id=" , current.getId() , " numSurfaces=" , p.getNumSurfaces() , "\n"; else: print "\tEncountered unknown output " , current.getId() , "\n";
python
def main (args): """Usage: print_sedml input-filename """ if len(args) != 2: print(main.__doc__) sys.exit(1) doc = libsedml.readSedML(args[1]); if ( doc.getErrorLog().getNumFailsWithSeverity(libsedml.LIBSEDML_SEV_ERROR) > 0): print doc.getErrorLog().toString(); sys.exit(2); print 'The document has {0}" simulation(s).'.format(doc.getNumSimulations()); for i in range(0, doc.getNumSimulations()): current = doc.getSimulation(i); if (current.getTypeCode() == libsedml.SEDML_SIMULATION_UNIFORMTIMECOURSE): tc = current; kisaoid="none" if tc.isSetAlgorithm(): kisaoid=tc.getAlgorithm().getKisaoID() print "\tTimecourse id=", tc.getId()," start=",tc.getOutputStartTime()," end=",tc.getOutputEndTime()," numPoints=",tc.getNumberOfPoints()," kisao=",kisaoid,"\n"; else: print "\tUncountered unknown simulation. ",current.getId(),"\n"; print "\n" print "The document has ",doc.getNumModels() , " model(s)." , "\n"; for i in range(0,doc.getNumModels()): current = doc.getModel(i); print "\tModel id=" , current.getId() , " language=" , current.getLanguage() , " source=" , current.getSource() , " numChanges=" , current.getNumChanges() , "\n"; print "\n"; print "The document has " , doc.getNumTasks() , " task(s)." , "\n"; for i in range(0,doc.getNumTasks()): current = doc.getTask(i); print "\tTask id=" , current.getId() , " model=" , current.getModelReference() , " sim=" , current.getSimulationReference() , "\n"; print "\n"; print "The document has " , doc.getNumDataGenerators() , " datagenerators(s)." , "\n"; for i in range( 0, doc.getNumDataGenerators()): current = doc.getDataGenerator(i); print "\tDG id=" , current.getId() , " math=" , libsedml.formulaToString(current.getMath()) , "\n"; print "\n"; print "The document has " , doc.getNumOutputs() , " output(s)." 
, "\n"; for i in range (0, doc.getNumOutputs()): current = doc.getOutput(i); tc = current.getTypeCode(); if tc == libsedml.SEDML_OUTPUT_REPORT: r = (current); print "\tReport id=" , current.getId() , " numDataSets=" , r.getNumDataSets() , "\n"; elif tc == libsedml.SEDML_OUTPUT_PLOT2D: p = (current); print "\tPlot2d id=" , current.getId() , " numCurves=" , p.getNumCurves() , "\n"; elif tc == libsedml.SEDML_OUTPUT_PLOT3D: p = (current); print "\tPlot3d id=" , current.getId() , " numSurfaces=" , p.getNumSurfaces() , "\n"; else: print "\tEncountered unknown output " , current.getId() , "\n";
[ "def", "main", "(", "args", ")", ":", "if", "len", "(", "args", ")", "!=", "2", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", "1", ")", "doc", "=", "libsedml", ".", "readSedML", "(", "args", "[", "1", "]", ")", "if"...
Usage: print_sedml input-filename
[ "Usage", ":", "print_sedml", "input", "-", "filename" ]
train
https://github.com/fbergmann/libSEDML/blob/2611274d993cb92c663f8f0296896a6e441f75fd/examples/python/print_sedml.py#L40-L97
bachya/py17track
py17track/profile.py
Profile.login
async def login(self, email: str, password: str) -> bool: """Login to the profile.""" login_resp = await self._request( 'post', API_URL_USER, json={ 'version': '1.0', 'method': 'Signin', 'param': { 'Email': email, 'Password': password, 'CaptchaCode': '' }, 'sourcetype': 0 }) _LOGGER.debug('Login response: %s', login_resp) if login_resp.get('Code') != 0: return False self.account_id = login_resp['Json']['gid'] return True
python
async def login(self, email: str, password: str) -> bool: """Login to the profile.""" login_resp = await self._request( 'post', API_URL_USER, json={ 'version': '1.0', 'method': 'Signin', 'param': { 'Email': email, 'Password': password, 'CaptchaCode': '' }, 'sourcetype': 0 }) _LOGGER.debug('Login response: %s', login_resp) if login_resp.get('Code') != 0: return False self.account_id = login_resp['Json']['gid'] return True
[ "async", "def", "login", "(", "self", ",", "email", ":", "str", ",", "password", ":", "str", ")", "->", "bool", ":", "login_resp", "=", "await", "self", ".", "_request", "(", "'post'", ",", "API_URL_USER", ",", "json", "=", "{", "'version'", ":", "'1...
Login to the profile.
[ "Login", "to", "the", "profile", "." ]
train
https://github.com/bachya/py17track/blob/e6e64f2a79571433df7ee702cb4ebc4127b7ad6d/py17track/profile.py#L22-L45
bachya/py17track
py17track/profile.py
Profile.packages
async def packages( self, package_state: Union[int, str] = '', show_archived: bool = False) -> list: """Get the list of packages associated with the account.""" packages_resp = await self._request( 'post', API_URL_BUYER, json={ 'version': '1.0', 'method': 'GetTrackInfoList', 'param': { 'IsArchived': show_archived, 'Item': '', 'Page': 1, 'PerPage': 40, 'PackageState': package_state, 'Sequence': '0' }, 'sourcetype': 0 }) _LOGGER.debug('Packages response: %s', packages_resp) packages = [] for package in packages_resp.get('Json', []): last_event = package.get('FLastEvent') if last_event: event = json.loads(last_event) else: event = {} kwargs = { 'destination_country': package.get('FSecondCountry', 0), 'friendly_name': package.get('FRemark'), 'info_text': event.get('z'), 'location': event.get('c'), 'origin_country': package.get('FFirstCountry', 0), 'package_type': package.get('FTrackStateType', 0), 'status': package.get('FPackageState', 0) } packages.append(Package(package['FTrackNo'], **kwargs)) return packages
python
async def packages( self, package_state: Union[int, str] = '', show_archived: bool = False) -> list: """Get the list of packages associated with the account.""" packages_resp = await self._request( 'post', API_URL_BUYER, json={ 'version': '1.0', 'method': 'GetTrackInfoList', 'param': { 'IsArchived': show_archived, 'Item': '', 'Page': 1, 'PerPage': 40, 'PackageState': package_state, 'Sequence': '0' }, 'sourcetype': 0 }) _LOGGER.debug('Packages response: %s', packages_resp) packages = [] for package in packages_resp.get('Json', []): last_event = package.get('FLastEvent') if last_event: event = json.loads(last_event) else: event = {} kwargs = { 'destination_country': package.get('FSecondCountry', 0), 'friendly_name': package.get('FRemark'), 'info_text': event.get('z'), 'location': event.get('c'), 'origin_country': package.get('FFirstCountry', 0), 'package_type': package.get('FTrackStateType', 0), 'status': package.get('FPackageState', 0) } packages.append(Package(package['FTrackNo'], **kwargs)) return packages
[ "async", "def", "packages", "(", "self", ",", "package_state", ":", "Union", "[", "int", ",", "str", "]", "=", "''", ",", "show_archived", ":", "bool", "=", "False", ")", "->", "list", ":", "packages_resp", "=", "await", "self", ".", "_request", "(", ...
Get the list of packages associated with the account.
[ "Get", "the", "list", "of", "packages", "associated", "with", "the", "account", "." ]
train
https://github.com/bachya/py17track/blob/e6e64f2a79571433df7ee702cb4ebc4127b7ad6d/py17track/profile.py#L47-L88
bachya/py17track
py17track/profile.py
Profile.summary
async def summary(self, show_archived: bool = False) -> dict: """Get a quick summary of how many packages are in an account.""" summary_resp = await self._request( 'post', API_URL_BUYER, json={ 'version': '1.0', 'method': 'GetIndexData', 'param': { 'IsArchived': show_archived }, 'sourcetype': 0 }) _LOGGER.debug('Summary response: %s', summary_resp) results = {} for kind in summary_resp.get('Json', {}).get('eitem', []): results[PACKAGE_STATUS_MAP[kind['e']]] = kind['ec'] return results
python
async def summary(self, show_archived: bool = False) -> dict: """Get a quick summary of how many packages are in an account.""" summary_resp = await self._request( 'post', API_URL_BUYER, json={ 'version': '1.0', 'method': 'GetIndexData', 'param': { 'IsArchived': show_archived }, 'sourcetype': 0 }) _LOGGER.debug('Summary response: %s', summary_resp) results = {} for kind in summary_resp.get('Json', {}).get('eitem', []): results[PACKAGE_STATUS_MAP[kind['e']]] = kind['ec'] return results
[ "async", "def", "summary", "(", "self", ",", "show_archived", ":", "bool", "=", "False", ")", "->", "dict", ":", "summary_resp", "=", "await", "self", ".", "_request", "(", "'post'", ",", "API_URL_BUYER", ",", "json", "=", "{", "'version'", ":", "'1.0'",...
Get a quick summary of how many packages are in an account.
[ "Get", "a", "quick", "summary", "of", "how", "many", "packages", "are", "in", "an", "account", "." ]
train
https://github.com/bachya/py17track/blob/e6e64f2a79571433df7ee702cb4ebc4127b7ad6d/py17track/profile.py#L90-L109
markperdue/pyvesync
home_assistant/custom_components/switch.py
setup_platform
def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the VeSync switch platform.""" if discovery_info is None: return switches = [] manager = hass.data[DOMAIN]['manager'] if manager.outlets is not None and manager.outlets: if len(manager.outlets) == 1: count_string = 'switch' else: count_string = 'switches' _LOGGER.info("Discovered %d VeSync %s", len(manager.outlets), count_string) if len(manager.outlets) > 1: for switch in manager.outlets: switch._energy_update_interval = ENERGY_UPDATE_INT switches.append(VeSyncSwitchHA(switch)) _LOGGER.info("Added a VeSync switch named '%s'", switch.device_name) else: switches.append(VeSyncSwitchHA(manager.outlets)) else: _LOGGER.info("No VeSync switches found") add_entities(switches)
python
def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the VeSync switch platform.""" if discovery_info is None: return switches = [] manager = hass.data[DOMAIN]['manager'] if manager.outlets is not None and manager.outlets: if len(manager.outlets) == 1: count_string = 'switch' else: count_string = 'switches' _LOGGER.info("Discovered %d VeSync %s", len(manager.outlets), count_string) if len(manager.outlets) > 1: for switch in manager.outlets: switch._energy_update_interval = ENERGY_UPDATE_INT switches.append(VeSyncSwitchHA(switch)) _LOGGER.info("Added a VeSync switch named '%s'", switch.device_name) else: switches.append(VeSyncSwitchHA(manager.outlets)) else: _LOGGER.info("No VeSync switches found") add_entities(switches)
[ "def", "setup_platform", "(", "hass", ",", "config", ",", "add_entities", ",", "discovery_info", "=", "None", ")", ":", "if", "discovery_info", "is", "None", ":", "return", "switches", "=", "[", "]", "manager", "=", "hass", ".", "data", "[", "DOMAIN", "]...
Set up the VeSync switch platform.
[ "Set", "up", "the", "VeSync", "switch", "platform", "." ]
train
https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/home_assistant/custom_components/switch.py#L12-L41
markperdue/pyvesync
home_assistant/custom_components/switch.py
VeSyncSwitchHA.device_state_attributes
def device_state_attributes(self): """Return the state attributes of the device.""" attr = {} attr['active_time'] = self.smartplug.active_time attr['voltage'] = self.smartplug.voltage attr['active_time'] = self.smartplug.active_time attr['weekly_energy_total'] = self.smartplug.weekly_energy_total attr['monthly_energy_total'] = self.smartplug.monthly_energy_total attr['yearly_energy_total'] = self.smartplug.yearly_energy_total return attr
python
def device_state_attributes(self): """Return the state attributes of the device.""" attr = {} attr['active_time'] = self.smartplug.active_time attr['voltage'] = self.smartplug.voltage attr['active_time'] = self.smartplug.active_time attr['weekly_energy_total'] = self.smartplug.weekly_energy_total attr['monthly_energy_total'] = self.smartplug.monthly_energy_total attr['yearly_energy_total'] = self.smartplug.yearly_energy_total return attr
[ "def", "device_state_attributes", "(", "self", ")", ":", "attr", "=", "{", "}", "attr", "[", "'active_time'", "]", "=", "self", ".", "smartplug", ".", "active_time", "attr", "[", "'voltage'", "]", "=", "self", ".", "smartplug", ".", "voltage", "attr", "[...
Return the state attributes of the device.
[ "Return", "the", "state", "attributes", "of", "the", "device", "." ]
train
https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/home_assistant/custom_components/switch.py#L62-L71
moonso/loqusdb
loqusdb/plugins/mongo/variant.py
VariantMixin._get_update
def _get_update(self, variant): """Convert a variant to a proper update Args: variant(dict) Returns: update(dict) """ update = { '$inc': { 'homozygote': variant.get('homozygote', 0), 'hemizygote': variant.get('hemizygote', 0), 'observations': 1 }, '$set': { 'chrom': variant.get('chrom'), 'start': variant.get('pos'), 'end': variant.get('end'), 'ref': variant.get('ref'), 'alt': variant.get('alt'), } } if variant.get('case_id'): update['$push'] = { 'families': { '$each': [variant.get('case_id')], '$slice': -50 } } return update
python
def _get_update(self, variant): """Convert a variant to a proper update Args: variant(dict) Returns: update(dict) """ update = { '$inc': { 'homozygote': variant.get('homozygote', 0), 'hemizygote': variant.get('hemizygote', 0), 'observations': 1 }, '$set': { 'chrom': variant.get('chrom'), 'start': variant.get('pos'), 'end': variant.get('end'), 'ref': variant.get('ref'), 'alt': variant.get('alt'), } } if variant.get('case_id'): update['$push'] = { 'families': { '$each': [variant.get('case_id')], '$slice': -50 } } return update
[ "def", "_get_update", "(", "self", ",", "variant", ")", ":", "update", "=", "{", "'$inc'", ":", "{", "'homozygote'", ":", "variant", ".", "get", "(", "'homozygote'", ",", "0", ")", ",", "'hemizygote'", ":", "variant", ".", "get", "(", "'hemizygote'", "...
Convert a variant to a proper update Args: variant(dict) Returns: update(dict)
[ "Convert", "a", "variant", "to", "a", "proper", "update", "Args", ":", "variant", "(", "dict", ")", "Returns", ":", "update", "(", "dict", ")" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/variant.py#L14-L44
moonso/loqusdb
loqusdb/plugins/mongo/variant.py
VariantMixin.add_variant
def add_variant(self, variant): """Add a variant to the variant collection If the variant exists we update the count else we insert a new variant object. Args: variant (dict): A variant dictionary """ LOG.debug("Upserting variant: {0}".format(variant.get('_id'))) update = self._get_update(variant) message = self.db.variant.update_one( {'_id': variant['_id']}, update, upsert=True ) if message.modified_count == 1: LOG.debug("Variant %s was updated", variant.get('_id')) else: LOG.debug("Variant was added to database for first time") return
python
def add_variant(self, variant): """Add a variant to the variant collection If the variant exists we update the count else we insert a new variant object. Args: variant (dict): A variant dictionary """ LOG.debug("Upserting variant: {0}".format(variant.get('_id'))) update = self._get_update(variant) message = self.db.variant.update_one( {'_id': variant['_id']}, update, upsert=True ) if message.modified_count == 1: LOG.debug("Variant %s was updated", variant.get('_id')) else: LOG.debug("Variant was added to database for first time") return
[ "def", "add_variant", "(", "self", ",", "variant", ")", ":", "LOG", ".", "debug", "(", "\"Upserting variant: {0}\"", ".", "format", "(", "variant", ".", "get", "(", "'_id'", ")", ")", ")", "update", "=", "self", ".", "_get_update", "(", "variant", ")", ...
Add a variant to the variant collection If the variant exists we update the count else we insert a new variant object. Args: variant (dict): A variant dictionary
[ "Add", "a", "variant", "to", "the", "variant", "collection", "If", "the", "variant", "exists", "we", "update", "the", "count", "else", "we", "insert", "a", "new", "variant", "object", ".", "Args", ":", "variant", "(", "dict", ")", ":", "A", "variant", ...
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/variant.py#L46-L68