repository_name
stringlengths
7
55
func_path_in_repository
stringlengths
4
223
func_name
stringlengths
1
134
whole_func_string
stringlengths
75
104k
language
stringclasses
1 value
func_code_string
stringlengths
75
104k
func_code_tokens
listlengths
19
28.4k
func_documentation_string
stringlengths
1
46.9k
func_documentation_tokens
listlengths
1
1.97k
split_name
stringclasses
1 value
func_code_url
stringlengths
87
315
bosth/plpygis
plpygis/geometry.py
Geometry.from_shapely
def from_shapely(sgeom, srid=None): """ Create a Geometry from a Shapely geometry and the specified SRID. The Shapely geometry will not be modified. """ if SHAPELY: WKBWriter.defaults["include_srid"] = True if srid: lgeos.GEOSSetSRID(sgeom._geom, srid) return Geometry(sgeom.wkb_hex) else: raise DependencyError("Shapely")
python
def from_shapely(sgeom, srid=None): """ Create a Geometry from a Shapely geometry and the specified SRID. The Shapely geometry will not be modified. """ if SHAPELY: WKBWriter.defaults["include_srid"] = True if srid: lgeos.GEOSSetSRID(sgeom._geom, srid) return Geometry(sgeom.wkb_hex) else: raise DependencyError("Shapely")
[ "def", "from_shapely", "(", "sgeom", ",", "srid", "=", "None", ")", ":", "if", "SHAPELY", ":", "WKBWriter", ".", "defaults", "[", "\"include_srid\"", "]", "=", "True", "if", "srid", ":", "lgeos", ".", "GEOSSetSRID", "(", "sgeom", ".", "_geom", ",", "sr...
Create a Geometry from a Shapely geometry and the specified SRID. The Shapely geometry will not be modified.
[ "Create", "a", "Geometry", "from", "a", "Shapely", "geometry", "and", "the", "specified", "SRID", "." ]
train
https://github.com/bosth/plpygis/blob/9469cc469df4c8cd407de158903d5465cda804ea/plpygis/geometry.py#L105-L117
bosth/plpygis
plpygis/geometry.py
Geometry.wkb
def wkb(self): """ Get the geometry as an (E)WKB. """ return self._to_wkb(use_srid=True, dimz=self.dimz, dimm=self.dimm)
python
def wkb(self): """ Get the geometry as an (E)WKB. """ return self._to_wkb(use_srid=True, dimz=self.dimz, dimm=self.dimm)
[ "def", "wkb", "(", "self", ")", ":", "return", "self", ".", "_to_wkb", "(", "use_srid", "=", "True", ",", "dimz", "=", "self", ".", "dimz", ",", "dimm", "=", "self", ".", "dimm", ")" ]
Get the geometry as an (E)WKB.
[ "Get", "the", "geometry", "as", "an", "(", "E", ")", "WKB", "." ]
train
https://github.com/bosth/plpygis/blob/9469cc469df4c8cd407de158903d5465cda804ea/plpygis/geometry.py#L154-L158
bosth/plpygis
plpygis/geometry.py
Geometry.postgis_type
def postgis_type(self): """ Get the type of the geometry in PostGIS format, including additional dimensions and SRID if they exist. """ dimz = "Z" if self.dimz else "" dimm = "M" if self.dimm else "" if self.srid: return "geometry({}{}{},{})".format(self.type, dimz, dimm, self.srid) else: return "geometry({}{}{})".format(self.type, dimz, dimm)
python
def postgis_type(self): """ Get the type of the geometry in PostGIS format, including additional dimensions and SRID if they exist. """ dimz = "Z" if self.dimz else "" dimm = "M" if self.dimm else "" if self.srid: return "geometry({}{}{},{})".format(self.type, dimz, dimm, self.srid) else: return "geometry({}{}{})".format(self.type, dimz, dimm)
[ "def", "postgis_type", "(", "self", ")", ":", "dimz", "=", "\"Z\"", "if", "self", ".", "dimz", "else", "\"\"", "dimm", "=", "\"M\"", "if", "self", ".", "dimm", "else", "\"\"", "if", "self", ".", "srid", ":", "return", "\"geometry({}{}{},{})\"", ".", "f...
Get the type of the geometry in PostGIS format, including additional dimensions and SRID if they exist.
[ "Get", "the", "type", "of", "the", "geometry", "in", "PostGIS", "format", "including", "additional", "dimensions", "and", "SRID", "if", "they", "exist", "." ]
train
https://github.com/bosth/plpygis/blob/9469cc469df4c8cd407de158903d5465cda804ea/plpygis/geometry.py#L177-L187
yjzhang/uncurl_python
uncurl/pois_ll.py
poisson_ll
def poisson_ll(data, means): """ Calculates the Poisson log-likelihood. Args: data (array): 2d numpy array of genes x cells means (array): 2d numpy array of genes x k Returns: cells x k array of log-likelihood for each cell/cluster pair """ if sparse.issparse(data): return sparse_poisson_ll(data, means) genes, cells = data.shape clusters = means.shape[1] ll = np.zeros((cells, clusters)) for i in range(clusters): means_i = np.tile(means[:,i], (cells, 1)) means_i = means_i.transpose() + eps #ll[:,i] = np.sum(xlogy(data, means_i) - gammaln(data+1) - means_i, 0) ll[:,i] = np.sum(xlogy(data, means_i) - means_i, 0) return ll
python
def poisson_ll(data, means): """ Calculates the Poisson log-likelihood. Args: data (array): 2d numpy array of genes x cells means (array): 2d numpy array of genes x k Returns: cells x k array of log-likelihood for each cell/cluster pair """ if sparse.issparse(data): return sparse_poisson_ll(data, means) genes, cells = data.shape clusters = means.shape[1] ll = np.zeros((cells, clusters)) for i in range(clusters): means_i = np.tile(means[:,i], (cells, 1)) means_i = means_i.transpose() + eps #ll[:,i] = np.sum(xlogy(data, means_i) - gammaln(data+1) - means_i, 0) ll[:,i] = np.sum(xlogy(data, means_i) - means_i, 0) return ll
[ "def", "poisson_ll", "(", "data", ",", "means", ")", ":", "if", "sparse", ".", "issparse", "(", "data", ")", ":", "return", "sparse_poisson_ll", "(", "data", ",", "means", ")", "genes", ",", "cells", "=", "data", ".", "shape", "clusters", "=", "means",...
Calculates the Poisson log-likelihood. Args: data (array): 2d numpy array of genes x cells means (array): 2d numpy array of genes x k Returns: cells x k array of log-likelihood for each cell/cluster pair
[ "Calculates", "the", "Poisson", "log", "-", "likelihood", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/pois_ll.py#L22-L43
yjzhang/uncurl_python
uncurl/pois_ll.py
poisson_ll_2
def poisson_ll_2(p1, p2): """ Calculates Poisson LL(p1|p2). """ p1_1 = p1 + eps p2_1 = p2 + eps return np.sum(-p2_1 + p1_1*np.log(p2_1))
python
def poisson_ll_2(p1, p2): """ Calculates Poisson LL(p1|p2). """ p1_1 = p1 + eps p2_1 = p2 + eps return np.sum(-p2_1 + p1_1*np.log(p2_1))
[ "def", "poisson_ll_2", "(", "p1", ",", "p2", ")", ":", "p1_1", "=", "p1", "+", "eps", "p2_1", "=", "p2", "+", "eps", "return", "np", ".", "sum", "(", "-", "p2_1", "+", "p1_1", "*", "np", ".", "log", "(", "p2_1", ")", ")" ]
Calculates Poisson LL(p1|p2).
[ "Calculates", "Poisson", "LL", "(", "p1|p2", ")", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/pois_ll.py#L45-L51
yjzhang/uncurl_python
uncurl/pois_ll.py
poisson_dist
def poisson_dist(p1, p2): """ Calculates the Poisson distance between two vectors. p1 can be a sparse matrix, while p2 has to be a dense matrix. """ # ugh... p1_ = p1 + eps p2_ = p2 + eps return np.dot(p1_-p2_, np.log(p1_/p2_))
python
def poisson_dist(p1, p2): """ Calculates the Poisson distance between two vectors. p1 can be a sparse matrix, while p2 has to be a dense matrix. """ # ugh... p1_ = p1 + eps p2_ = p2 + eps return np.dot(p1_-p2_, np.log(p1_/p2_))
[ "def", "poisson_dist", "(", "p1", ",", "p2", ")", ":", "# ugh...", "p1_", "=", "p1", "+", "eps", "p2_", "=", "p2", "+", "eps", "return", "np", ".", "dot", "(", "p1_", "-", "p2_", ",", "np", ".", "log", "(", "p1_", "/", "p2_", ")", ")" ]
Calculates the Poisson distance between two vectors. p1 can be a sparse matrix, while p2 has to be a dense matrix.
[ "Calculates", "the", "Poisson", "distance", "between", "two", "vectors", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/pois_ll.py#L53-L62
moonso/loqusdb
loqusdb/commands/delete.py
delete
def delete(ctx, family_file, family_type, case_id): """Delete the variants of a case.""" if not (family_file or case_id): LOG.error("Please provide a family file") ctx.abort() adapter = ctx.obj['adapter'] # Get a ped_parser.Family object from family file family = None family_id = None if family_file: with open(family_file, 'r') as family_lines: family = get_case( family_lines=family_lines, family_type=family_type ) family_id = family.family_id # There has to be a case_id or a family at this stage. case_id = case_id or family_id if not case_id: LOG.warning("Please provide a case id") ctx.abort() existing_case = adapter.case({'case_id': case_id}) if not existing_case: LOG.warning("Case %s does not exist in database" %case_id) context.abort start_deleting = datetime.now() try: delete_command( adapter=adapter, case_obj=existing_case, ) except (CaseError, IOError) as error: LOG.warning(error) ctx.abort()
python
def delete(ctx, family_file, family_type, case_id): """Delete the variants of a case.""" if not (family_file or case_id): LOG.error("Please provide a family file") ctx.abort() adapter = ctx.obj['adapter'] # Get a ped_parser.Family object from family file family = None family_id = None if family_file: with open(family_file, 'r') as family_lines: family = get_case( family_lines=family_lines, family_type=family_type ) family_id = family.family_id # There has to be a case_id or a family at this stage. case_id = case_id or family_id if not case_id: LOG.warning("Please provide a case id") ctx.abort() existing_case = adapter.case({'case_id': case_id}) if not existing_case: LOG.warning("Case %s does not exist in database" %case_id) context.abort start_deleting = datetime.now() try: delete_command( adapter=adapter, case_obj=existing_case, ) except (CaseError, IOError) as error: LOG.warning(error) ctx.abort()
[ "def", "delete", "(", "ctx", ",", "family_file", ",", "family_type", ",", "case_id", ")", ":", "if", "not", "(", "family_file", "or", "case_id", ")", ":", "LOG", ".", "error", "(", "\"Please provide a family file\"", ")", "ctx", ".", "abort", "(", ")", "...
Delete the variants of a case.
[ "Delete", "the", "variants", "of", "a", "case", "." ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/delete.py#L28-L68
markperdue/pyvesync
src/pyvesync/vesyncoutlet.py
VeSyncOutlet.update_energy
def update_energy(self, bypass_check: bool = False): """Builds weekly, monthly and yearly dictionaries""" if bypass_check or (not bypass_check and self.update_time_check): self.get_weekly_energy() if 'week' in self.energy: self.get_monthly_energy() self.get_yearly_energy() if not bypass_check: self.update_energy_ts = time.time()
python
def update_energy(self, bypass_check: bool = False): """Builds weekly, monthly and yearly dictionaries""" if bypass_check or (not bypass_check and self.update_time_check): self.get_weekly_energy() if 'week' in self.energy: self.get_monthly_energy() self.get_yearly_energy() if not bypass_check: self.update_energy_ts = time.time()
[ "def", "update_energy", "(", "self", ",", "bypass_check", ":", "bool", "=", "False", ")", ":", "if", "bypass_check", "or", "(", "not", "bypass_check", "and", "self", ".", "update_time_check", ")", ":", "self", ".", "get_weekly_energy", "(", ")", "if", "'we...
Builds weekly, monthly and yearly dictionaries
[ "Builds", "weekly", "monthly", "and", "yearly", "dictionaries" ]
train
https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesyncoutlet.py#L61-L69
markperdue/pyvesync
src/pyvesync/vesyncoutlet.py
VeSyncOutlet15A.turn_on_nightlight
def turn_on_nightlight(self): """Turn on nightlight""" body = helpers.req_body(self.manager, 'devicestatus') body['uuid'] = self.uuid body['mode'] = 'auto' response, _ = helpers.call_api( '/15a/v1/device/nightlightstatus', 'put', headers=helpers.req_headers(self.manager), json=body ) return helpers.check_response(response, '15a_ntlight')
python
def turn_on_nightlight(self): """Turn on nightlight""" body = helpers.req_body(self.manager, 'devicestatus') body['uuid'] = self.uuid body['mode'] = 'auto' response, _ = helpers.call_api( '/15a/v1/device/nightlightstatus', 'put', headers=helpers.req_headers(self.manager), json=body ) return helpers.check_response(response, '15a_ntlight')
[ "def", "turn_on_nightlight", "(", "self", ")", ":", "body", "=", "helpers", ".", "req_body", "(", "self", ".", "manager", ",", "'devicestatus'", ")", "body", "[", "'uuid'", "]", "=", "self", ".", "uuid", "body", "[", "'mode'", "]", "=", "'auto'", "resp...
Turn on nightlight
[ "Turn", "on", "nightlight" ]
train
https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesyncoutlet.py#L441-L454
fbergmann/libSEDML
examples/python/echo_sedml.py
main
def main (args): """Usage: echo_sedml input-filename output-filename """ if len(args) != 3: print(main.__doc__) sys.exit(1) d = libsedml.readSedML(args[1]); if ( d.getErrorLog().getNumFailsWithSeverity(libsedml.LIBSEDML_SEV_ERROR) > 0): print (d.getErrorLog().toString()); else: libsedml.writeSedML(d, args[2]); return 0;
python
def main (args): """Usage: echo_sedml input-filename output-filename """ if len(args) != 3: print(main.__doc__) sys.exit(1) d = libsedml.readSedML(args[1]); if ( d.getErrorLog().getNumFailsWithSeverity(libsedml.LIBSEDML_SEV_ERROR) > 0): print (d.getErrorLog().toString()); else: libsedml.writeSedML(d, args[2]); return 0;
[ "def", "main", "(", "args", ")", ":", "if", "len", "(", "args", ")", "!=", "3", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", "1", ")", "d", "=", "libsedml", ".", "readSedML", "(", "args", "[", "1", "]", ")", "if", ...
Usage: echo_sedml input-filename output-filename
[ "Usage", ":", "echo_sedml", "input", "-", "filename", "output", "-", "filename" ]
train
https://github.com/fbergmann/libSEDML/blob/2611274d993cb92c663f8f0296896a6e441f75fd/examples/python/echo_sedml.py#L40-L53
bachya/py17track
example.py
main
async def main() -> None: """Create the aiohttp session and run the example.""" logging.basicConfig(level=logging.INFO) async with ClientSession() as websession: try: client = Client(websession) await client.profile.login('<EMAIL>', '<PASSWORD>') _LOGGER.info('Account ID: %s', client.profile.account_id) summary = await client.profile.summary() _LOGGER.info('Account Summary: %s', summary) packages = await client.profile.packages() _LOGGER.info('Package Summary: %s', packages) except SeventeenTrackError as err: print(err)
python
async def main() -> None: """Create the aiohttp session and run the example.""" logging.basicConfig(level=logging.INFO) async with ClientSession() as websession: try: client = Client(websession) await client.profile.login('<EMAIL>', '<PASSWORD>') _LOGGER.info('Account ID: %s', client.profile.account_id) summary = await client.profile.summary() _LOGGER.info('Account Summary: %s', summary) packages = await client.profile.packages() _LOGGER.info('Package Summary: %s', packages) except SeventeenTrackError as err: print(err)
[ "async", "def", "main", "(", ")", "->", "None", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ")", "async", "with", "ClientSession", "(", ")", "as", "websession", ":", "try", ":", "client", "=", "Client", "(", "websessi...
Create the aiohttp session and run the example.
[ "Create", "the", "aiohttp", "session", "and", "run", "the", "example", "." ]
train
https://github.com/bachya/py17track/blob/e6e64f2a79571433df7ee702cb4ebc4127b7ad6d/example.py#L13-L30
johncosta/django-like-button
like_button/templatetags/like_button.py
my_import
def my_import(name): """ dynamic importing """ module, attr = name.rsplit('.', 1) mod = __import__(module, fromlist=[attr]) klass = getattr(mod, attr) return klass()
python
def my_import(name): """ dynamic importing """ module, attr = name.rsplit('.', 1) mod = __import__(module, fromlist=[attr]) klass = getattr(mod, attr) return klass()
[ "def", "my_import", "(", "name", ")", ":", "module", ",", "attr", "=", "name", ".", "rsplit", "(", "'.'", ",", "1", ")", "mod", "=", "__import__", "(", "module", ",", "fromlist", "=", "[", "attr", "]", ")", "klass", "=", "getattr", "(", "mod", ",...
dynamic importing
[ "dynamic", "importing" ]
train
https://github.com/johncosta/django-like-button/blob/c93a1be9c041d76e8de9a26f424ad4f836ab97bd/like_button/templatetags/like_button.py#L20-L25
johncosta/django-like-button
like_button/templatetags/like_button.py
like_button_js_tag
def like_button_js_tag(context): """ This tag will check to see if they have the FACEBOOK_LIKE_APP_ID setup correctly in the django settings, if so then it will pass the data along to the intercom_tag template to be displayed. If something isn't perfect we will return False, which will then not install the javascript since it isn't needed. """ if FACEBOOK_APP_ID is None: log.warning("FACEBOOK_APP_ID isn't setup correctly in your settings") # make sure FACEBOOK_APP_ID is setup correct and user is authenticated if FACEBOOK_APP_ID: request = context.get('request', None) if request: return {"LIKE_BUTTON_IS_VALID": True, "facebook_app_id": FACEBOOK_APP_ID, "channel_base_url": request.get_host()} # if it is here, it isn't a valid setup, return False to not show the tag. return {"LIKE_BUTTON_IS_VALID": False}
python
def like_button_js_tag(context): """ This tag will check to see if they have the FACEBOOK_LIKE_APP_ID setup correctly in the django settings, if so then it will pass the data along to the intercom_tag template to be displayed. If something isn't perfect we will return False, which will then not install the javascript since it isn't needed. """ if FACEBOOK_APP_ID is None: log.warning("FACEBOOK_APP_ID isn't setup correctly in your settings") # make sure FACEBOOK_APP_ID is setup correct and user is authenticated if FACEBOOK_APP_ID: request = context.get('request', None) if request: return {"LIKE_BUTTON_IS_VALID": True, "facebook_app_id": FACEBOOK_APP_ID, "channel_base_url": request.get_host()} # if it is here, it isn't a valid setup, return False to not show the tag. return {"LIKE_BUTTON_IS_VALID": False}
[ "def", "like_button_js_tag", "(", "context", ")", ":", "if", "FACEBOOK_APP_ID", "is", "None", ":", "log", ".", "warning", "(", "\"FACEBOOK_APP_ID isn't setup correctly in your settings\"", ")", "# make sure FACEBOOK_APP_ID is setup correct and user is authenticated", "if", "FAC...
This tag will check to see if they have the FACEBOOK_LIKE_APP_ID setup correctly in the django settings, if so then it will pass the data along to the intercom_tag template to be displayed. If something isn't perfect we will return False, which will then not install the javascript since it isn't needed.
[ "This", "tag", "will", "check", "to", "see", "if", "they", "have", "the", "FACEBOOK_LIKE_APP_ID", "setup", "correctly", "in", "the", "django", "settings", "if", "so", "then", "it", "will", "pass", "the", "data", "along", "to", "the", "intercom_tag", "templat...
train
https://github.com/johncosta/django-like-button/blob/c93a1be9c041d76e8de9a26f424ad4f836ab97bd/like_button/templatetags/like_button.py#L34-L55
johncosta/django-like-button
like_button/templatetags/like_button.py
like_button_tag
def like_button_tag(context): """ This tag will check to see if they have the FACEBOOK_APP_ID setup correctly in the django settings, if so then it will pass the data along to the intercom_tag template to be displayed. If something isn't perfect we will return False, which will then not install the javascript since it isn't needed. s """ if FACEBOOK_APP_ID is None: log.warning("FACEBOOK_APP_ID isn't setup correctly in your settings") # make sure INTERCOM_APPID is setup correct and user is authenticated if FACEBOOK_APP_ID: request = context.get('request', None) if request: path_to_like = ( "http://" + request.get_host() + request.get_full_path()) show_send = true_false_converter(FACEBOOK_SHOW_SEND) like_width = FACEBOOK_LIKE_WIDTH show_faces = true_false_converter(FACEBOOK_SHOW_FACES) font = FACEBOOK_FONT return {"LIKE_BUTTON_IS_VALID": True, "path_to_like": path_to_like, "show_send": show_send, "like_width": like_width, "show_faces": show_faces, "font": font, "like_layout": FACEBOOK_LIKE_LAYOUT} # if it is here, it isn't a valid setup, return False to not show the tag. return {"LIKE_BUTTON_IS_VALID": False}
python
def like_button_tag(context): """ This tag will check to see if they have the FACEBOOK_APP_ID setup correctly in the django settings, if so then it will pass the data along to the intercom_tag template to be displayed. If something isn't perfect we will return False, which will then not install the javascript since it isn't needed. s """ if FACEBOOK_APP_ID is None: log.warning("FACEBOOK_APP_ID isn't setup correctly in your settings") # make sure INTERCOM_APPID is setup correct and user is authenticated if FACEBOOK_APP_ID: request = context.get('request', None) if request: path_to_like = ( "http://" + request.get_host() + request.get_full_path()) show_send = true_false_converter(FACEBOOK_SHOW_SEND) like_width = FACEBOOK_LIKE_WIDTH show_faces = true_false_converter(FACEBOOK_SHOW_FACES) font = FACEBOOK_FONT return {"LIKE_BUTTON_IS_VALID": True, "path_to_like": path_to_like, "show_send": show_send, "like_width": like_width, "show_faces": show_faces, "font": font, "like_layout": FACEBOOK_LIKE_LAYOUT} # if it is here, it isn't a valid setup, return False to not show the tag. return {"LIKE_BUTTON_IS_VALID": False}
[ "def", "like_button_tag", "(", "context", ")", ":", "if", "FACEBOOK_APP_ID", "is", "None", ":", "log", ".", "warning", "(", "\"FACEBOOK_APP_ID isn't setup correctly in your settings\"", ")", "# make sure INTERCOM_APPID is setup correct and user is authenticated", "if", "FACEBOO...
This tag will check to see if they have the FACEBOOK_APP_ID setup correctly in the django settings, if so then it will pass the data along to the intercom_tag template to be displayed. If something isn't perfect we will return False, which will then not install the javascript since it isn't needed. s
[ "This", "tag", "will", "check", "to", "see", "if", "they", "have", "the", "FACEBOOK_APP_ID", "setup", "correctly", "in", "the", "django", "settings", "if", "so", "then", "it", "will", "pass", "the", "data", "along", "to", "the", "intercom_tag", "template", ...
train
https://github.com/johncosta/django-like-button/blob/c93a1be9c041d76e8de9a26f424ad4f836ab97bd/like_button/templatetags/like_button.py#L59-L94
moonso/loqusdb
loqusdb/plugins/mongo/structural_variant.py
SVMixin.add_structural_variant
def add_structural_variant(self, variant, max_window = 3000): """Add a variant to the structural variants collection The process of adding an SV variant differs quite a bit from the more straight forward case of SNV/INDEL. Variants are represented in the database by clusters that are two intervals, one interval for start(pos) and one for end. The size of the intervals changes according to the size of the variants. The maximum window size is a parameter. Here we need to search if the variant matches any of the existing clusters. Then we need to choose the closest cluster and update the boundaries for that cluster. Args: variant (dict): A variant dictionary max_window(int): Specify the maximum window size for large svs - - - - - - - - - - - |-----| |-----| /\ /\ | | pos - interval_size end + interval_size """ # This will return the cluster most similar to variant or None cluster = self.get_structural_variant(variant) # If there was no matcing cluster we need to create a new cluster if cluster is None: # The cluster will be populated with information later. 
cluster = { 'chrom': variant['chrom'], 'end_chrom': variant['end_chrom'], 'sv_type': variant['sv_type'], 'pos_sum': 0, 'end_sum': 0, 'observations': 0, 'length': 0, 'sv_type': variant['sv_type'], 'families': [], } # Insert variant to get a _id _id = self.db.structural_variant.insert_one(cluster).inserted_id cluster['_id'] = _id case_id = variant.get('case_id') if case_id: # If the variant is already added for this case we continue # One case will only give duplicated information if case_id in cluster['families']: return else: # Insert the new case in the beginning of array cluster['families'].insert(0,case_id) # Make sure array does not grow out of bounds cluster['families'] = cluster['families'][:50] # Update number of times we have seen the event nr_events = cluster['observations'] + 1 # - # - - # - - # - - # - - # - - # |--.--| |--.--| # This indicates the center for each of the end points for the event AFTER the new variant # is added to the cluster # i.e. the dots in the picture above pos_mean = int((cluster['pos_sum'] + variant['pos']) // (nr_events)) end_mean = int((cluster['end_sum'] + variant['end']) // (nr_events)) # We need to calculate the new cluster length # Handle translocation as a special case if cluster['sv_type'] != 'BND': cluster_len = end_mean - pos_mean # We need to adapt the interval size depending on the size of the cluster divider = 10 if cluster_len < 1000: # We allow intervals for smaller variants to be relatively larger divider = 2 elif cluster_len < 1000: # We allow intervals for smaller variants to be relatively larger divider = 5 interval_size = int(min(round(cluster_len/divider, -2), max_window)) else: # We need to treat translocations as a special case. # Set length to a huge number that mongodb can handle, float('inf') would not work. cluster_len = 10e10 # This number seems large, if compared with SV size it is fairly small. 
interval_size = max_window * 2 # If the length of SV is shorter than 500 the variant # is considered precise # Otherwise the interval size is closest whole 100 number res = self.db.structural_variant.find_one_and_update( {'_id': cluster['_id']}, { '$inc': { 'observations': 1, 'pos_sum': variant['pos'], 'end_sum': variant['end'], }, '$set': { 'pos_left': max(pos_mean - interval_size, 0), 'pos_right': pos_mean + interval_size, 'end_left': max(end_mean - interval_size, 0), 'end_right': end_mean + interval_size, 'families': cluster['families'], 'length': cluster_len, } } ) # Insert an identity object to link cases to variants and clusters identity_obj = Identity(cluster_id=cluster['_id'], variant_id=variant['id_column'], case_id=case_id) self.db.identity.insert_one(identity_obj) return
python
def add_structural_variant(self, variant, max_window = 3000): """Add a variant to the structural variants collection The process of adding an SV variant differs quite a bit from the more straight forward case of SNV/INDEL. Variants are represented in the database by clusters that are two intervals, one interval for start(pos) and one for end. The size of the intervals changes according to the size of the variants. The maximum window size is a parameter. Here we need to search if the variant matches any of the existing clusters. Then we need to choose the closest cluster and update the boundaries for that cluster. Args: variant (dict): A variant dictionary max_window(int): Specify the maximum window size for large svs - - - - - - - - - - - |-----| |-----| /\ /\ | | pos - interval_size end + interval_size """ # This will return the cluster most similar to variant or None cluster = self.get_structural_variant(variant) # If there was no matcing cluster we need to create a new cluster if cluster is None: # The cluster will be populated with information later. 
cluster = { 'chrom': variant['chrom'], 'end_chrom': variant['end_chrom'], 'sv_type': variant['sv_type'], 'pos_sum': 0, 'end_sum': 0, 'observations': 0, 'length': 0, 'sv_type': variant['sv_type'], 'families': [], } # Insert variant to get a _id _id = self.db.structural_variant.insert_one(cluster).inserted_id cluster['_id'] = _id case_id = variant.get('case_id') if case_id: # If the variant is already added for this case we continue # One case will only give duplicated information if case_id in cluster['families']: return else: # Insert the new case in the beginning of array cluster['families'].insert(0,case_id) # Make sure array does not grow out of bounds cluster['families'] = cluster['families'][:50] # Update number of times we have seen the event nr_events = cluster['observations'] + 1 # - # - - # - - # - - # - - # - - # |--.--| |--.--| # This indicates the center for each of the end points for the event AFTER the new variant # is added to the cluster # i.e. the dots in the picture above pos_mean = int((cluster['pos_sum'] + variant['pos']) // (nr_events)) end_mean = int((cluster['end_sum'] + variant['end']) // (nr_events)) # We need to calculate the new cluster length # Handle translocation as a special case if cluster['sv_type'] != 'BND': cluster_len = end_mean - pos_mean # We need to adapt the interval size depending on the size of the cluster divider = 10 if cluster_len < 1000: # We allow intervals for smaller variants to be relatively larger divider = 2 elif cluster_len < 1000: # We allow intervals for smaller variants to be relatively larger divider = 5 interval_size = int(min(round(cluster_len/divider, -2), max_window)) else: # We need to treat translocations as a special case. # Set length to a huge number that mongodb can handle, float('inf') would not work. cluster_len = 10e10 # This number seems large, if compared with SV size it is fairly small. 
interval_size = max_window * 2 # If the length of SV is shorter than 500 the variant # is considered precise # Otherwise the interval size is closest whole 100 number res = self.db.structural_variant.find_one_and_update( {'_id': cluster['_id']}, { '$inc': { 'observations': 1, 'pos_sum': variant['pos'], 'end_sum': variant['end'], }, '$set': { 'pos_left': max(pos_mean - interval_size, 0), 'pos_right': pos_mean + interval_size, 'end_left': max(end_mean - interval_size, 0), 'end_right': end_mean + interval_size, 'families': cluster['families'], 'length': cluster_len, } } ) # Insert an identity object to link cases to variants and clusters identity_obj = Identity(cluster_id=cluster['_id'], variant_id=variant['id_column'], case_id=case_id) self.db.identity.insert_one(identity_obj) return
[ "def", "add_structural_variant", "(", "self", ",", "variant", ",", "max_window", "=", "3000", ")", ":", "# This will return the cluster most similar to variant or None", "cluster", "=", "self", ".", "get_structural_variant", "(", "variant", ")", "# If there was no matcing c...
Add a variant to the structural variants collection The process of adding an SV variant differs quite a bit from the more straight forward case of SNV/INDEL. Variants are represented in the database by clusters that are two intervals, one interval for start(pos) and one for end. The size of the intervals changes according to the size of the variants. The maximum window size is a parameter. Here we need to search if the variant matches any of the existing clusters. Then we need to choose the closest cluster and update the boundaries for that cluster. Args: variant (dict): A variant dictionary max_window(int): Specify the maximum window size for large svs - - - - - - - - - - - |-----| |-----| /\ /\ | | pos - interval_size end + interval_size
[ "Add", "a", "variant", "to", "the", "structural", "variants", "collection", "The", "process", "of", "adding", "an", "SV", "variant", "differs", "quite", "a", "bit", "from", "the", "more", "straight", "forward", "case", "of", "SNV", "/", "INDEL", ".", "Vari...
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/structural_variant.py#L14-L143
moonso/loqusdb
loqusdb/plugins/mongo/structural_variant.py
SVMixin.get_structural_variant
def get_structural_variant(self, variant): """Check if there are any overlapping sv clusters Search the sv variants with chrom start end_chrom end and sv_type Args: variant (dict): A variant dictionary Returns: variant (dict): A variant dictionary """ # Create a query for the database # This will include more variants than we want # The rest of the calculations will be done in python query = { 'chrom': variant['chrom'], 'end_chrom': variant['end_chrom'], 'sv_type': variant['sv_type'], '$and': [ {'pos_left': {'$lte': variant['pos']}}, {'pos_right': {'$gte': variant['pos']}}, ] } res = self.db.structural_variant.find(query).sort('pos_left',1) match = None distance = None closest_hit = None # First we check that the coordinates are correct # Then we count the distance to mean on both ends to see which variant is closest for hit in res: # We know from the query that the variants position is larger than the left most part of # the cluster. # If the right most part of the cluster is smaller than the variant position they do # not overlap if hit['end_left'] > variant['end']: continue if hit['end_right'] < variant['end']: continue # We need to calculate the distance to see what cluster that was closest to the variant distance = (abs(variant['pos'] - (hit['pos_left'] + hit['pos_right'])/2) + abs(variant['end'] - (hit['end_left'] + hit['end_right'])/2)) # If we have no cluster yet we set the curent to be the hit if closest_hit is None: match = hit closest_hit = distance continue # If the distance is closer than previous we choose current cluster if distance < closest_hit: # Set match to the current closest hit match = hit # Update the closest distance closest_hit = distance return match
python
def get_structural_variant(self, variant): """Check if there are any overlapping sv clusters Search the sv variants with chrom start end_chrom end and sv_type Args: variant (dict): A variant dictionary Returns: variant (dict): A variant dictionary """ # Create a query for the database # This will include more variants than we want # The rest of the calculations will be done in python query = { 'chrom': variant['chrom'], 'end_chrom': variant['end_chrom'], 'sv_type': variant['sv_type'], '$and': [ {'pos_left': {'$lte': variant['pos']}}, {'pos_right': {'$gte': variant['pos']}}, ] } res = self.db.structural_variant.find(query).sort('pos_left',1) match = None distance = None closest_hit = None # First we check that the coordinates are correct # Then we count the distance to mean on both ends to see which variant is closest for hit in res: # We know from the query that the variants position is larger than the left most part of # the cluster. # If the right most part of the cluster is smaller than the variant position they do # not overlap if hit['end_left'] > variant['end']: continue if hit['end_right'] < variant['end']: continue # We need to calculate the distance to see what cluster that was closest to the variant distance = (abs(variant['pos'] - (hit['pos_left'] + hit['pos_right'])/2) + abs(variant['end'] - (hit['end_left'] + hit['end_right'])/2)) # If we have no cluster yet we set the curent to be the hit if closest_hit is None: match = hit closest_hit = distance continue # If the distance is closer than previous we choose current cluster if distance < closest_hit: # Set match to the current closest hit match = hit # Update the closest distance closest_hit = distance return match
[ "def", "get_structural_variant", "(", "self", ",", "variant", ")", ":", "# Create a query for the database", "# This will include more variants than we want", "# The rest of the calculations will be done in python", "query", "=", "{", "'chrom'", ":", "variant", "[", "'chrom'", ...
Check if there are any overlapping sv clusters Search the sv variants with chrom start end_chrom end and sv_type Args: variant (dict): A variant dictionary Returns: variant (dict): A variant dictionary
[ "Check", "if", "there", "are", "any", "overlapping", "sv", "clusters" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/structural_variant.py#L145-L202
moonso/loqusdb
loqusdb/plugins/mongo/structural_variant.py
SVMixin.get_sv_variants
def get_sv_variants(self, chromosome=None, end_chromosome=None, sv_type=None, pos=None, end=None): """Return all structural variants in the database Args: chromosome (str) end_chromosome (str) sv_type (str) pos (int): Left position of SV end (int): Right position of SV Returns: variants (Iterable(Variant)) """ query = {} if chromosome: query['chrom'] = chromosome if end_chromosome: query['end_chrom'] = end_chromosome if sv_type: query['sv_type'] = sv_type if pos: if not '$and' in query: query['$and'] = [] query['$and'].append({'pos_left': {'$lte': pos}}) query['$and'].append({'pos_right': {'$gte': pos}}) if end: if not '$and' in query: query['$and'] = [] query['$and'].append({'end_left': {'$lte': end}}) query['$and'].append({'end_right': {'$gte': end}}) LOG.info("Find all sv variants {}".format(query)) return self.db.structural_variant.find(query).sort([('chrom', ASCENDING), ('pos_left', ASCENDING)])
python
def get_sv_variants(self, chromosome=None, end_chromosome=None, sv_type=None, pos=None, end=None): """Return all structural variants in the database Args: chromosome (str) end_chromosome (str) sv_type (str) pos (int): Left position of SV end (int): Right position of SV Returns: variants (Iterable(Variant)) """ query = {} if chromosome: query['chrom'] = chromosome if end_chromosome: query['end_chrom'] = end_chromosome if sv_type: query['sv_type'] = sv_type if pos: if not '$and' in query: query['$and'] = [] query['$and'].append({'pos_left': {'$lte': pos}}) query['$and'].append({'pos_right': {'$gte': pos}}) if end: if not '$and' in query: query['$and'] = [] query['$and'].append({'end_left': {'$lte': end}}) query['$and'].append({'end_right': {'$gte': end}}) LOG.info("Find all sv variants {}".format(query)) return self.db.structural_variant.find(query).sort([('chrom', ASCENDING), ('pos_left', ASCENDING)])
[ "def", "get_sv_variants", "(", "self", ",", "chromosome", "=", "None", ",", "end_chromosome", "=", "None", ",", "sv_type", "=", "None", ",", "pos", "=", "None", ",", "end", "=", "None", ")", ":", "query", "=", "{", "}", "if", "chromosome", ":", "quer...
Return all structural variants in the database Args: chromosome (str) end_chromosome (str) sv_type (str) pos (int): Left position of SV end (int): Right position of SV Returns: variants (Iterable(Variant))
[ "Return", "all", "structural", "variants", "in", "the", "database" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/structural_variant.py#L204-L240
moonso/loqusdb
loqusdb/plugins/mongo/structural_variant.py
SVMixin.get_clusters
def get_clusters(self, variant_id): """Search what clusters a variant belongs to Args: variant_id(str): From ID column in vcf Returns: clusters() """ query = {'variant_id':variant_id} identities = self.db.identity.find(query) return identities
python
def get_clusters(self, variant_id): """Search what clusters a variant belongs to Args: variant_id(str): From ID column in vcf Returns: clusters() """ query = {'variant_id':variant_id} identities = self.db.identity.find(query) return identities
[ "def", "get_clusters", "(", "self", ",", "variant_id", ")", ":", "query", "=", "{", "'variant_id'", ":", "variant_id", "}", "identities", "=", "self", ".", "db", ".", "identity", ".", "find", "(", "query", ")", "return", "identities" ]
Search what clusters a variant belongs to Args: variant_id(str): From ID column in vcf Returns: clusters()
[ "Search", "what", "clusters", "a", "variant", "belongs", "to", "Args", ":", "variant_id", "(", "str", ")", ":", "From", "ID", "column", "in", "vcf", "Returns", ":", "clusters", "()" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/structural_variant.py#L242-L253
yjzhang/uncurl_python
uncurl/run_se.py
run_state_estimation
def run_state_estimation(data, clusters, dist='Poiss', reps=1, **kwargs): """ Runs state estimation for multiple initializations, returning the result with the highest log-likelihood. All the arguments are passed to the underlying state estimation functions (poisson_estimate_state, nb_estimate_state, zip_estimate_state). Args: data (array): genes x cells clusters (int): number of mixture components. If this is set to 0, this is automatically estimated using gap score. dist (str, optional): Distribution used in state estimation. Options: 'Poiss', 'NB', 'ZIP', 'LogNorm', 'Gaussian'. Default: 'Poiss' reps (int, optional): number of times to run the state estimation, taking the result with the highest log-likelihood. **kwargs: arguments to pass to the underlying state estimation function. Returns: M (array): genes x clusters - state means W (array): clusters x cells - state mixing components for each cell ll (float): final log-likelihood """ clusters = int(clusters) func = poisson_estimate_state dist = dist.lower() if dist=='poiss' or dist=='poisson': pass elif dist=='nb': func = nb_estimate_state elif dist=='zip': func = zip_estimate_state elif dist=='lognorm' or dist=='log-normal' or dist=='lognormal': func = log_norm_nmf elif dist=='gaussian' or dist=='norm' or dist=='normal': func = norm_nmf else: print('dist should be one of Poiss, NB, ZIP, LogNorm, or Gaussian. Using Poiss.') # TODO: estimate number of clusters if clusters == 0: from .gap_score import run_gap_k_selection, preproc_data data_tsvd = preproc_data(data, gene_subset=False) max_k, gap_vals, sk_vals = run_gap_k_selection(data_tsvd, k_min=1, k_max=50, skip=5, B=6) clusters = min(max_k, data.shape[0] - 1, data.shape[1] - 1) best_ll = np.inf best_M = None best_W = None for i in range(reps): results = func(data, clusters, **kwargs) M = results[0] W = results[1] if dist=='NB': ll = results[3] else: ll = results[2] if ll < best_ll: best_ll = ll best_M = M best_W = W return best_M, best_W, best_ll
python
def run_state_estimation(data, clusters, dist='Poiss', reps=1, **kwargs): """ Runs state estimation for multiple initializations, returning the result with the highest log-likelihood. All the arguments are passed to the underlying state estimation functions (poisson_estimate_state, nb_estimate_state, zip_estimate_state). Args: data (array): genes x cells clusters (int): number of mixture components. If this is set to 0, this is automatically estimated using gap score. dist (str, optional): Distribution used in state estimation. Options: 'Poiss', 'NB', 'ZIP', 'LogNorm', 'Gaussian'. Default: 'Poiss' reps (int, optional): number of times to run the state estimation, taking the result with the highest log-likelihood. **kwargs: arguments to pass to the underlying state estimation function. Returns: M (array): genes x clusters - state means W (array): clusters x cells - state mixing components for each cell ll (float): final log-likelihood """ clusters = int(clusters) func = poisson_estimate_state dist = dist.lower() if dist=='poiss' or dist=='poisson': pass elif dist=='nb': func = nb_estimate_state elif dist=='zip': func = zip_estimate_state elif dist=='lognorm' or dist=='log-normal' or dist=='lognormal': func = log_norm_nmf elif dist=='gaussian' or dist=='norm' or dist=='normal': func = norm_nmf else: print('dist should be one of Poiss, NB, ZIP, LogNorm, or Gaussian. Using Poiss.') # TODO: estimate number of clusters if clusters == 0: from .gap_score import run_gap_k_selection, preproc_data data_tsvd = preproc_data(data, gene_subset=False) max_k, gap_vals, sk_vals = run_gap_k_selection(data_tsvd, k_min=1, k_max=50, skip=5, B=6) clusters = min(max_k, data.shape[0] - 1, data.shape[1] - 1) best_ll = np.inf best_M = None best_W = None for i in range(reps): results = func(data, clusters, **kwargs) M = results[0] W = results[1] if dist=='NB': ll = results[3] else: ll = results[2] if ll < best_ll: best_ll = ll best_M = M best_W = W return best_M, best_W, best_ll
[ "def", "run_state_estimation", "(", "data", ",", "clusters", ",", "dist", "=", "'Poiss'", ",", "reps", "=", "1", ",", "*", "*", "kwargs", ")", ":", "clusters", "=", "int", "(", "clusters", ")", "func", "=", "poisson_estimate_state", "dist", "=", "dist", ...
Runs state estimation for multiple initializations, returning the result with the highest log-likelihood. All the arguments are passed to the underlying state estimation functions (poisson_estimate_state, nb_estimate_state, zip_estimate_state). Args: data (array): genes x cells clusters (int): number of mixture components. If this is set to 0, this is automatically estimated using gap score. dist (str, optional): Distribution used in state estimation. Options: 'Poiss', 'NB', 'ZIP', 'LogNorm', 'Gaussian'. Default: 'Poiss' reps (int, optional): number of times to run the state estimation, taking the result with the highest log-likelihood. **kwargs: arguments to pass to the underlying state estimation function. Returns: M (array): genes x clusters - state means W (array): clusters x cells - state mixing components for each cell ll (float): final log-likelihood
[ "Runs", "state", "estimation", "for", "multiple", "initializations", "returning", "the", "result", "with", "the", "highest", "log", "-", "likelihood", ".", "All", "the", "arguments", "are", "passed", "to", "the", "underlying", "state", "estimation", "functions", ...
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/run_se.py#L11-L64
markperdue/pyvesync
src/pyvesync/vesyncfan.py
VeSyncAir131.get_details
def get_details(self): """Build details dictionary""" body = helpers.req_body(self.manager, 'devicedetail') head = helpers.req_headers(self.manager) r, _ = helpers.call_api('/131airpurifier/v1/device/deviceDetail', method='post', headers=head, json=body) if r is not None and helpers.check_response(r, 'airpur_detail'): self.device_status = r.get('deviceStatus', 'unknown') self.connection_status = r.get('connectionStatus', 'unknown') self.details['active_time'] = r.get('activeTime', 0) self.details['filter_life'] = r.get('filterLife', {}) self.details['screeen_status'] = r.get('screenStatus', 'unknown') self.details['mode'] = r.get('mode', 'unknown') self.details['level'] = r.get('level', None)
python
def get_details(self): """Build details dictionary""" body = helpers.req_body(self.manager, 'devicedetail') head = helpers.req_headers(self.manager) r, _ = helpers.call_api('/131airpurifier/v1/device/deviceDetail', method='post', headers=head, json=body) if r is not None and helpers.check_response(r, 'airpur_detail'): self.device_status = r.get('deviceStatus', 'unknown') self.connection_status = r.get('connectionStatus', 'unknown') self.details['active_time'] = r.get('activeTime', 0) self.details['filter_life'] = r.get('filterLife', {}) self.details['screeen_status'] = r.get('screenStatus', 'unknown') self.details['mode'] = r.get('mode', 'unknown') self.details['level'] = r.get('level', None)
[ "def", "get_details", "(", "self", ")", ":", "body", "=", "helpers", ".", "req_body", "(", "self", ".", "manager", ",", "'devicedetail'", ")", "head", "=", "helpers", ".", "req_headers", "(", "self", ".", "manager", ")", "r", ",", "_", "=", "helpers", ...
Build details dictionary
[ "Build", "details", "dictionary" ]
train
https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesyncfan.py#L17-L32
markperdue/pyvesync
src/pyvesync/vesyncfan.py
VeSyncAir131.turn_on
def turn_on(self): """Turn Air Purifier on""" if self.device_status != 'on': body = helpers.req_body(self.manager, 'devicestatus') body['uuid'] = self.uuid body['status'] = 'on' head = helpers.req_headers(self.manager) r, _ = helpers.call_api('/131airPurifier/v1/device/deviceStatus', 'put', json=body, headers=head) if r is not None and helpers.check_response(r, 'airpur_status'): self.device_status = 'on' return True else: return False
python
def turn_on(self): """Turn Air Purifier on""" if self.device_status != 'on': body = helpers.req_body(self.manager, 'devicestatus') body['uuid'] = self.uuid body['status'] = 'on' head = helpers.req_headers(self.manager) r, _ = helpers.call_api('/131airPurifier/v1/device/deviceStatus', 'put', json=body, headers=head) if r is not None and helpers.check_response(r, 'airpur_status'): self.device_status = 'on' return True else: return False
[ "def", "turn_on", "(", "self", ")", ":", "if", "self", ".", "device_status", "!=", "'on'", ":", "body", "=", "helpers", ".", "req_body", "(", "self", ".", "manager", ",", "'devicestatus'", ")", "body", "[", "'uuid'", "]", "=", "self", ".", "uuid", "b...
Turn Air Purifier on
[ "Turn", "Air", "Purifier", "on" ]
train
https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesyncfan.py#L44-L59
markperdue/pyvesync
src/pyvesync/vesyncfan.py
VeSyncAir131.fan_speed
def fan_speed(self, speed: int = None) -> bool: """Adjust Fan Speed by Specifying 1,2,3 as argument or cycle through speeds increasing by one""" body = helpers.req_body(self.manager, 'devicestatus') body['uuid'] = self.uuid head = helpers.req_headers(self.manager) if self.details.get('mode') != 'manual': self.mode_toggle('manual') else: if speed is not None: level = int(self.details.get('level')) if speed == level: return False elif speed in [1, 2, 3]: body['level'] = speed else: if (level + 1) > 3: body['level'] = 1 else: body['level'] = int(level + 1) r, _ = helpers.call_api('/131airPurifier/v1/device/updateSpeed', 'put', json=body, headers=head) if r is not None and helpers.check_response(r, 'airpur_status'): self.details['level'] = body['level'] return True else: return False
python
def fan_speed(self, speed: int = None) -> bool: """Adjust Fan Speed by Specifying 1,2,3 as argument or cycle through speeds increasing by one""" body = helpers.req_body(self.manager, 'devicestatus') body['uuid'] = self.uuid head = helpers.req_headers(self.manager) if self.details.get('mode') != 'manual': self.mode_toggle('manual') else: if speed is not None: level = int(self.details.get('level')) if speed == level: return False elif speed in [1, 2, 3]: body['level'] = speed else: if (level + 1) > 3: body['level'] = 1 else: body['level'] = int(level + 1) r, _ = helpers.call_api('/131airPurifier/v1/device/updateSpeed', 'put', json=body, headers=head) if r is not None and helpers.check_response(r, 'airpur_status'): self.details['level'] = body['level'] return True else: return False
[ "def", "fan_speed", "(", "self", ",", "speed", ":", "int", "=", "None", ")", "->", "bool", ":", "body", "=", "helpers", ".", "req_body", "(", "self", ".", "manager", ",", "'devicestatus'", ")", "body", "[", "'uuid'", "]", "=", "self", ".", "uuid", ...
Adjust Fan Speed by Specifying 1,2,3 as argument or cycle through speeds increasing by one
[ "Adjust", "Fan", "Speed", "by", "Specifying", "1", "2", "3", "as", "argument", "or", "cycle", "through", "speeds", "increasing", "by", "one" ]
train
https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesyncfan.py#L90-L118
markperdue/pyvesync
src/pyvesync/vesyncfan.py
VeSyncAir131.mode_toggle
def mode_toggle(self, mode: str) -> bool: """Set mode to manual, auto or sleep""" head = helpers.req_headers(self.manager) body = helpers.req_body(self.manager, 'devicestatus') body['uuid'] = self.uuid if mode != body['mode'] and mode in ['sleep', 'auto', 'manual']: body['mode'] = mode if mode == 'manual': body['level'] = 1 r, _ = helpers.call_api('/131airPurifier/v1/device/updateMode', 'put', json=body, headers=head) if r is not None and helpers.check_response(r, 'airpur_status'): self.details['mode'] = mode return True return False
python
def mode_toggle(self, mode: str) -> bool: """Set mode to manual, auto or sleep""" head = helpers.req_headers(self.manager) body = helpers.req_body(self.manager, 'devicestatus') body['uuid'] = self.uuid if mode != body['mode'] and mode in ['sleep', 'auto', 'manual']: body['mode'] = mode if mode == 'manual': body['level'] = 1 r, _ = helpers.call_api('/131airPurifier/v1/device/updateMode', 'put', json=body, headers=head) if r is not None and helpers.check_response(r, 'airpur_status'): self.details['mode'] = mode return True return False
[ "def", "mode_toggle", "(", "self", ",", "mode", ":", "str", ")", "->", "bool", ":", "head", "=", "helpers", ".", "req_headers", "(", "self", ".", "manager", ")", "body", "=", "helpers", ".", "req_body", "(", "self", ".", "manager", ",", "'devicestatus'...
Set mode to manual, auto or sleep
[ "Set", "mode", "to", "manual", "auto", "or", "sleep" ]
train
https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesyncfan.py#L120-L137
yjzhang/uncurl_python
uncurl/lineage.py
fourier_series
def fourier_series(x, *a): """ Arbitrary dimensionality fourier series. The first parameter is a_0, and the second parameter is the interval/scale parameter. The parameters are altering sin and cos paramters. n = (len(a)-2)/2 """ output = 0 output += a[0]/2 w = a[1] for n in range(2, len(a), 2): n_ = n/2 val1 = a[n] val2 = a[n+1] output += val1*np.sin(n_*x*w) output += val2*np.cos(n_*x*w) return output
python
def fourier_series(x, *a): """ Arbitrary dimensionality fourier series. The first parameter is a_0, and the second parameter is the interval/scale parameter. The parameters are altering sin and cos paramters. n = (len(a)-2)/2 """ output = 0 output += a[0]/2 w = a[1] for n in range(2, len(a), 2): n_ = n/2 val1 = a[n] val2 = a[n+1] output += val1*np.sin(n_*x*w) output += val2*np.cos(n_*x*w) return output
[ "def", "fourier_series", "(", "x", ",", "*", "a", ")", ":", "output", "=", "0", "output", "+=", "a", "[", "0", "]", "/", "2", "w", "=", "a", "[", "1", "]", "for", "n", "in", "range", "(", "2", ",", "len", "(", "a", ")", ",", "2", ")", "...
Arbitrary dimensionality fourier series. The first parameter is a_0, and the second parameter is the interval/scale parameter. The parameters are altering sin and cos paramters. n = (len(a)-2)/2
[ "Arbitrary", "dimensionality", "fourier", "series", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/lineage.py#L10-L30
yjzhang/uncurl_python
uncurl/lineage.py
graph_distances
def graph_distances(start, edges, distances): """ Given an undirected adjacency list and a pairwise distance matrix between all nodes: calculates distances along graph from start node. Args: start (int): start node edges (list): adjacency list of tuples distances (array): 2d array of distances between nodes Returns: dict of node to distance from start """ # convert adjacency list to adjacency dict adj = {x: [] for x in range(len(distances))} for n1, n2 in edges: adj[n1].append(n2) adj[n2].append(n1) # run dijkstra's algorithm to_visit = [] new_dist = {} for n in adj[start]: heapq.heappush(to_visit, (distances[start, n], n)) while to_visit: d, next_node = heapq.heappop(to_visit) if next_node not in new_dist: new_dist[next_node] = d for n in adj[next_node]: if n not in new_dist: heapq.heappush(to_visit, (d + distances[next_node, n], n)) return new_dist
python
def graph_distances(start, edges, distances): """ Given an undirected adjacency list and a pairwise distance matrix between all nodes: calculates distances along graph from start node. Args: start (int): start node edges (list): adjacency list of tuples distances (array): 2d array of distances between nodes Returns: dict of node to distance from start """ # convert adjacency list to adjacency dict adj = {x: [] for x in range(len(distances))} for n1, n2 in edges: adj[n1].append(n2) adj[n2].append(n1) # run dijkstra's algorithm to_visit = [] new_dist = {} for n in adj[start]: heapq.heappush(to_visit, (distances[start, n], n)) while to_visit: d, next_node = heapq.heappop(to_visit) if next_node not in new_dist: new_dist[next_node] = d for n in adj[next_node]: if n not in new_dist: heapq.heappush(to_visit, (d + distances[next_node, n], n)) return new_dist
[ "def", "graph_distances", "(", "start", ",", "edges", ",", "distances", ")", ":", "# convert adjacency list to adjacency dict", "adj", "=", "{", "x", ":", "[", "]", "for", "x", "in", "range", "(", "len", "(", "distances", ")", ")", "}", "for", "n1", ",",...
Given an undirected adjacency list and a pairwise distance matrix between all nodes: calculates distances along graph from start node. Args: start (int): start node edges (list): adjacency list of tuples distances (array): 2d array of distances between nodes Returns: dict of node to distance from start
[ "Given", "an", "undirected", "adjacency", "list", "and", "a", "pairwise", "distance", "matrix", "between", "all", "nodes", ":", "calculates", "distances", "along", "graph", "from", "start", "node", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/lineage.py#L32-L62
yjzhang/uncurl_python
uncurl/lineage.py
poly_curve
def poly_curve(x, *a): """ Arbitrary dimension polynomial. """ output = 0.0 for n in range(0, len(a)): output += a[n]*x**n return output
python
def poly_curve(x, *a): """ Arbitrary dimension polynomial. """ output = 0.0 for n in range(0, len(a)): output += a[n]*x**n return output
[ "def", "poly_curve", "(", "x", ",", "*", "a", ")", ":", "output", "=", "0.0", "for", "n", "in", "range", "(", "0", ",", "len", "(", "a", ")", ")", ":", "output", "+=", "a", "[", "n", "]", "*", "x", "**", "n", "return", "output" ]
Arbitrary dimension polynomial.
[ "Arbitrary", "dimension", "polynomial", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/lineage.py#L65-L72
yjzhang/uncurl_python
uncurl/lineage.py
run_lineage
def run_lineage(means, weights, curve_function='poly', curve_dimensions=6): """ Lineage graph produced by minimum spanning tree Args: means (array): genes x clusters - output of state estimation weights (array): clusters x cells - output of state estimation curve_function (string): either 'poly' or 'fourier'. Default: 'poly' curve_dimensions (int): number of parameters for the curve. Default: 6 Returns: curve parameters: list of lists for each cluster smoothed data in 2d space: 2 x cells list of edges: pairs of cell indices cell cluster assignments: list of ints """ if curve_function=='poly': func = poly_curve elif curve_function=='fourier': func = fourier_series # step 1: dimensionality reduction X = dim_reduce(means, weights, 2) reduced_data = np.dot(X.T, weights) if X.shape[0]==2: reduced_data = np.dot(X, weights) # 2. identifying dominant cell types - max weight for each cell cells = weights.shape[1] clusters = means.shape[1] cell_cluster_assignments = weights.argmax(0) # 3. fit smooth curve over cell types -5th order fourier series # cluster_curves contains the parameters for each curve. cluster_curves = [] # cluster_fitted_vals is a 2 x cells array cluster_fitted_vals = reduced_data.copy() # cluster_edges contain a list of ordered pairs (indices) connecting cells # in each cluster. 
cluster_edges = [] for c in range(clusters): cluster_cells = reduced_data[:, cell_cluster_assignments==c] if len(cluster_cells) == 0: cluster_edges.append([]) continue if cluster_cells.shape[1] < 2: cluster_edges.append([]) continue elif cluster_cells.shape[1] < curve_dimensions: tc = cluster_cells.shape[1]-1 else: tc = curve_dimensions # y = f(x) if curve_function=='fourier': p0 = [1.0]*tc # scipy is bad at finding the correct scale p0[1] = 0.0001 bounds = (-np.inf, np.inf) else: p0 = [1.0]*tc bounds = (-np.inf, np.inf) p_x, pcov_x = curve_fit(func, cluster_cells[0,:], cluster_cells[1,:], p0=p0, bounds=bounds) perr_x = np.sum(np.sqrt(np.diag(pcov_x))) # x = f(y) p_y, pcov_y = curve_fit(func, cluster_cells[1,:], cluster_cells[0,:], p0=p0, bounds=bounds) perr_y = np.sum(np.sqrt(np.diag(pcov_y))) if perr_x <= perr_y: x_vals = reduced_data[0,:] cluster_curves.append(p_x) y_vals = np.array([func(x, *p_x) for x in x_vals]) #print 'error:', np.sum(np.sqrt((y_vals - reduced_data[1,:])**2)[cell_cluster_assignments==c]) fitted_vals = np.array([x_vals, y_vals]) cluster_fitted_vals[:,cell_cluster_assignments==c] = fitted_vals[:,cell_cluster_assignments==c] # sort points by increasing X, connect points x_indices = np.argsort(x_vals) x_indices = [x for x in x_indices if cell_cluster_assignments[x]==c] new_cluster_edges = [] for i, j in zip(x_indices[:-1], x_indices[1:]): new_cluster_edges.append((i,j)) cluster_edges.append(new_cluster_edges) else: y_vals = reduced_data[1,:] cluster_curves.append(p_y) x_vals = np.array([func(x, *p_y) for x in y_vals]) #print 'error:', np.sum(np.sqrt((x_vals - reduced_data[0,:])**2)[cell_cluster_assignments==c]) fitted_vals = np.array([x_vals, y_vals]) cluster_fitted_vals[:,cell_cluster_assignments==c] = fitted_vals[:,cell_cluster_assignments==c] # sort points by increasing Y, connect points y_indices = np.argsort(y_vals) y_indices = [x for x in y_indices if cell_cluster_assignments[x]==c] new_cluster_edges = [] for i,j in zip(y_indices[:-1], 
y_indices[1:]): new_cluster_edges.append((i,j)) cluster_edges.append(new_cluster_edges) # 4. connect each cluster together # for each cluster, find the closest point in another cluster, and connect # those points. Add that point to cluster_edges. # build a distance matrix between the reduced points... distances = squareform(pdist(cluster_fitted_vals.T)) for c1 in range(clusters): min_dist = np.inf min_index = None if sum(cell_cluster_assignments==c1)==0: continue for c2 in range(clusters): if sum(cell_cluster_assignments==c2)==0: continue if c1!=c2: distances_c = distances[cell_cluster_assignments==c1,:][:, cell_cluster_assignments==c2] mindex = np.unravel_index(distances_c.argmin(), distances_c.shape) if distances_c[mindex] < min_dist: min_dist = distances_c[mindex] min_index = np.where(distances==min_dist) min_index = (min_index[0][0], min_index[1][0]) cluster_edges[c1].append(min_index) # flatten cluster_edges? cluster_edges = [i for sublist in cluster_edges for i in sublist] return cluster_curves, cluster_fitted_vals, cluster_edges, cell_cluster_assignments
python
def run_lineage(means, weights, curve_function='poly', curve_dimensions=6): """ Lineage graph produced by minimum spanning tree Args: means (array): genes x clusters - output of state estimation weights (array): clusters x cells - output of state estimation curve_function (string): either 'poly' or 'fourier'. Default: 'poly' curve_dimensions (int): number of parameters for the curve. Default: 6 Returns: curve parameters: list of lists for each cluster smoothed data in 2d space: 2 x cells list of edges: pairs of cell indices cell cluster assignments: list of ints """ if curve_function=='poly': func = poly_curve elif curve_function=='fourier': func = fourier_series # step 1: dimensionality reduction X = dim_reduce(means, weights, 2) reduced_data = np.dot(X.T, weights) if X.shape[0]==2: reduced_data = np.dot(X, weights) # 2. identifying dominant cell types - max weight for each cell cells = weights.shape[1] clusters = means.shape[1] cell_cluster_assignments = weights.argmax(0) # 3. fit smooth curve over cell types -5th order fourier series # cluster_curves contains the parameters for each curve. cluster_curves = [] # cluster_fitted_vals is a 2 x cells array cluster_fitted_vals = reduced_data.copy() # cluster_edges contain a list of ordered pairs (indices) connecting cells # in each cluster. 
cluster_edges = [] for c in range(clusters): cluster_cells = reduced_data[:, cell_cluster_assignments==c] if len(cluster_cells) == 0: cluster_edges.append([]) continue if cluster_cells.shape[1] < 2: cluster_edges.append([]) continue elif cluster_cells.shape[1] < curve_dimensions: tc = cluster_cells.shape[1]-1 else: tc = curve_dimensions # y = f(x) if curve_function=='fourier': p0 = [1.0]*tc # scipy is bad at finding the correct scale p0[1] = 0.0001 bounds = (-np.inf, np.inf) else: p0 = [1.0]*tc bounds = (-np.inf, np.inf) p_x, pcov_x = curve_fit(func, cluster_cells[0,:], cluster_cells[1,:], p0=p0, bounds=bounds) perr_x = np.sum(np.sqrt(np.diag(pcov_x))) # x = f(y) p_y, pcov_y = curve_fit(func, cluster_cells[1,:], cluster_cells[0,:], p0=p0, bounds=bounds) perr_y = np.sum(np.sqrt(np.diag(pcov_y))) if perr_x <= perr_y: x_vals = reduced_data[0,:] cluster_curves.append(p_x) y_vals = np.array([func(x, *p_x) for x in x_vals]) #print 'error:', np.sum(np.sqrt((y_vals - reduced_data[1,:])**2)[cell_cluster_assignments==c]) fitted_vals = np.array([x_vals, y_vals]) cluster_fitted_vals[:,cell_cluster_assignments==c] = fitted_vals[:,cell_cluster_assignments==c] # sort points by increasing X, connect points x_indices = np.argsort(x_vals) x_indices = [x for x in x_indices if cell_cluster_assignments[x]==c] new_cluster_edges = [] for i, j in zip(x_indices[:-1], x_indices[1:]): new_cluster_edges.append((i,j)) cluster_edges.append(new_cluster_edges) else: y_vals = reduced_data[1,:] cluster_curves.append(p_y) x_vals = np.array([func(x, *p_y) for x in y_vals]) #print 'error:', np.sum(np.sqrt((x_vals - reduced_data[0,:])**2)[cell_cluster_assignments==c]) fitted_vals = np.array([x_vals, y_vals]) cluster_fitted_vals[:,cell_cluster_assignments==c] = fitted_vals[:,cell_cluster_assignments==c] # sort points by increasing Y, connect points y_indices = np.argsort(y_vals) y_indices = [x for x in y_indices if cell_cluster_assignments[x]==c] new_cluster_edges = [] for i,j in zip(y_indices[:-1], 
y_indices[1:]): new_cluster_edges.append((i,j)) cluster_edges.append(new_cluster_edges) # 4. connect each cluster together # for each cluster, find the closest point in another cluster, and connect # those points. Add that point to cluster_edges. # build a distance matrix between the reduced points... distances = squareform(pdist(cluster_fitted_vals.T)) for c1 in range(clusters): min_dist = np.inf min_index = None if sum(cell_cluster_assignments==c1)==0: continue for c2 in range(clusters): if sum(cell_cluster_assignments==c2)==0: continue if c1!=c2: distances_c = distances[cell_cluster_assignments==c1,:][:, cell_cluster_assignments==c2] mindex = np.unravel_index(distances_c.argmin(), distances_c.shape) if distances_c[mindex] < min_dist: min_dist = distances_c[mindex] min_index = np.where(distances==min_dist) min_index = (min_index[0][0], min_index[1][0]) cluster_edges[c1].append(min_index) # flatten cluster_edges? cluster_edges = [i for sublist in cluster_edges for i in sublist] return cluster_curves, cluster_fitted_vals, cluster_edges, cell_cluster_assignments
[ "def", "run_lineage", "(", "means", ",", "weights", ",", "curve_function", "=", "'poly'", ",", "curve_dimensions", "=", "6", ")", ":", "if", "curve_function", "==", "'poly'", ":", "func", "=", "poly_curve", "elif", "curve_function", "==", "'fourier'", ":", "...
Lineage graph produced by minimum spanning tree Args: means (array): genes x clusters - output of state estimation weights (array): clusters x cells - output of state estimation curve_function (string): either 'poly' or 'fourier'. Default: 'poly' curve_dimensions (int): number of parameters for the curve. Default: 6 Returns: curve parameters: list of lists for each cluster smoothed data in 2d space: 2 x cells list of edges: pairs of cell indices cell cluster assignments: list of ints
[ "Lineage", "graph", "produced", "by", "minimum", "spanning", "tree" ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/lineage.py#L75-L193
yjzhang/uncurl_python
uncurl/lineage.py
pseudotime
def pseudotime(starting_node, edges, fitted_vals): """ Args: starting_node (int): index of the starting node edges (list): list of tuples (node1, node2) fitted_vals (array): output of lineage (2 x cells) Returns: A 1d array containing the pseudotime value of each cell. """ # TODO # 1. calculate a distance matrix... distances = np.array([[sum((x - y)**2) for x in fitted_vals.T] for y in fitted_vals.T]) # 2. start from the root node/cell, calculate distance along graph distance_dict = graph_distances(starting_node, edges, distances) output = [] for i in range(fitted_vals.shape[1]): output.append(distance_dict[i]) return np.array(output)
python
def pseudotime(starting_node, edges, fitted_vals): """ Args: starting_node (int): index of the starting node edges (list): list of tuples (node1, node2) fitted_vals (array): output of lineage (2 x cells) Returns: A 1d array containing the pseudotime value of each cell. """ # TODO # 1. calculate a distance matrix... distances = np.array([[sum((x - y)**2) for x in fitted_vals.T] for y in fitted_vals.T]) # 2. start from the root node/cell, calculate distance along graph distance_dict = graph_distances(starting_node, edges, distances) output = [] for i in range(fitted_vals.shape[1]): output.append(distance_dict[i]) return np.array(output)
[ "def", "pseudotime", "(", "starting_node", ",", "edges", ",", "fitted_vals", ")", ":", "# TODO", "# 1. calculate a distance matrix...", "distances", "=", "np", ".", "array", "(", "[", "[", "sum", "(", "(", "x", "-", "y", ")", "**", "2", ")", "for", "x", ...
Args: starting_node (int): index of the starting node edges (list): list of tuples (node1, node2) fitted_vals (array): output of lineage (2 x cells) Returns: A 1d array containing the pseudotime value of each cell.
[ "Args", ":", "starting_node", "(", "int", ")", ":", "index", "of", "the", "starting", "node", "edges", "(", "list", ")", ":", "list", "of", "tuples", "(", "node1", "node2", ")", "fitted_vals", "(", "array", ")", ":", "output", "of", "lineage", "(", "...
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/lineage.py#L195-L213
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country._add_countriesdata
def _add_countriesdata(cls, iso3, country): # type: (str, hxl.Row) -> None """ Set up countries data from data in form provided by UNStats and World Bank Args: iso3 (str): ISO3 code for country country (hxl.Row): Country information Returns: None """ countryname = country.get('#country+name+preferred') cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3 iso2 = country.get('#country+code+v_iso2') if iso2: cls._countriesdata['iso2iso3'][iso2] = iso3 # different types so keys won't clash cls._countriesdata['iso2iso3'][iso3] = iso2 m49 = country.get('#country+code+num+v_m49') if m49: m49 = int(m49) cls._countriesdata['m49iso3'][m49] = iso3 # different types so keys won't clash cls._countriesdata['m49iso3'][iso3] = m49 cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE) regionname = country.get('#region+main+name+preferred') sub_regionname = country.get('#region+sub+name+preferred') intermediate_regionname = country.get('#region+intermediate+name+preferred') regionid = country.get('#region+main+code') if regionid: regionid = int(regionid) sub_regionid = country.get('#region+sub+code') if sub_regionid: sub_regionid = int(sub_regionid) intermediate_regionid = country.get('#region+intermediate+code') if intermediate_regionid: intermediate_regionid = int(intermediate_regionid) # region, subregion and intermediate region codes do not clash so only need one dict def add_country_to_set(colname, idval, iso3): value = cls._countriesdata[colname].get(idval) if value is None: value = set() cls._countriesdata['regioncodes2countries'][idval] = value value.add(iso3) if regionname: add_country_to_set('regioncodes2countries', regionid, iso3) cls._countriesdata['regioncodes2names'][regionid] = regionname cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid if sub_regionname: add_country_to_set('regioncodes2countries', sub_regionid, iso3) cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname 
cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid if intermediate_regionname: add_country_to_set('regioncodes2countries', intermediate_regionid, iso3) cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \ intermediate_regionid
python
def _add_countriesdata(cls, iso3, country): # type: (str, hxl.Row) -> None """ Set up countries data from data in form provided by UNStats and World Bank Args: iso3 (str): ISO3 code for country country (hxl.Row): Country information Returns: None """ countryname = country.get('#country+name+preferred') cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3 iso2 = country.get('#country+code+v_iso2') if iso2: cls._countriesdata['iso2iso3'][iso2] = iso3 # different types so keys won't clash cls._countriesdata['iso2iso3'][iso3] = iso2 m49 = country.get('#country+code+num+v_m49') if m49: m49 = int(m49) cls._countriesdata['m49iso3'][m49] = iso3 # different types so keys won't clash cls._countriesdata['m49iso3'][iso3] = m49 cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE) regionname = country.get('#region+main+name+preferred') sub_regionname = country.get('#region+sub+name+preferred') intermediate_regionname = country.get('#region+intermediate+name+preferred') regionid = country.get('#region+main+code') if regionid: regionid = int(regionid) sub_regionid = country.get('#region+sub+code') if sub_regionid: sub_regionid = int(sub_regionid) intermediate_regionid = country.get('#region+intermediate+code') if intermediate_regionid: intermediate_regionid = int(intermediate_regionid) # region, subregion and intermediate region codes do not clash so only need one dict def add_country_to_set(colname, idval, iso3): value = cls._countriesdata[colname].get(idval) if value is None: value = set() cls._countriesdata['regioncodes2countries'][idval] = value value.add(iso3) if regionname: add_country_to_set('regioncodes2countries', regionid, iso3) cls._countriesdata['regioncodes2names'][regionid] = regionname cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid if sub_regionname: add_country_to_set('regioncodes2countries', sub_regionid, iso3) cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname 
cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid if intermediate_regionname: add_country_to_set('regioncodes2countries', intermediate_regionid, iso3) cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \ intermediate_regionid
[ "def", "_add_countriesdata", "(", "cls", ",", "iso3", ",", "country", ")", ":", "# type: (str, hxl.Row) -> None", "countryname", "=", "country", ".", "get", "(", "'#country+name+preferred'", ")", "cls", ".", "_countriesdata", "[", "'countrynames2iso3'", "]", "[", ...
Set up countries data from data in form provided by UNStats and World Bank Args: iso3 (str): ISO3 code for country country (hxl.Row): Country information Returns: None
[ "Set", "up", "countries", "data", "from", "data", "in", "form", "provided", "by", "UNStats", "and", "World", "Bank" ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L45-L104
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.set_countriesdata
def set_countriesdata(cls, countries): # type: (str) -> None """ Set up countries data from data in form provided by UNStats and World Bank Args: countries (str): Countries data in HTML format provided by UNStats Returns: None """ cls._countriesdata = dict() cls._countriesdata['countries'] = dict() cls._countriesdata['iso2iso3'] = dict() cls._countriesdata['m49iso3'] = dict() cls._countriesdata['countrynames2iso3'] = dict() cls._countriesdata['regioncodes2countries'] = dict() cls._countriesdata['regioncodes2names'] = dict() cls._countriesdata['regionnames2codes'] = dict() cls._countriesdata['aliases'] = dict() for country in countries: iso3 = country.get('#country+code+v_iso3') if not iso3: continue iso3 = iso3.upper() cls._add_countriesdata(iso3, country) cls._countriesdata['countries'][iso3] = country.dictionary def sort_list(colname): for idval in cls._countriesdata[colname]: cls._countriesdata[colname][idval] = \ sorted(list(cls._countriesdata[colname][idval])) sort_list('regioncodes2countries')
python
def set_countriesdata(cls, countries): # type: (str) -> None """ Set up countries data from data in form provided by UNStats and World Bank Args: countries (str): Countries data in HTML format provided by UNStats Returns: None """ cls._countriesdata = dict() cls._countriesdata['countries'] = dict() cls._countriesdata['iso2iso3'] = dict() cls._countriesdata['m49iso3'] = dict() cls._countriesdata['countrynames2iso3'] = dict() cls._countriesdata['regioncodes2countries'] = dict() cls._countriesdata['regioncodes2names'] = dict() cls._countriesdata['regionnames2codes'] = dict() cls._countriesdata['aliases'] = dict() for country in countries: iso3 = country.get('#country+code+v_iso3') if not iso3: continue iso3 = iso3.upper() cls._add_countriesdata(iso3, country) cls._countriesdata['countries'][iso3] = country.dictionary def sort_list(colname): for idval in cls._countriesdata[colname]: cls._countriesdata[colname][idval] = \ sorted(list(cls._countriesdata[colname][idval])) sort_list('regioncodes2countries')
[ "def", "set_countriesdata", "(", "cls", ",", "countries", ")", ":", "# type: (str) -> None", "cls", ".", "_countriesdata", "=", "dict", "(", ")", "cls", ".", "_countriesdata", "[", "'countries'", "]", "=", "dict", "(", ")", "cls", ".", "_countriesdata", "[",...
Set up countries data from data in form provided by UNStats and World Bank Args: countries (str): Countries data in HTML format provided by UNStats Returns: None
[ "Set", "up", "countries", "data", "from", "data", "in", "form", "provided", "by", "UNStats", "and", "World", "Bank" ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L107-L141
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.countriesdata
def countriesdata(cls, use_live=True): # type: (bool) -> List[Dict[Dict]] """ Read countries data from OCHA countries feed (falling back to file) Args: use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. Returns: List[Dict[Dict]]: Countries dictionaries """ if cls._countriesdata is None: countries = None if use_live: try: countries = hxl.data(cls._ochaurl) except IOError: logger.exception('Download from OCHA feed failed! Falling back to stored file.') if countries is None: countries = hxl.data( script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv', Country), allow_local=True) cls.set_countriesdata(countries) return cls._countriesdata
python
def countriesdata(cls, use_live=True): # type: (bool) -> List[Dict[Dict]] """ Read countries data from OCHA countries feed (falling back to file) Args: use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. Returns: List[Dict[Dict]]: Countries dictionaries """ if cls._countriesdata is None: countries = None if use_live: try: countries = hxl.data(cls._ochaurl) except IOError: logger.exception('Download from OCHA feed failed! Falling back to stored file.') if countries is None: countries = hxl.data( script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv', Country), allow_local=True) cls.set_countriesdata(countries) return cls._countriesdata
[ "def", "countriesdata", "(", "cls", ",", "use_live", "=", "True", ")", ":", "# type: (bool) -> List[Dict[Dict]]", "if", "cls", ".", "_countriesdata", "is", "None", ":", "countries", "=", "None", "if", "use_live", ":", "try", ":", "countries", "=", "hxl", "."...
Read countries data from OCHA countries feed (falling back to file) Args: use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. Returns: List[Dict[Dict]]: Countries dictionaries
[ "Read", "countries", "data", "from", "OCHA", "countries", "feed", "(", "falling", "back", "to", "file", ")" ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L144-L167
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.set_ocha_url
def set_ocha_url(cls, url=None): # type: (str) -> None """ Set World Bank url from which to retrieve countries data Args: url (str): World Bank url from which to retrieve countries data. Defaults to internal value. Returns: None """ if url is None: url = cls._ochaurl_int cls._ochaurl = url
python
def set_ocha_url(cls, url=None): # type: (str) -> None """ Set World Bank url from which to retrieve countries data Args: url (str): World Bank url from which to retrieve countries data. Defaults to internal value. Returns: None """ if url is None: url = cls._ochaurl_int cls._ochaurl = url
[ "def", "set_ocha_url", "(", "cls", ",", "url", "=", "None", ")", ":", "# type: (str) -> None", "if", "url", "is", "None", ":", "url", "=", "cls", ".", "_ochaurl_int", "cls", ".", "_ochaurl", "=", "url" ]
Set World Bank url from which to retrieve countries data Args: url (str): World Bank url from which to retrieve countries data. Defaults to internal value. Returns: None
[ "Set", "World", "Bank", "url", "from", "which", "to", "retrieve", "countries", "data" ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L170-L183
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.get_country_info_from_iso3
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]] """Get country information from ISO3 code Args: iso3 (str): ISO3 code for which to get country information use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[Dict[str]]: country information """ countriesdata = cls.countriesdata(use_live=use_live) country = countriesdata['countries'].get(iso3.upper()) if country is not None: return country if exception is not None: raise exception return None
python
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]] """Get country information from ISO3 code Args: iso3 (str): ISO3 code for which to get country information use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[Dict[str]]: country information """ countriesdata = cls.countriesdata(use_live=use_live) country = countriesdata['countries'].get(iso3.upper()) if country is not None: return country if exception is not None: raise exception return None
[ "def", "get_country_info_from_iso3", "(", "cls", ",", "iso3", ",", "use_live", "=", "True", ",", "exception", "=", "None", ")", ":", "# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]", "countriesdata", "=", "cls", ".", "countriesdata", "(", "use...
Get country information from ISO3 code Args: iso3 (str): ISO3 code for which to get country information use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[Dict[str]]: country information
[ "Get", "country", "information", "from", "ISO3", "code" ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L186-L205
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.get_country_name_from_iso3
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str] """Get country name from ISO3 code Args: iso3 (str): ISO3 code for which to get country name use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: Country name """ countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception) if countryinfo is not None: return countryinfo.get('#country+name+preferred') return None
python
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str] """Get country name from ISO3 code Args: iso3 (str): ISO3 code for which to get country name use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: Country name """ countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception) if countryinfo is not None: return countryinfo.get('#country+name+preferred') return None
[ "def", "get_country_name_from_iso3", "(", "cls", ",", "iso3", ",", "use_live", "=", "True", ",", "exception", "=", "None", ")", ":", "# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]", "countryinfo", "=", "cls", ".", "get_country_info_from_iso3", "(", ...
Get country name from ISO3 code Args: iso3 (str): ISO3 code for which to get country name use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: Country name
[ "Get", "country", "name", "from", "ISO3", "code" ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L208-L223
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.get_iso2_from_iso3
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str] """Get ISO2 from ISO3 code Args: iso3 (str): ISO3 code for which to get ISO2 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: ISO2 code """ countriesdata = cls.countriesdata(use_live=use_live) iso2 = countriesdata['iso2iso3'].get(iso3.upper()) if iso2 is not None: return iso2 if exception is not None: raise exception return None
python
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str] """Get ISO2 from ISO3 code Args: iso3 (str): ISO3 code for which to get ISO2 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: ISO2 code """ countriesdata = cls.countriesdata(use_live=use_live) iso2 = countriesdata['iso2iso3'].get(iso3.upper()) if iso2 is not None: return iso2 if exception is not None: raise exception return None
[ "def", "get_iso2_from_iso3", "(", "cls", ",", "iso3", ",", "use_live", "=", "True", ",", "exception", "=", "None", ")", ":", "# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]", "countriesdata", "=", "cls", ".", "countriesdata", "(", "use_live", "=",...
Get ISO2 from ISO3 code Args: iso3 (str): ISO3 code for which to get ISO2 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: ISO2 code
[ "Get", "ISO2", "from", "ISO3", "code" ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L226-L245
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.get_country_info_from_iso2
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]] """Get country name from ISO2 code Args: iso2 (str): ISO2 code for which to get country information use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[Dict[str]]: Country information """ iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception) if iso3 is not None: return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception) return None
python
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]] """Get country name from ISO2 code Args: iso2 (str): ISO2 code for which to get country information use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[Dict[str]]: Country information """ iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception) if iso3 is not None: return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception) return None
[ "def", "get_country_info_from_iso2", "(", "cls", ",", "iso2", ",", "use_live", "=", "True", ",", "exception", "=", "None", ")", ":", "# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]", "iso3", "=", "cls", ".", "get_iso3_from_iso2", "(", "iso2", ...
Get country name from ISO2 code Args: iso2 (str): ISO2 code for which to get country information use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[Dict[str]]: Country information
[ "Get", "country", "name", "from", "ISO2", "code" ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L270-L285
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.get_country_name_from_iso2
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str] """Get country name from ISO2 code Args: iso2 (str): ISO2 code for which to get country name use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: Country name """ iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception) if iso3 is not None: return cls.get_country_name_from_iso3(iso3, exception=exception) return None
python
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str] """Get country name from ISO2 code Args: iso2 (str): ISO2 code for which to get country name use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: Country name """ iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception) if iso3 is not None: return cls.get_country_name_from_iso3(iso3, exception=exception) return None
[ "def", "get_country_name_from_iso2", "(", "cls", ",", "iso2", ",", "use_live", "=", "True", ",", "exception", "=", "None", ")", ":", "# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]", "iso3", "=", "cls", ".", "get_iso3_from_iso2", "(", "iso2", ",",...
Get country name from ISO2 code Args: iso2 (str): ISO2 code for which to get country name use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: Country name
[ "Get", "country", "name", "from", "ISO2", "code" ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L288-L303
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.get_m49_from_iso3
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int] """Get M49 from ISO3 code Args: iso3 (str): ISO3 code for which to get M49 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[int]: M49 code """ countriesdata = cls.countriesdata(use_live=use_live) m49 = countriesdata['m49iso3'].get(iso3) if m49 is not None: return m49 if exception is not None: raise exception return None
python
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int] """Get M49 from ISO3 code Args: iso3 (str): ISO3 code for which to get M49 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[int]: M49 code """ countriesdata = cls.countriesdata(use_live=use_live) m49 = countriesdata['m49iso3'].get(iso3) if m49 is not None: return m49 if exception is not None: raise exception return None
[ "def", "get_m49_from_iso3", "(", "cls", ",", "iso3", ",", "use_live", "=", "True", ",", "exception", "=", "None", ")", ":", "# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]", "countriesdata", "=", "cls", ".", "countriesdata", "(", "use_live", "=", ...
Get M49 from ISO3 code Args: iso3 (str): ISO3 code for which to get M49 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[int]: M49 code
[ "Get", "M49", "from", "ISO3", "code" ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L306-L325
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.get_country_info_from_m49
def get_country_info_from_m49(cls, m49, use_live=True, exception=None): # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]] """Get country name from M49 code Args: m49 (int): M49 numeric code for which to get country information use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[Dict[str]]: Country information """ iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception) if iso3 is not None: return cls.get_country_info_from_iso3(iso3, exception=exception) return None
python
def get_country_info_from_m49(cls, m49, use_live=True, exception=None): # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]] """Get country name from M49 code Args: m49 (int): M49 numeric code for which to get country information use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[Dict[str]]: Country information """ iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception) if iso3 is not None: return cls.get_country_info_from_iso3(iso3, exception=exception) return None
[ "def", "get_country_info_from_m49", "(", "cls", ",", "m49", ",", "use_live", "=", "True", ",", "exception", "=", "None", ")", ":", "# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]", "iso3", "=", "cls", ".", "get_iso3_from_m49", "(", "m49", ",...
Get country name from M49 code Args: m49 (int): M49 numeric code for which to get country information use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[Dict[str]]: Country information
[ "Get", "country", "name", "from", "M49", "code" ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L350-L365
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.get_country_name_from_m49
def get_country_name_from_m49(cls, m49, use_live=True, exception=None): # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str] """Get country name from M49 code Args: m49 (int): M49 numeric code for which to get country name use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: Country name """ iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception) if iso3 is not None: return cls.get_country_name_from_iso3(iso3, exception=exception) return None
python
def get_country_name_from_m49(cls, m49, use_live=True, exception=None): # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str] """Get country name from M49 code Args: m49 (int): M49 numeric code for which to get country name use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: Country name """ iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception) if iso3 is not None: return cls.get_country_name_from_iso3(iso3, exception=exception) return None
[ "def", "get_country_name_from_m49", "(", "cls", ",", "m49", ",", "use_live", "=", "True", ",", "exception", "=", "None", ")", ":", "# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]", "iso3", "=", "cls", ".", "get_iso3_from_m49", "(", "m49", ",", "...
Get country name from M49 code Args: m49 (int): M49 numeric code for which to get country name use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: Country name
[ "Get", "country", "name", "from", "M49", "code" ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L368-L383
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.expand_countryname_abbrevs
def expand_countryname_abbrevs(cls, country): # type: (str) -> List[str] """Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.) Args: country (str): Country with abbreviation(s)to expand Returns: List[str]: Uppercase country name with abbreviation(s) expanded in various ways """ def replace_ensure_space(word, replace, replacement): return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip() countryupper = country.upper() for abbreviation in cls.abbreviations: countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation]) candidates = [countryupper] for abbreviation in cls.multiple_abbreviations: if abbreviation in countryupper: for expanded in cls.multiple_abbreviations[abbreviation]: candidates.append(replace_ensure_space(countryupper, abbreviation, expanded)) return candidates
python
def expand_countryname_abbrevs(cls, country): # type: (str) -> List[str] """Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.) Args: country (str): Country with abbreviation(s)to expand Returns: List[str]: Uppercase country name with abbreviation(s) expanded in various ways """ def replace_ensure_space(word, replace, replacement): return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip() countryupper = country.upper() for abbreviation in cls.abbreviations: countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation]) candidates = [countryupper] for abbreviation in cls.multiple_abbreviations: if abbreviation in countryupper: for expanded in cls.multiple_abbreviations[abbreviation]: candidates.append(replace_ensure_space(countryupper, abbreviation, expanded)) return candidates
[ "def", "expand_countryname_abbrevs", "(", "cls", ",", "country", ")", ":", "# type: (str) -> List[str]", "def", "replace_ensure_space", "(", "word", ",", "replace", ",", "replacement", ")", ":", "return", "word", ".", "replace", "(", "replace", ",", "'%s '", "%"...
Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.) Args: country (str): Country with abbreviation(s)to expand Returns: List[str]: Uppercase country name with abbreviation(s) expanded in various ways
[ "Expands", "abbreviation", "(", "s", ")", "in", "country", "name", "in", "various", "ways", "(", "eg", ".", "FED", "-", ">", "FEDERATED", "FEDERAL", "etc", ".", ")" ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L386-L406
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.simplify_countryname
def simplify_countryname(cls, country): # type: (str) -> (str, List[str]) """Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc. Args: country (str): Country name to simplify Returns: Tuple[str, List[str]]: Uppercase simplified country name and list of removed words """ countryupper = country.upper() words = get_words_in_sentence(countryupper) index = countryupper.find(',') if index != -1: countryupper = countryupper[:index] index = countryupper.find(':') if index != -1: countryupper = countryupper[:index] regex = re.compile('\(.+?\)') countryupper = regex.sub('', countryupper) remove = copy.deepcopy(cls.simplifications) for simplification1, simplification2 in cls.abbreviations.items(): countryupper = countryupper.replace(simplification1, '') remove.append(simplification2) for simplification1, simplifications in cls.multiple_abbreviations.items(): countryupper = countryupper.replace(simplification1, '') for simplification2 in simplifications: remove.append(simplification2) remove = '|'.join(remove) regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE) countryupper = regex.sub('', countryupper) countryupper = countryupper.strip() countryupper_words = get_words_in_sentence(countryupper) if len(countryupper_words) > 1: countryupper = countryupper_words[0] if countryupper: words.remove(countryupper) return countryupper, words
python
def simplify_countryname(cls, country): # type: (str) -> (str, List[str]) """Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc. Args: country (str): Country name to simplify Returns: Tuple[str, List[str]]: Uppercase simplified country name and list of removed words """ countryupper = country.upper() words = get_words_in_sentence(countryupper) index = countryupper.find(',') if index != -1: countryupper = countryupper[:index] index = countryupper.find(':') if index != -1: countryupper = countryupper[:index] regex = re.compile('\(.+?\)') countryupper = regex.sub('', countryupper) remove = copy.deepcopy(cls.simplifications) for simplification1, simplification2 in cls.abbreviations.items(): countryupper = countryupper.replace(simplification1, '') remove.append(simplification2) for simplification1, simplifications in cls.multiple_abbreviations.items(): countryupper = countryupper.replace(simplification1, '') for simplification2 in simplifications: remove.append(simplification2) remove = '|'.join(remove) regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE) countryupper = regex.sub('', countryupper) countryupper = countryupper.strip() countryupper_words = get_words_in_sentence(countryupper) if len(countryupper_words) > 1: countryupper = countryupper_words[0] if countryupper: words.remove(countryupper) return countryupper, words
[ "def", "simplify_countryname", "(", "cls", ",", "country", ")", ":", "# type: (str) -> (str, List[str])", "countryupper", "=", "country", ".", "upper", "(", ")", "words", "=", "get_words_in_sentence", "(", "countryupper", ")", "index", "=", "countryupper", ".", "f...
Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc. Args: country (str): Country name to simplify Returns: Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
[ "Simplifies", "country", "name", "by", "removing", "descriptive", "text", "eg", ".", "DEMOCRATIC", "REPUBLIC", "OF", "etc", "." ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L409-L446
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.get_iso3_country_code
def get_iso3_country_code(cls, country, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str] """Get ISO3 code for cls. Only exact matches or None are returned. Args: country (str): Country for which to get ISO3 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: ISO3 country code or None """ countriesdata = cls.countriesdata(use_live=use_live) countryupper = country.upper() len_countryupper = len(countryupper) if len_countryupper == 3: if countryupper in countriesdata['countries']: return countryupper elif len_countryupper == 2: iso3 = countriesdata['iso2iso3'].get(countryupper) if iso3 is not None: return iso3 iso3 = countriesdata['countrynames2iso3'].get(countryupper) if iso3 is not None: return iso3 for candidate in cls.expand_countryname_abbrevs(countryupper): iso3 = countriesdata['countrynames2iso3'].get(candidate) if iso3 is not None: return iso3 if exception is not None: raise exception return None
python
def get_iso3_country_code(cls, country, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str] """Get ISO3 code for cls. Only exact matches or None are returned. Args: country (str): Country for which to get ISO3 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: ISO3 country code or None """ countriesdata = cls.countriesdata(use_live=use_live) countryupper = country.upper() len_countryupper = len(countryupper) if len_countryupper == 3: if countryupper in countriesdata['countries']: return countryupper elif len_countryupper == 2: iso3 = countriesdata['iso2iso3'].get(countryupper) if iso3 is not None: return iso3 iso3 = countriesdata['countrynames2iso3'].get(countryupper) if iso3 is not None: return iso3 for candidate in cls.expand_countryname_abbrevs(countryupper): iso3 = countriesdata['countrynames2iso3'].get(candidate) if iso3 is not None: return iso3 if exception is not None: raise exception return None
[ "def", "get_iso3_country_code", "(", "cls", ",", "country", ",", "use_live", "=", "True", ",", "exception", "=", "None", ")", ":", "# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]", "countriesdata", "=", "cls", ".", "countriesdata", "(", "use_live", ...
Get ISO3 code for cls. Only exact matches or None are returned. Args: country (str): Country for which to get ISO3 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: ISO3 country code or None
[ "Get", "ISO3", "code", "for", "cls", ".", "Only", "exact", "matches", "or", "None", "are", "returned", "." ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L449-L483
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.get_iso3_country_code_fuzzy
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]] """Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second showing if the match is exact or not. Args: country (str): Country for which to get ISO3 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False). """ countriesdata = cls.countriesdata(use_live=use_live) iso3 = cls.get_iso3_country_code(country, use_live=use_live) # don't put exception param here as we don't want it to throw if iso3 is not None: return iso3, True def remove_matching_from_list(wordlist, word_or_part): for word in wordlist: if word_or_part in word: wordlist.remove(word) # fuzzy matching expanded_country_candidates = cls.expand_countryname_abbrevs(country) match_strength = 0 matches = set() for countryname in sorted(countriesdata['countrynames2iso3']): for candidate in expanded_country_candidates: simplified_country, removed_words = cls.simplify_countryname(candidate) if simplified_country in countryname: words = get_words_in_sentence(countryname) new_match_strength = 0 if simplified_country: remove_matching_from_list(words, simplified_country) new_match_strength += 32 for word in removed_words: if word in countryname: remove_matching_from_list(words, word) new_match_strength += 4 else: if word in cls.major_differentiators: new_match_strength -= 16 else: new_match_strength -= 1 for word in words: if word in cls.major_differentiators: new_match_strength -= 16 else: new_match_strength -= 1 iso3 = countriesdata['countrynames2iso3'][countryname] if new_match_strength > match_strength: match_strength = new_match_strength matches = set() if new_match_strength 
== match_strength: matches.add(iso3) if len(matches) == 1 and match_strength > 16: return matches.pop(), False # regex lookup for iso3, regex in countriesdata['aliases'].items(): index = re.search(regex, country.upper()) if index is not None: return iso3, False if exception is not None: raise exception return None, False
python
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None): # type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]] """Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second showing if the match is exact or not. Args: country (str): Country for which to get ISO3 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False). """ countriesdata = cls.countriesdata(use_live=use_live) iso3 = cls.get_iso3_country_code(country, use_live=use_live) # don't put exception param here as we don't want it to throw if iso3 is not None: return iso3, True def remove_matching_from_list(wordlist, word_or_part): for word in wordlist: if word_or_part in word: wordlist.remove(word) # fuzzy matching expanded_country_candidates = cls.expand_countryname_abbrevs(country) match_strength = 0 matches = set() for countryname in sorted(countriesdata['countrynames2iso3']): for candidate in expanded_country_candidates: simplified_country, removed_words = cls.simplify_countryname(candidate) if simplified_country in countryname: words = get_words_in_sentence(countryname) new_match_strength = 0 if simplified_country: remove_matching_from_list(words, simplified_country) new_match_strength += 32 for word in removed_words: if word in countryname: remove_matching_from_list(words, word) new_match_strength += 4 else: if word in cls.major_differentiators: new_match_strength -= 16 else: new_match_strength -= 1 for word in words: if word in cls.major_differentiators: new_match_strength -= 16 else: new_match_strength -= 1 iso3 = countriesdata['countrynames2iso3'][countryname] if new_match_strength > match_strength: match_strength = new_match_strength matches = set() if new_match_strength 
== match_strength: matches.add(iso3) if len(matches) == 1 and match_strength > 16: return matches.pop(), False # regex lookup for iso3, regex in countriesdata['aliases'].items(): index = re.search(regex, country.upper()) if index is not None: return iso3, False if exception is not None: raise exception return None, False
[ "def", "get_iso3_country_code_fuzzy", "(", "cls", ",", "country", ",", "use_live", "=", "True", ",", "exception", "=", "None", ")", ":", "# type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]", "countriesdata", "=", "cls", ".", "countriesdata", ...
Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second showing if the match is exact or not. Args: country (str): Country for which to get ISO3 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
[ "Get", "ISO3", "code", "for", "cls", ".", "A", "tuple", "is", "returned", "with", "the", "first", "value", "being", "the", "ISO3", "code", "and", "the", "second", "showing", "if", "the", "match", "is", "exact", "or", "not", "." ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L486-L556
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
Country.get_countries_in_region
def get_countries_in_region(cls, region, use_live=True, exception=None): # type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str] """Get countries (ISO3 codes) in region Args: region (Union[int,str]): Three digit UNStats M49 region code or region name use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None. Returns: List(str): Sorted list of ISO3 country names """ countriesdata = cls.countriesdata(use_live=use_live) if isinstance(region, int): regioncode = region else: regionupper = region.upper() regioncode = countriesdata['regionnames2codes'].get(regionupper) if regioncode is not None: return countriesdata['regioncodes2countries'][regioncode] if exception is not None: raise exception return list()
python
def get_countries_in_region(cls, region, use_live=True, exception=None): # type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str] """Get countries (ISO3 codes) in region Args: region (Union[int,str]): Three digit UNStats M49 region code or region name use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None. Returns: List(str): Sorted list of ISO3 country names """ countriesdata = cls.countriesdata(use_live=use_live) if isinstance(region, int): regioncode = region else: regionupper = region.upper() regioncode = countriesdata['regionnames2codes'].get(regionupper) if regioncode is not None: return countriesdata['regioncodes2countries'][regioncode] if exception is not None: raise exception return list()
[ "def", "get_countries_in_region", "(", "cls", ",", "region", ",", "use_live", "=", "True", ",", "exception", "=", "None", ")", ":", "# type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]", "countriesdata", "=", "cls", ".", "countriesdata", "(", "use...
Get countries (ISO3 codes) in region Args: region (Union[int,str]): Three digit UNStats M49 region code or region name use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None. Returns: List(str): Sorted list of ISO3 country names
[ "Get", "countries", "(", "ISO3", "codes", ")", "in", "region" ]
train
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L559-L583
moonso/loqusdb
loqusdb/commands/load_profile.py
load_profile
def load_profile(ctx, variant_file, update, stats, profile_threshold): """ Command for profiling of samples. User may upload variants used in profiling from a vcf, update the profiles for all samples, and get some stats from the profiles in the database. Profiling is used to monitor duplicates in the database. The profile is based on the variants in the 'profile_variant' collection, assessing the genotypes for each sample at the position of these variants. """ adapter = ctx.obj['adapter'] if variant_file: load_profile_variants(adapter, variant_file) if update: update_profiles(adapter) if stats: distance_dict = profile_stats(adapter, threshold=profile_threshold) click.echo(table_from_dict(distance_dict))
python
def load_profile(ctx, variant_file, update, stats, profile_threshold): """ Command for profiling of samples. User may upload variants used in profiling from a vcf, update the profiles for all samples, and get some stats from the profiles in the database. Profiling is used to monitor duplicates in the database. The profile is based on the variants in the 'profile_variant' collection, assessing the genotypes for each sample at the position of these variants. """ adapter = ctx.obj['adapter'] if variant_file: load_profile_variants(adapter, variant_file) if update: update_profiles(adapter) if stats: distance_dict = profile_stats(adapter, threshold=profile_threshold) click.echo(table_from_dict(distance_dict))
[ "def", "load_profile", "(", "ctx", ",", "variant_file", ",", "update", ",", "stats", ",", "profile_threshold", ")", ":", "adapter", "=", "ctx", ".", "obj", "[", "'adapter'", "]", "if", "variant_file", ":", "load_profile_variants", "(", "adapter", ",", "varia...
Command for profiling of samples. User may upload variants used in profiling from a vcf, update the profiles for all samples, and get some stats from the profiles in the database. Profiling is used to monitor duplicates in the database. The profile is based on the variants in the 'profile_variant' collection, assessing the genotypes for each sample at the position of these variants.
[ "Command", "for", "profiling", "of", "samples", ".", "User", "may", "upload", "variants", "used", "in", "profiling", "from", "a", "vcf", "update", "the", "profiles", "for", "all", "samples", "and", "get", "some", "stats", "from", "the", "profiles", "in", "...
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/load_profile.py#L36-L59
moonso/loqusdb
loqusdb/plugins/mongo/profile_variant.py
ProfileVariantMixin.add_profile_variants
def add_profile_variants(self, profile_variants): """Add several variants to the profile_variant collection in the database Args: profile_variants(list(models.ProfileVariant)) """ results = self.db.profile_variant.insert_many(profile_variants) return results
python
def add_profile_variants(self, profile_variants): """Add several variants to the profile_variant collection in the database Args: profile_variants(list(models.ProfileVariant)) """ results = self.db.profile_variant.insert_many(profile_variants) return results
[ "def", "add_profile_variants", "(", "self", ",", "profile_variants", ")", ":", "results", "=", "self", ".", "db", ".", "profile_variant", ".", "insert_many", "(", "profile_variants", ")", "return", "results" ]
Add several variants to the profile_variant collection in the database Args: profile_variants(list(models.ProfileVariant))
[ "Add", "several", "variants", "to", "the", "profile_variant", "collection", "in", "the", "database" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/profile_variant.py#L7-L20
yjzhang/uncurl_python
uncurl/zip_clustering.py
zip_fit_params
def zip_fit_params(data): """ Returns the ZIP parameters that best fit a given data set. Args: data (array): 2d array of genes x cells belonging to a given cluster Returns: L (array): 1d array of means M (array): 1d array of zero-inflation parameter """ genes, cells = data.shape m = data.mean(1) v = data.var(1) M = (v-m)/(m**2+v-m) #M = v/(v+m**2) #M[np.isnan(M)] = 0.0 M = np.array([min(1.0, max(0.0, x)) for x in M]) L = m + v/m - 1.0 #L = (v + m**2)/m L[np.isnan(L)] = 0.0 L = np.array([max(0.0, x) for x in L]) return L, M
python
def zip_fit_params(data): """ Returns the ZIP parameters that best fit a given data set. Args: data (array): 2d array of genes x cells belonging to a given cluster Returns: L (array): 1d array of means M (array): 1d array of zero-inflation parameter """ genes, cells = data.shape m = data.mean(1) v = data.var(1) M = (v-m)/(m**2+v-m) #M = v/(v+m**2) #M[np.isnan(M)] = 0.0 M = np.array([min(1.0, max(0.0, x)) for x in M]) L = m + v/m - 1.0 #L = (v + m**2)/m L[np.isnan(L)] = 0.0 L = np.array([max(0.0, x) for x in L]) return L, M
[ "def", "zip_fit_params", "(", "data", ")", ":", "genes", ",", "cells", "=", "data", ".", "shape", "m", "=", "data", ".", "mean", "(", "1", ")", "v", "=", "data", ".", "var", "(", "1", ")", "M", "=", "(", "v", "-", "m", ")", "/", "(", "m", ...
Returns the ZIP parameters that best fit a given data set. Args: data (array): 2d array of genes x cells belonging to a given cluster Returns: L (array): 1d array of means M (array): 1d array of zero-inflation parameter
[ "Returns", "the", "ZIP", "parameters", "that", "best", "fit", "a", "given", "data", "set", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/zip_clustering.py#L11-L33
yjzhang/uncurl_python
uncurl/zip_clustering.py
zip_cluster
def zip_cluster(data, k, init=None, max_iters=100): """ Performs hard EM clustering using the zero-inflated Poisson distribution. Args: data (array): A 2d array- genes x cells k (int): Number of clusters init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++ max_iters (int, optional): Maximum number of iterations. Default: 100 Returns: assignments (array): integer assignments of cells to clusters (length cells) L (array): Poisson parameter (genes x k) M (array): zero-inflation parameter (genes x k) """ genes, cells = data.shape init, new_assignments = kmeans_pp(data+eps, k, centers=init) centers = np.copy(init) M = np.zeros(centers.shape) assignments = new_assignments for c in range(k): centers[:,c], M[:,c] = zip_fit_params_mle(data[:, assignments==c]) for it in range(max_iters): lls = zip_ll(data, centers, M) new_assignments = np.argmax(lls, 1) if np.equal(assignments, new_assignments).all(): return assignments, centers, M for c in range(k): centers[:,c], M[:,c] = zip_fit_params_mle(data[:, assignments==c]) assignments = new_assignments return assignments, centers, M
python
def zip_cluster(data, k, init=None, max_iters=100): """ Performs hard EM clustering using the zero-inflated Poisson distribution. Args: data (array): A 2d array- genes x cells k (int): Number of clusters init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++ max_iters (int, optional): Maximum number of iterations. Default: 100 Returns: assignments (array): integer assignments of cells to clusters (length cells) L (array): Poisson parameter (genes x k) M (array): zero-inflation parameter (genes x k) """ genes, cells = data.shape init, new_assignments = kmeans_pp(data+eps, k, centers=init) centers = np.copy(init) M = np.zeros(centers.shape) assignments = new_assignments for c in range(k): centers[:,c], M[:,c] = zip_fit_params_mle(data[:, assignments==c]) for it in range(max_iters): lls = zip_ll(data, centers, M) new_assignments = np.argmax(lls, 1) if np.equal(assignments, new_assignments).all(): return assignments, centers, M for c in range(k): centers[:,c], M[:,c] = zip_fit_params_mle(data[:, assignments==c]) assignments = new_assignments return assignments, centers, M
[ "def", "zip_cluster", "(", "data", ",", "k", ",", "init", "=", "None", ",", "max_iters", "=", "100", ")", ":", "genes", ",", "cells", "=", "data", ".", "shape", "init", ",", "new_assignments", "=", "kmeans_pp", "(", "data", "+", "eps", ",", "k", ",...
Performs hard EM clustering using the zero-inflated Poisson distribution. Args: data (array): A 2d array- genes x cells k (int): Number of clusters init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++ max_iters (int, optional): Maximum number of iterations. Default: 100 Returns: assignments (array): integer assignments of cells to clusters (length cells) L (array): Poisson parameter (genes x k) M (array): zero-inflation parameter (genes x k)
[ "Performs", "hard", "EM", "clustering", "using", "the", "zero", "-", "inflated", "Poisson", "distribution", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/zip_clustering.py#L46-L76
yjzhang/uncurl_python
uncurl/dimensionality_reduction.py
diffusion_mds
def diffusion_mds(means, weights, d, diffusion_rounds=10): """ Dimensionality reduction using MDS, while running diffusion on W. Args: means (array): genes x clusters weights (array): clusters x cells d (int): desired dimensionality Returns: W_reduced (array): array of shape (d, cells) """ for i in range(diffusion_rounds): weights = weights*weights weights = weights/weights.sum(0) X = dim_reduce(means, weights, d) if X.shape[0]==2: return X.dot(weights) else: return X.T.dot(weights)
python
def diffusion_mds(means, weights, d, diffusion_rounds=10): """ Dimensionality reduction using MDS, while running diffusion on W. Args: means (array): genes x clusters weights (array): clusters x cells d (int): desired dimensionality Returns: W_reduced (array): array of shape (d, cells) """ for i in range(diffusion_rounds): weights = weights*weights weights = weights/weights.sum(0) X = dim_reduce(means, weights, d) if X.shape[0]==2: return X.dot(weights) else: return X.T.dot(weights)
[ "def", "diffusion_mds", "(", "means", ",", "weights", ",", "d", ",", "diffusion_rounds", "=", "10", ")", ":", "for", "i", "in", "range", "(", "diffusion_rounds", ")", ":", "weights", "=", "weights", "*", "weights", "weights", "=", "weights", "/", "weight...
Dimensionality reduction using MDS, while running diffusion on W. Args: means (array): genes x clusters weights (array): clusters x cells d (int): desired dimensionality Returns: W_reduced (array): array of shape (d, cells)
[ "Dimensionality", "reduction", "using", "MDS", "while", "running", "diffusion", "on", "W", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/dimensionality_reduction.py#L9-L28
yjzhang/uncurl_python
uncurl/dimensionality_reduction.py
mds
def mds(means, weights, d): """ Dimensionality reduction using MDS. Args: means (array): genes x clusters weights (array): clusters x cells d (int): desired dimensionality Returns: W_reduced (array): array of shape (d, cells) """ X = dim_reduce(means, weights, d) if X.shape[0]==2: return X.dot(weights) else: return X.T.dot(weights)
python
def mds(means, weights, d): """ Dimensionality reduction using MDS. Args: means (array): genes x clusters weights (array): clusters x cells d (int): desired dimensionality Returns: W_reduced (array): array of shape (d, cells) """ X = dim_reduce(means, weights, d) if X.shape[0]==2: return X.dot(weights) else: return X.T.dot(weights)
[ "def", "mds", "(", "means", ",", "weights", ",", "d", ")", ":", "X", "=", "dim_reduce", "(", "means", ",", "weights", ",", "d", ")", "if", "X", ".", "shape", "[", "0", "]", "==", "2", ":", "return", "X", ".", "dot", "(", "weights", ")", "else...
Dimensionality reduction using MDS. Args: means (array): genes x clusters weights (array): clusters x cells d (int): desired dimensionality Returns: W_reduced (array): array of shape (d, cells)
[ "Dimensionality", "reduction", "using", "MDS", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/dimensionality_reduction.py#L31-L47
yjzhang/uncurl_python
uncurl/dimensionality_reduction.py
dim_reduce_data
def dim_reduce_data(data, d): """ Does a MDS on the data directly, not on the means. Args: data (array): genes x cells d (int): desired dimensionality Returns: X, a cells x d matrix """ genes, cells = data.shape distances = np.zeros((cells, cells)) for i in range(cells): for j in range(cells): distances[i,j] = poisson_dist(data[:,i], data[:,j]) # do MDS on the distance matrix (procedure from Wikipedia) proximity = distances**2 J = np.eye(cells) - 1./cells B = -0.5*np.dot(J, np.dot(proximity, J)) # B should be symmetric, so we can use eigh e_val, e_vec = np.linalg.eigh(B) # Note: lam should be ordered to be the largest eigenvalues lam = np.diag(e_val[-d:])[::-1] #lam = max_or_zero(lam) E = e_vec[:,-d:][::-1] X = np.dot(E, lam**0.5) return X
python
def dim_reduce_data(data, d): """ Does a MDS on the data directly, not on the means. Args: data (array): genes x cells d (int): desired dimensionality Returns: X, a cells x d matrix """ genes, cells = data.shape distances = np.zeros((cells, cells)) for i in range(cells): for j in range(cells): distances[i,j] = poisson_dist(data[:,i], data[:,j]) # do MDS on the distance matrix (procedure from Wikipedia) proximity = distances**2 J = np.eye(cells) - 1./cells B = -0.5*np.dot(J, np.dot(proximity, J)) # B should be symmetric, so we can use eigh e_val, e_vec = np.linalg.eigh(B) # Note: lam should be ordered to be the largest eigenvalues lam = np.diag(e_val[-d:])[::-1] #lam = max_or_zero(lam) E = e_vec[:,-d:][::-1] X = np.dot(E, lam**0.5) return X
[ "def", "dim_reduce_data", "(", "data", ",", "d", ")", ":", "genes", ",", "cells", "=", "data", ".", "shape", "distances", "=", "np", ".", "zeros", "(", "(", "cells", ",", "cells", ")", ")", "for", "i", "in", "range", "(", "cells", ")", ":", "for"...
Does a MDS on the data directly, not on the means. Args: data (array): genes x cells d (int): desired dimensionality Returns: X, a cells x d matrix
[ "Does", "a", "MDS", "on", "the", "data", "directly", "not", "on", "the", "means", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/dimensionality_reduction.py#L64-L91
moonso/loqusdb
loqusdb/plugins/mongo/case.py
CaseMixin.case
def case(self, case): """Get a case from the database Search the cases with the case id Args: case (dict): A case dictionary Returns: mongo_case (dict): A mongo case dictionary """ LOG.debug("Getting case {0} from database".format(case.get('case_id'))) case_id = case['case_id'] return self.db.case.find_one({'case_id': case_id})
python
def case(self, case): """Get a case from the database Search the cases with the case id Args: case (dict): A case dictionary Returns: mongo_case (dict): A mongo case dictionary """ LOG.debug("Getting case {0} from database".format(case.get('case_id'))) case_id = case['case_id'] return self.db.case.find_one({'case_id': case_id})
[ "def", "case", "(", "self", ",", "case", ")", ":", "LOG", ".", "debug", "(", "\"Getting case {0} from database\"", ".", "format", "(", "case", ".", "get", "(", "'case_id'", ")", ")", ")", "case_id", "=", "case", "[", "'case_id'", "]", "return", "self", ...
Get a case from the database Search the cases with the case id Args: case (dict): A case dictionary Returns: mongo_case (dict): A mongo case dictionary
[ "Get", "a", "case", "from", "the", "database" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/case.py#L11-L24
moonso/loqusdb
loqusdb/plugins/mongo/case.py
CaseMixin.nr_cases
def nr_cases(self, snv_cases=None, sv_cases=None): """Return the number of cases in the database Args: snv_cases(bool): If only snv cases should be searched sv_cases(bool): If only snv cases should be searched Returns: cases (Iterable(Case)): A iterable with mongo cases """ query = {} if snv_cases: query = {'vcf_path': {'$exists':True}} if sv_cases: query = {'vcf_sv_path': {'$exists':True}} if snv_cases and sv_cases: query = None return self.db.case.count_documents(query)
python
def nr_cases(self, snv_cases=None, sv_cases=None): """Return the number of cases in the database Args: snv_cases(bool): If only snv cases should be searched sv_cases(bool): If only snv cases should be searched Returns: cases (Iterable(Case)): A iterable with mongo cases """ query = {} if snv_cases: query = {'vcf_path': {'$exists':True}} if sv_cases: query = {'vcf_sv_path': {'$exists':True}} if snv_cases and sv_cases: query = None return self.db.case.count_documents(query)
[ "def", "nr_cases", "(", "self", ",", "snv_cases", "=", "None", ",", "sv_cases", "=", "None", ")", ":", "query", "=", "{", "}", "if", "snv_cases", ":", "query", "=", "{", "'vcf_path'", ":", "{", "'$exists'", ":", "True", "}", "}", "if", "sv_cases", ...
Return the number of cases in the database Args: snv_cases(bool): If only snv cases should be searched sv_cases(bool): If only snv cases should be searched Returns: cases (Iterable(Case)): A iterable with mongo cases
[ "Return", "the", "number", "of", "cases", "in", "the", "database" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/case.py#L35-L54
moonso/loqusdb
loqusdb/plugins/mongo/case.py
CaseMixin.add_case
def add_case(self, case, update=False): """Add a case to the case collection If the case exists and update is False raise error. Args: db (MongoClient): A connection to the mongodb case (dict): A case dictionary update(bool): If existing case should be updated Returns: mongo_case_id(ObjectId) """ existing_case = self.case(case) if existing_case and not update: raise CaseError("Case {} already exists".format(case['case_id'])) if existing_case: self.db.case.find_one_and_replace( {'case_id': case['case_id']}, case, ) else: self.db.case.insert_one(case) return case
python
def add_case(self, case, update=False): """Add a case to the case collection If the case exists and update is False raise error. Args: db (MongoClient): A connection to the mongodb case (dict): A case dictionary update(bool): If existing case should be updated Returns: mongo_case_id(ObjectId) """ existing_case = self.case(case) if existing_case and not update: raise CaseError("Case {} already exists".format(case['case_id'])) if existing_case: self.db.case.find_one_and_replace( {'case_id': case['case_id']}, case, ) else: self.db.case.insert_one(case) return case
[ "def", "add_case", "(", "self", ",", "case", ",", "update", "=", "False", ")", ":", "existing_case", "=", "self", ".", "case", "(", "case", ")", "if", "existing_case", "and", "not", "update", ":", "raise", "CaseError", "(", "\"Case {} already exists\"", "....
Add a case to the case collection If the case exists and update is False raise error. Args: db (MongoClient): A connection to the mongodb case (dict): A case dictionary update(bool): If existing case should be updated Returns: mongo_case_id(ObjectId)
[ "Add", "a", "case", "to", "the", "case", "collection" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/case.py#L57-L82
moonso/loqusdb
loqusdb/plugins/mongo/case.py
CaseMixin.delete_case
def delete_case(self, case): """Delete case from the database Delete a case from the database Args: case (dict): A case dictionary """ mongo_case = self.case(case) if not mongo_case: raise CaseError("Tried to delete case {0} but could not find case".format( case.get('case_id') )) LOG.info("Removing case {0} from database".format( mongo_case.get('case_id') )) self.db.case.delete_one({'_id': mongo_case['_id']}) return
python
def delete_case(self, case): """Delete case from the database Delete a case from the database Args: case (dict): A case dictionary """ mongo_case = self.case(case) if not mongo_case: raise CaseError("Tried to delete case {0} but could not find case".format( case.get('case_id') )) LOG.info("Removing case {0} from database".format( mongo_case.get('case_id') )) self.db.case.delete_one({'_id': mongo_case['_id']}) return
[ "def", "delete_case", "(", "self", ",", "case", ")", ":", "mongo_case", "=", "self", ".", "case", "(", "case", ")", "if", "not", "mongo_case", ":", "raise", "CaseError", "(", "\"Tried to delete case {0} but could not find case\"", ".", "format", "(", "case", "...
Delete case from the database Delete a case from the database Args: case (dict): A case dictionary
[ "Delete", "case", "from", "the", "database" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/case.py#L84-L104
MainRo/cyclotron-py
cyclotron/rx.py
make_sink_proxies
def make_sink_proxies(drivers): ''' Build a list of sink proxies. sink proxies are a two-level ordered dictionary. The first level contains the lst of drivers, and the second level contains the list of sink proxies for each driver: drv1-->sink1 | |->sink2 | drv2-->sink1 |->sink2 ''' sink_proxies = OrderedDict() if drivers is not None: for driver_name in drivers._fields: driver = getattr(drivers, driver_name) driver_sink = getattr(driver, 'input') if driver_sink is not None: driver_sink_proxies = OrderedDict() for name in driver_sink._fields: driver_sink_proxies[name] = Subject() sink_proxies[driver_name] = driver.input(**driver_sink_proxies) return sink_proxies
python
def make_sink_proxies(drivers): ''' Build a list of sink proxies. sink proxies are a two-level ordered dictionary. The first level contains the lst of drivers, and the second level contains the list of sink proxies for each driver: drv1-->sink1 | |->sink2 | drv2-->sink1 |->sink2 ''' sink_proxies = OrderedDict() if drivers is not None: for driver_name in drivers._fields: driver = getattr(drivers, driver_name) driver_sink = getattr(driver, 'input') if driver_sink is not None: driver_sink_proxies = OrderedDict() for name in driver_sink._fields: driver_sink_proxies[name] = Subject() sink_proxies[driver_name] = driver.input(**driver_sink_proxies) return sink_proxies
[ "def", "make_sink_proxies", "(", "drivers", ")", ":", "sink_proxies", "=", "OrderedDict", "(", ")", "if", "drivers", "is", "not", "None", ":", "for", "driver_name", "in", "drivers", ".", "_fields", ":", "driver", "=", "getattr", "(", "drivers", ",", "drive...
Build a list of sink proxies. sink proxies are a two-level ordered dictionary. The first level contains the lst of drivers, and the second level contains the list of sink proxies for each driver: drv1-->sink1 | |->sink2 | drv2-->sink1 |->sink2
[ "Build", "a", "list", "of", "sink", "proxies", ".", "sink", "proxies", "are", "a", "two", "-", "level", "ordered", "dictionary", ".", "The", "first", "level", "contains", "the", "lst", "of", "drivers", "and", "the", "second", "level", "contains", "the", ...
train
https://github.com/MainRo/cyclotron-py/blob/4530f65173aa4b9e27c3d4a2f5d33900fc19f754/cyclotron/rx.py#L8-L30
moonso/loqusdb
loqusdb/build_models/profile_variant.py
build_profile_variant
def build_profile_variant(variant): """Returns a ProfileVariant object Args: variant (cyvcf2.Variant) Returns: variant (models.ProfileVariant) """ chrom = variant.CHROM if chrom.startswith(('chr', 'CHR', 'Chr')): chrom = chrom[3:] pos = int(variant.POS) variant_id = get_variant_id(variant) ref = variant.REF alt = variant.ALT[0] maf = get_maf(variant) profile_variant = ProfileVariant( variant_id=variant_id, chrom=chrom, pos=pos, ref=ref, alt=alt, maf=maf, id_column = variant.ID ) return profile_variant
python
def build_profile_variant(variant): """Returns a ProfileVariant object Args: variant (cyvcf2.Variant) Returns: variant (models.ProfileVariant) """ chrom = variant.CHROM if chrom.startswith(('chr', 'CHR', 'Chr')): chrom = chrom[3:] pos = int(variant.POS) variant_id = get_variant_id(variant) ref = variant.REF alt = variant.ALT[0] maf = get_maf(variant) profile_variant = ProfileVariant( variant_id=variant_id, chrom=chrom, pos=pos, ref=ref, alt=alt, maf=maf, id_column = variant.ID ) return profile_variant
[ "def", "build_profile_variant", "(", "variant", ")", ":", "chrom", "=", "variant", ".", "CHROM", "if", "chrom", ".", "startswith", "(", "(", "'chr'", ",", "'CHR'", ",", "'Chr'", ")", ")", ":", "chrom", "=", "chrom", "[", "3", ":", "]", "pos", "=", ...
Returns a ProfileVariant object Args: variant (cyvcf2.Variant) Returns: variant (models.ProfileVariant)
[ "Returns", "a", "ProfileVariant", "object" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/build_models/profile_variant.py#L24-L57
moonso/loqusdb
loqusdb/utils/vcf.py
add_headers
def add_headers(vcf_obj, nr_cases=None, sv=False): """Add loqus specific information to a VCF header Args: vcf_obj(cyvcf2.VCF) """ vcf_obj.add_info_to_header( { 'ID':"Obs", 'Number': '1', 'Type': 'Integer', 'Description': "The number of observations for the variant"} ) if not sv: vcf_obj.add_info_to_header( { 'ID':"Hom", 'Number': '1', 'Type': 'Integer', 'Description': "The number of observed homozygotes"} ) vcf_obj.add_info_to_header( { 'ID':"Hem", 'Number': '1', 'Type': 'Integer', 'Description': "The number of observed hemizygotes"} ) if nr_cases: case_header = "##NrCases={}".format(nr_cases) vcf_obj.add_to_header(case_header) # head.add_version_tracking("loqusdb", version, datetime.now().strftime("%Y-%m-%d %H:%M")) return
python
def add_headers(vcf_obj, nr_cases=None, sv=False): """Add loqus specific information to a VCF header Args: vcf_obj(cyvcf2.VCF) """ vcf_obj.add_info_to_header( { 'ID':"Obs", 'Number': '1', 'Type': 'Integer', 'Description': "The number of observations for the variant"} ) if not sv: vcf_obj.add_info_to_header( { 'ID':"Hom", 'Number': '1', 'Type': 'Integer', 'Description': "The number of observed homozygotes"} ) vcf_obj.add_info_to_header( { 'ID':"Hem", 'Number': '1', 'Type': 'Integer', 'Description': "The number of observed hemizygotes"} ) if nr_cases: case_header = "##NrCases={}".format(nr_cases) vcf_obj.add_to_header(case_header) # head.add_version_tracking("loqusdb", version, datetime.now().strftime("%Y-%m-%d %H:%M")) return
[ "def", "add_headers", "(", "vcf_obj", ",", "nr_cases", "=", "None", ",", "sv", "=", "False", ")", ":", "vcf_obj", ".", "add_info_to_header", "(", "{", "'ID'", ":", "\"Obs\"", ",", "'Number'", ":", "'1'", ",", "'Type'", ":", "'Integer'", ",", "'Descriptio...
Add loqus specific information to a VCF header Args: vcf_obj(cyvcf2.VCF)
[ "Add", "loqus", "specific", "information", "to", "a", "VCF", "header" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/vcf.py#L12-L45
moonso/loqusdb
loqusdb/utils/vcf.py
get_file_handle
def get_file_handle(file_path): """Return cyvcf2 VCF object Args: file_path(str) Returns: vcf_obj(cyvcf2.VCF) """ LOG.debug("Check if file end is correct") if not os.path.exists(file_path): raise IOError("No such file:{0}".format(file_path)) if not os.path.splitext(file_path)[-1] in VALID_ENDINGS: raise IOError("Not a valid vcf file name: {}".format(file_path)) vcf_obj = VCF(file_path) return vcf_obj
python
def get_file_handle(file_path): """Return cyvcf2 VCF object Args: file_path(str) Returns: vcf_obj(cyvcf2.VCF) """ LOG.debug("Check if file end is correct") if not os.path.exists(file_path): raise IOError("No such file:{0}".format(file_path)) if not os.path.splitext(file_path)[-1] in VALID_ENDINGS: raise IOError("Not a valid vcf file name: {}".format(file_path)) vcf_obj = VCF(file_path) return vcf_obj
[ "def", "get_file_handle", "(", "file_path", ")", ":", "LOG", ".", "debug", "(", "\"Check if file end is correct\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "raise", "IOError", "(", "\"No such file:{0}\"", ".", "format", ...
Return cyvcf2 VCF object Args: file_path(str) Returns: vcf_obj(cyvcf2.VCF)
[ "Return", "cyvcf2", "VCF", "object" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/vcf.py#L49-L68
moonso/loqusdb
loqusdb/utils/vcf.py
check_vcf
def check_vcf(vcf_path, expected_type='snv'): """Check if there are any problems with the vcf file Args: vcf_path(str) expected_type(str): 'sv' or 'snv' Returns: vcf_info(dict): dict like { 'nr_variants':<INT>, 'variant_type': <STR> in ['snv', 'sv'], 'individuals': <LIST> individual positions in file } """ LOG.info("Check if vcf is on correct format...") vcf = VCF(vcf_path) individuals = vcf.samples variant_type = None previous_pos = None previous_chrom = None posititon_variants = set() nr_variants = 0 for nr_variants,variant in enumerate(vcf,1): # Check the type of variant current_type = 'sv' if variant.var_type == 'sv' else 'snv' if not variant_type: variant_type = current_type # Vcf can not include both snvs and svs if variant_type != current_type: raise VcfError("Vcf includes a mix of snvs and svs") current_chrom = variant.CHROM current_pos = variant.POS # We start with a simple id that can be used by SV:s variant_id = "{0}_{1}".format(current_chrom, current_pos) # For SNVs we can create a proper variant id with chrom_pos_ref_alt if variant_type == 'snv': variant_id = get_variant_id(variant) # Initiate variables if not previous_chrom: previous_chrom = current_chrom previous_pos = current_pos posititon_variants = set([variant_id]) continue # Update variables if new chromosome if current_chrom != previous_chrom: previous_chrom = current_chrom previous_pos = current_pos posititon_variants = set([variant_id]) continue if variant_type == 'snv': # Check if variant is unique if current_pos == previous_pos: if variant_id in posititon_variants: raise VcfError("Variant {0} occurs several times"\ " in vcf".format(variant_id)) else: posititon_variants.add(variant_id) # Check if vcf is sorted else: if not current_pos >= previous_pos: raise VcfError("Vcf if not sorted in a correct way") previous_pos = current_pos # Reset posititon_variants since we are on a new position posititon_variants = set([variant_id]) if variant_type != expected_type: raise VcfError("VCF file does not 
only include {0}s, please check vcf {1}".format( expected_type.upper(), vcf_path)) LOG.info("Vcf file %s looks fine", vcf_path) LOG.info("Nr of variants in vcf: {0}".format(nr_variants)) LOG.info("Type of variants in vcf: {0}".format(variant_type)) vcf_info = { 'nr_variants': nr_variants, 'variant_type': variant_type, 'individuals': individuals, } return vcf_info
python
def check_vcf(vcf_path, expected_type='snv'): """Check if there are any problems with the vcf file Args: vcf_path(str) expected_type(str): 'sv' or 'snv' Returns: vcf_info(dict): dict like { 'nr_variants':<INT>, 'variant_type': <STR> in ['snv', 'sv'], 'individuals': <LIST> individual positions in file } """ LOG.info("Check if vcf is on correct format...") vcf = VCF(vcf_path) individuals = vcf.samples variant_type = None previous_pos = None previous_chrom = None posititon_variants = set() nr_variants = 0 for nr_variants,variant in enumerate(vcf,1): # Check the type of variant current_type = 'sv' if variant.var_type == 'sv' else 'snv' if not variant_type: variant_type = current_type # Vcf can not include both snvs and svs if variant_type != current_type: raise VcfError("Vcf includes a mix of snvs and svs") current_chrom = variant.CHROM current_pos = variant.POS # We start with a simple id that can be used by SV:s variant_id = "{0}_{1}".format(current_chrom, current_pos) # For SNVs we can create a proper variant id with chrom_pos_ref_alt if variant_type == 'snv': variant_id = get_variant_id(variant) # Initiate variables if not previous_chrom: previous_chrom = current_chrom previous_pos = current_pos posititon_variants = set([variant_id]) continue # Update variables if new chromosome if current_chrom != previous_chrom: previous_chrom = current_chrom previous_pos = current_pos posititon_variants = set([variant_id]) continue if variant_type == 'snv': # Check if variant is unique if current_pos == previous_pos: if variant_id in posititon_variants: raise VcfError("Variant {0} occurs several times"\ " in vcf".format(variant_id)) else: posititon_variants.add(variant_id) # Check if vcf is sorted else: if not current_pos >= previous_pos: raise VcfError("Vcf if not sorted in a correct way") previous_pos = current_pos # Reset posititon_variants since we are on a new position posititon_variants = set([variant_id]) if variant_type != expected_type: raise VcfError("VCF file does not 
only include {0}s, please check vcf {1}".format( expected_type.upper(), vcf_path)) LOG.info("Vcf file %s looks fine", vcf_path) LOG.info("Nr of variants in vcf: {0}".format(nr_variants)) LOG.info("Type of variants in vcf: {0}".format(variant_type)) vcf_info = { 'nr_variants': nr_variants, 'variant_type': variant_type, 'individuals': individuals, } return vcf_info
[ "def", "check_vcf", "(", "vcf_path", ",", "expected_type", "=", "'snv'", ")", ":", "LOG", ".", "info", "(", "\"Check if vcf is on correct format...\"", ")", "vcf", "=", "VCF", "(", "vcf_path", ")", "individuals", "=", "vcf", ".", "samples", "variant_type", "="...
Check if there are any problems with the vcf file Args: vcf_path(str) expected_type(str): 'sv' or 'snv' Returns: vcf_info(dict): dict like { 'nr_variants':<INT>, 'variant_type': <STR> in ['snv', 'sv'], 'individuals': <LIST> individual positions in file }
[ "Check", "if", "there", "are", "any", "problems", "with", "the", "vcf", "file" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/vcf.py#L89-L180
xi/ldif3
ldif3.py
is_dn
def is_dn(s): """Return True if s is a LDAP DN.""" if s == '': return True rm = DN_REGEX.match(s) return rm is not None and rm.group(0) == s
python
def is_dn(s): """Return True if s is a LDAP DN.""" if s == '': return True rm = DN_REGEX.match(s) return rm is not None and rm.group(0) == s
[ "def", "is_dn", "(", "s", ")", ":", "if", "s", "==", "''", ":", "return", "True", "rm", "=", "DN_REGEX", ".", "match", "(", "s", ")", "return", "rm", "is", "not", "None", "and", "rm", ".", "group", "(", "0", ")", "==", "s" ]
Return True if s is a LDAP DN.
[ "Return", "True", "if", "s", "is", "a", "LDAP", "DN", "." ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L43-L48
xi/ldif3
ldif3.py
LDIFWriter._fold_line
def _fold_line(self, line): """Write string line as one or more folded lines.""" if len(line) <= self._cols: self._output_file.write(line) self._output_file.write(self._line_sep) else: pos = self._cols self._output_file.write(line[0:self._cols]) self._output_file.write(self._line_sep) while pos < len(line): self._output_file.write(b' ') end = min(len(line), pos + self._cols - 1) self._output_file.write(line[pos:end]) self._output_file.write(self._line_sep) pos = end
python
def _fold_line(self, line): """Write string line as one or more folded lines.""" if len(line) <= self._cols: self._output_file.write(line) self._output_file.write(self._line_sep) else: pos = self._cols self._output_file.write(line[0:self._cols]) self._output_file.write(self._line_sep) while pos < len(line): self._output_file.write(b' ') end = min(len(line), pos + self._cols - 1) self._output_file.write(line[pos:end]) self._output_file.write(self._line_sep) pos = end
[ "def", "_fold_line", "(", "self", ",", "line", ")", ":", "if", "len", "(", "line", ")", "<=", "self", ".", "_cols", ":", "self", ".", "_output_file", ".", "write", "(", "line", ")", "self", ".", "_output_file", ".", "write", "(", "self", ".", "_lin...
Write string line as one or more folded lines.
[ "Write", "string", "line", "as", "one", "or", "more", "folded", "lines", "." ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L100-L114
xi/ldif3
ldif3.py
LDIFWriter._needs_base64_encoding
def _needs_base64_encoding(self, attr_type, attr_value): """Return True if attr_value has to be base-64 encoded. This is the case because of special chars or because attr_type is in self._base64_attrs """ return attr_type.lower() in self._base64_attrs or \ isinstance(attr_value, bytes) or \ UNSAFE_STRING_RE.search(attr_value) is not None
python
def _needs_base64_encoding(self, attr_type, attr_value): """Return True if attr_value has to be base-64 encoded. This is the case because of special chars or because attr_type is in self._base64_attrs """ return attr_type.lower() in self._base64_attrs or \ isinstance(attr_value, bytes) or \ UNSAFE_STRING_RE.search(attr_value) is not None
[ "def", "_needs_base64_encoding", "(", "self", ",", "attr_type", ",", "attr_value", ")", ":", "return", "attr_type", ".", "lower", "(", ")", "in", "self", ".", "_base64_attrs", "or", "isinstance", "(", "attr_value", ",", "bytes", ")", "or", "UNSAFE_STRING_RE", ...
Return True if attr_value has to be base-64 encoded. This is the case because of special chars or because attr_type is in self._base64_attrs
[ "Return", "True", "if", "attr_value", "has", "to", "be", "base", "-", "64", "encoded", "." ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L116-L124
xi/ldif3
ldif3.py
LDIFWriter._unparse_attr
def _unparse_attr(self, attr_type, attr_value): """Write a single attribute type/value pair.""" if self._needs_base64_encoding(attr_type, attr_value): if not isinstance(attr_value, bytes): attr_value = attr_value.encode(self._encoding) encoded = base64.encodestring(attr_value)\ .replace(b'\n', b'')\ .decode('ascii') line = ':: '.join([attr_type, encoded]) else: line = ': '.join([attr_type, attr_value]) self._fold_line(line.encode('ascii'))
python
def _unparse_attr(self, attr_type, attr_value): """Write a single attribute type/value pair.""" if self._needs_base64_encoding(attr_type, attr_value): if not isinstance(attr_value, bytes): attr_value = attr_value.encode(self._encoding) encoded = base64.encodestring(attr_value)\ .replace(b'\n', b'')\ .decode('ascii') line = ':: '.join([attr_type, encoded]) else: line = ': '.join([attr_type, attr_value]) self._fold_line(line.encode('ascii'))
[ "def", "_unparse_attr", "(", "self", ",", "attr_type", ",", "attr_value", ")", ":", "if", "self", ".", "_needs_base64_encoding", "(", "attr_type", ",", "attr_value", ")", ":", "if", "not", "isinstance", "(", "attr_value", ",", "bytes", ")", ":", "attr_value"...
Write a single attribute type/value pair.
[ "Write", "a", "single", "attribute", "type", "/", "value", "pair", "." ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L126-L137
xi/ldif3
ldif3.py
LDIFWriter._unparse_entry_record
def _unparse_entry_record(self, entry): """ :type entry: Dict[string, List[string]] :param entry: Dictionary holding an entry """ for attr_type in sorted(entry.keys()): for attr_value in entry[attr_type]: self._unparse_attr(attr_type, attr_value)
python
def _unparse_entry_record(self, entry): """ :type entry: Dict[string, List[string]] :param entry: Dictionary holding an entry """ for attr_type in sorted(entry.keys()): for attr_value in entry[attr_type]: self._unparse_attr(attr_type, attr_value)
[ "def", "_unparse_entry_record", "(", "self", ",", "entry", ")", ":", "for", "attr_type", "in", "sorted", "(", "entry", ".", "keys", "(", ")", ")", ":", "for", "attr_value", "in", "entry", "[", "attr_type", "]", ":", "self", ".", "_unparse_attr", "(", "...
:type entry: Dict[string, List[string]] :param entry: Dictionary holding an entry
[ ":", "type", "entry", ":", "Dict", "[", "string", "List", "[", "string", "]]", ":", "param", "entry", ":", "Dictionary", "holding", "an", "entry" ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L139-L146
xi/ldif3
ldif3.py
LDIFWriter._unparse_changetype
def _unparse_changetype(self, mod_len): """Detect and write the changetype.""" if mod_len == 2: changetype = 'add' elif mod_len == 3: changetype = 'modify' else: raise ValueError("modlist item of wrong length") self._unparse_attr('changetype', changetype)
python
def _unparse_changetype(self, mod_len): """Detect and write the changetype.""" if mod_len == 2: changetype = 'add' elif mod_len == 3: changetype = 'modify' else: raise ValueError("modlist item of wrong length") self._unparse_attr('changetype', changetype)
[ "def", "_unparse_changetype", "(", "self", ",", "mod_len", ")", ":", "if", "mod_len", "==", "2", ":", "changetype", "=", "'add'", "elif", "mod_len", "==", "3", ":", "changetype", "=", "'modify'", "else", ":", "raise", "ValueError", "(", "\"modlist item of wr...
Detect and write the changetype.
[ "Detect", "and", "write", "the", "changetype", "." ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L148-L157
xi/ldif3
ldif3.py
LDIFWriter._unparse_change_record
def _unparse_change_record(self, modlist): """ :type modlist: List[Tuple] :param modlist: List of additions (2-tuple) or modifications (3-tuple) """ mod_len = len(modlist[0]) self._unparse_changetype(mod_len) for mod in modlist: if len(mod) != mod_len: raise ValueError("Subsequent modlist item of wrong length") if mod_len == 2: mod_type, mod_vals = mod elif mod_len == 3: mod_op, mod_type, mod_vals = mod self._unparse_attr(MOD_OPS[mod_op], mod_type) for mod_val in mod_vals: self._unparse_attr(mod_type, mod_val) if mod_len == 3: self._output_file.write(b'-' + self._line_sep)
python
def _unparse_change_record(self, modlist): """ :type modlist: List[Tuple] :param modlist: List of additions (2-tuple) or modifications (3-tuple) """ mod_len = len(modlist[0]) self._unparse_changetype(mod_len) for mod in modlist: if len(mod) != mod_len: raise ValueError("Subsequent modlist item of wrong length") if mod_len == 2: mod_type, mod_vals = mod elif mod_len == 3: mod_op, mod_type, mod_vals = mod self._unparse_attr(MOD_OPS[mod_op], mod_type) for mod_val in mod_vals: self._unparse_attr(mod_type, mod_val) if mod_len == 3: self._output_file.write(b'-' + self._line_sep)
[ "def", "_unparse_change_record", "(", "self", ",", "modlist", ")", ":", "mod_len", "=", "len", "(", "modlist", "[", "0", "]", ")", "self", ".", "_unparse_changetype", "(", "mod_len", ")", "for", "mod", "in", "modlist", ":", "if", "len", "(", "mod", ")"...
:type modlist: List[Tuple] :param modlist: List of additions (2-tuple) or modifications (3-tuple)
[ ":", "type", "modlist", ":", "List", "[", "Tuple", "]", ":", "param", "modlist", ":", "List", "of", "additions", "(", "2", "-", "tuple", ")", "or", "modifications", "(", "3", "-", "tuple", ")" ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L159-L181
xi/ldif3
ldif3.py
LDIFWriter.unparse
def unparse(self, dn, record): """Write an entry or change record to the output file. :type dn: string :param dn: distinguished name :type record: Union[Dict[string, List[string]], List[Tuple]] :param record: Either a dictionary holding an entry or a list of additions (2-tuple) or modifications (3-tuple). """ self._unparse_attr('dn', dn) if isinstance(record, dict): self._unparse_entry_record(record) elif isinstance(record, list): self._unparse_change_record(record) else: raise ValueError("Argument record must be dictionary or list") self._output_file.write(self._line_sep) self.records_written += 1
python
def unparse(self, dn, record): """Write an entry or change record to the output file. :type dn: string :param dn: distinguished name :type record: Union[Dict[string, List[string]], List[Tuple]] :param record: Either a dictionary holding an entry or a list of additions (2-tuple) or modifications (3-tuple). """ self._unparse_attr('dn', dn) if isinstance(record, dict): self._unparse_entry_record(record) elif isinstance(record, list): self._unparse_change_record(record) else: raise ValueError("Argument record must be dictionary or list") self._output_file.write(self._line_sep) self.records_written += 1
[ "def", "unparse", "(", "self", ",", "dn", ",", "record", ")", ":", "self", ".", "_unparse_attr", "(", "'dn'", ",", "dn", ")", "if", "isinstance", "(", "record", ",", "dict", ")", ":", "self", ".", "_unparse_entry_record", "(", "record", ")", "elif", ...
Write an entry or change record to the output file. :type dn: string :param dn: distinguished name :type record: Union[Dict[string, List[string]], List[Tuple]] :param record: Either a dictionary holding an entry or a list of additions (2-tuple) or modifications (3-tuple).
[ "Write", "an", "entry", "or", "change", "record", "to", "the", "output", "file", "." ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L183-L201
xi/ldif3
ldif3.py
LDIFParser._strip_line_sep
def _strip_line_sep(self, s): """Strip trailing line separators from s, but no other whitespaces.""" if s[-2:] == b'\r\n': return s[:-2] elif s[-1:] == b'\n': return s[:-1] else: return s
python
def _strip_line_sep(self, s): """Strip trailing line separators from s, but no other whitespaces.""" if s[-2:] == b'\r\n': return s[:-2] elif s[-1:] == b'\n': return s[:-1] else: return s
[ "def", "_strip_line_sep", "(", "self", ",", "s", ")", ":", "if", "s", "[", "-", "2", ":", "]", "==", "b'\\r\\n'", ":", "return", "s", "[", ":", "-", "2", "]", "elif", "s", "[", "-", "1", ":", "]", "==", "b'\\n'", ":", "return", "s", "[", ":...
Strip trailing line separators from s, but no other whitespaces.
[ "Strip", "trailing", "line", "separators", "from", "s", "but", "no", "other", "whitespaces", "." ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L233-L240
xi/ldif3
ldif3.py
LDIFParser._iter_unfolded_lines
def _iter_unfolded_lines(self): """Iter input unfoled lines. Skip comments.""" line = self._input_file.readline() while line: self.line_counter += 1 self.byte_counter += len(line) line = self._strip_line_sep(line) nextline = self._input_file.readline() while nextline and nextline[:1] == b' ': line += self._strip_line_sep(nextline)[1:] nextline = self._input_file.readline() if not line.startswith(b'#'): yield line line = nextline
python
def _iter_unfolded_lines(self): """Iter input unfoled lines. Skip comments.""" line = self._input_file.readline() while line: self.line_counter += 1 self.byte_counter += len(line) line = self._strip_line_sep(line) nextline = self._input_file.readline() while nextline and nextline[:1] == b' ': line += self._strip_line_sep(nextline)[1:] nextline = self._input_file.readline() if not line.startswith(b'#'): yield line line = nextline
[ "def", "_iter_unfolded_lines", "(", "self", ")", ":", "line", "=", "self", ".", "_input_file", ".", "readline", "(", ")", "while", "line", ":", "self", ".", "line_counter", "+=", "1", "self", ".", "byte_counter", "+=", "len", "(", "line", ")", "line", ...
Iter input unfoled lines. Skip comments.
[ "Iter", "input", "unfoled", "lines", ".", "Skip", "comments", "." ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L261-L277
xi/ldif3
ldif3.py
LDIFParser._iter_blocks
def _iter_blocks(self): """Iter input lines in blocks separated by blank lines.""" lines = [] for line in self._iter_unfolded_lines(): if line: lines.append(line) elif lines: self.records_read += 1 yield lines lines = [] if lines: self.records_read += 1 yield lines
python
def _iter_blocks(self): """Iter input lines in blocks separated by blank lines.""" lines = [] for line in self._iter_unfolded_lines(): if line: lines.append(line) elif lines: self.records_read += 1 yield lines lines = [] if lines: self.records_read += 1 yield lines
[ "def", "_iter_blocks", "(", "self", ")", ":", "lines", "=", "[", "]", "for", "line", "in", "self", ".", "_iter_unfolded_lines", "(", ")", ":", "if", "line", ":", "lines", ".", "append", "(", "line", ")", "elif", "lines", ":", "self", ".", "records_re...
Iter input lines in blocks separated by blank lines.
[ "Iter", "input", "lines", "in", "blocks", "separated", "by", "blank", "lines", "." ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L279-L291
xi/ldif3
ldif3.py
LDIFParser._parse_attr
def _parse_attr(self, line): """Parse a single attribute type/value pair.""" colon_pos = line.index(b':') attr_type = line[0:colon_pos].decode('ascii') if line[colon_pos:].startswith(b'::'): attr_value = base64.decodestring(line[colon_pos + 2:]) elif line[colon_pos:].startswith(b':<'): url = line[colon_pos + 2:].strip() attr_value = b'' if self._process_url_schemes: u = urlparse(url) if u[0] in self._process_url_schemes: attr_value = urlopen(url.decode('ascii')).read() else: attr_value = line[colon_pos + 1:].strip() return self._decode_value(attr_type, attr_value)
python
def _parse_attr(self, line): """Parse a single attribute type/value pair.""" colon_pos = line.index(b':') attr_type = line[0:colon_pos].decode('ascii') if line[colon_pos:].startswith(b'::'): attr_value = base64.decodestring(line[colon_pos + 2:]) elif line[colon_pos:].startswith(b':<'): url = line[colon_pos + 2:].strip() attr_value = b'' if self._process_url_schemes: u = urlparse(url) if u[0] in self._process_url_schemes: attr_value = urlopen(url.decode('ascii')).read() else: attr_value = line[colon_pos + 1:].strip() return self._decode_value(attr_type, attr_value)
[ "def", "_parse_attr", "(", "self", ",", "line", ")", ":", "colon_pos", "=", "line", ".", "index", "(", "b':'", ")", "attr_type", "=", "line", "[", "0", ":", "colon_pos", "]", ".", "decode", "(", "'ascii'", ")", "if", "line", "[", "colon_pos", ":", ...
Parse a single attribute type/value pair.
[ "Parse", "a", "single", "attribute", "type", "/", "value", "pair", "." ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L309-L326
xi/ldif3
ldif3.py
LDIFParser._check_dn
def _check_dn(self, dn, attr_value): """Check dn attribute for issues.""" if dn is not None: self._error('Two lines starting with dn: in one record.') if not is_dn(attr_value): self._error('No valid string-representation of ' 'distinguished name %s.' % attr_value)
python
def _check_dn(self, dn, attr_value): """Check dn attribute for issues.""" if dn is not None: self._error('Two lines starting with dn: in one record.') if not is_dn(attr_value): self._error('No valid string-representation of ' 'distinguished name %s.' % attr_value)
[ "def", "_check_dn", "(", "self", ",", "dn", ",", "attr_value", ")", ":", "if", "dn", "is", "not", "None", ":", "self", ".", "_error", "(", "'Two lines starting with dn: in one record.'", ")", "if", "not", "is_dn", "(", "attr_value", ")", ":", "self", ".", ...
Check dn attribute for issues.
[ "Check", "dn", "attribute", "for", "issues", "." ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L334-L340
xi/ldif3
ldif3.py
LDIFParser._check_changetype
def _check_changetype(self, dn, changetype, attr_value): """Check changetype attribute for issues.""" if dn is None: self._error('Read changetype: before getting valid dn: line.') if changetype is not None: self._error('Two lines starting with changetype: in one record.') if attr_value not in CHANGE_TYPES: self._error('changetype value %s is invalid.' % attr_value)
python
def _check_changetype(self, dn, changetype, attr_value): """Check changetype attribute for issues.""" if dn is None: self._error('Read changetype: before getting valid dn: line.') if changetype is not None: self._error('Two lines starting with changetype: in one record.') if attr_value not in CHANGE_TYPES: self._error('changetype value %s is invalid.' % attr_value)
[ "def", "_check_changetype", "(", "self", ",", "dn", ",", "changetype", ",", "attr_value", ")", ":", "if", "dn", "is", "None", ":", "self", ".", "_error", "(", "'Read changetype: before getting valid dn: line.'", ")", "if", "changetype", "is", "not", "None", ":...
Check changetype attribute for issues.
[ "Check", "changetype", "attribute", "for", "issues", "." ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L342-L349
xi/ldif3
ldif3.py
LDIFParser._parse_entry_record
def _parse_entry_record(self, lines): """Parse a single entry record from a list of lines.""" dn = None entry = OrderedDict() for line in lines: attr_type, attr_value = self._parse_attr(line) if attr_type == 'dn': self._check_dn(dn, attr_value) dn = attr_value elif attr_type == 'version' and dn is None: pass # version = 1 else: if dn is None: self._error('First line of record does not start ' 'with "dn:": %s' % attr_type) if attr_value is not None and \ attr_type.lower() not in self._ignored_attr_types: if attr_type in entry: entry[attr_type].append(attr_value) else: entry[attr_type] = [attr_value] return dn, entry
python
def _parse_entry_record(self, lines): """Parse a single entry record from a list of lines.""" dn = None entry = OrderedDict() for line in lines: attr_type, attr_value = self._parse_attr(line) if attr_type == 'dn': self._check_dn(dn, attr_value) dn = attr_value elif attr_type == 'version' and dn is None: pass # version = 1 else: if dn is None: self._error('First line of record does not start ' 'with "dn:": %s' % attr_type) if attr_value is not None and \ attr_type.lower() not in self._ignored_attr_types: if attr_type in entry: entry[attr_type].append(attr_value) else: entry[attr_type] = [attr_value] return dn, entry
[ "def", "_parse_entry_record", "(", "self", ",", "lines", ")", ":", "dn", "=", "None", "entry", "=", "OrderedDict", "(", ")", "for", "line", "in", "lines", ":", "attr_type", ",", "attr_value", "=", "self", ".", "_parse_attr", "(", "line", ")", "if", "at...
Parse a single entry record from a list of lines.
[ "Parse", "a", "single", "entry", "record", "from", "a", "list", "of", "lines", "." ]
train
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L351-L375
yjzhang/uncurl_python
uncurl/zip_state_estimation.py
_create_w_objective
def _create_w_objective(m, X, Z=None): """ Creates an objective function and its derivative for W, given M and X (data) Args: m (array): genes x clusters X (array): genes x cells Z (array): zero-inflation parameters - genes x 1 """ genes, clusters = m.shape cells = X.shape[1] nonzeros = (X!=0) def objective(w): # convert w into a matrix first... because it's a vector for # optimization purposes w = w.reshape((m.shape[1], X.shape[1])) d = m.dot(w)+eps # derivative of objective wrt all elements of w # for w_{ij}, the derivative is... m_j1+...+m_jn sum over genes minus # x_ij temp = X/d m_sum = m.T.dot(nonzeros) m2 = m.T.dot(temp) deriv = m_sum - m2 return np.sum(nonzeros*(d - X*np.log(d)))/genes, deriv.flatten()/genes return objective
python
def _create_w_objective(m, X, Z=None): """ Creates an objective function and its derivative for W, given M and X (data) Args: m (array): genes x clusters X (array): genes x cells Z (array): zero-inflation parameters - genes x 1 """ genes, clusters = m.shape cells = X.shape[1] nonzeros = (X!=0) def objective(w): # convert w into a matrix first... because it's a vector for # optimization purposes w = w.reshape((m.shape[1], X.shape[1])) d = m.dot(w)+eps # derivative of objective wrt all elements of w # for w_{ij}, the derivative is... m_j1+...+m_jn sum over genes minus # x_ij temp = X/d m_sum = m.T.dot(nonzeros) m2 = m.T.dot(temp) deriv = m_sum - m2 return np.sum(nonzeros*(d - X*np.log(d)))/genes, deriv.flatten()/genes return objective
[ "def", "_create_w_objective", "(", "m", ",", "X", ",", "Z", "=", "None", ")", ":", "genes", ",", "clusters", "=", "m", ".", "shape", "cells", "=", "X", ".", "shape", "[", "1", "]", "nonzeros", "=", "(", "X", "!=", "0", ")", "def", "objective", ...
Creates an objective function and its derivative for W, given M and X (data) Args: m (array): genes x clusters X (array): genes x cells Z (array): zero-inflation parameters - genes x 1
[ "Creates", "an", "objective", "function", "and", "its", "derivative", "for", "W", "given", "M", "and", "X", "(", "data", ")" ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/zip_state_estimation.py#L13-L38
yjzhang/uncurl_python
uncurl/zip_state_estimation.py
zip_estimate_state
def zip_estimate_state(data, clusters, init_means=None, init_weights=None, max_iters=10, tol=1e-4, disp=True, inner_max_iters=400, normalize=True): """ Uses a Zero-inflated Poisson Mixture model to estimate cell states and cell state mixing weights. Args: data (array): genes x cells clusters (int): number of mixture components init_means (array, optional): initial centers - genes x clusters. Default: kmeans++ initializations init_weights (array, optional): initial weights - clusters x cells. Default: random(0,1) max_iters (int, optional): maximum number of iterations. Default: 10 tol (float, optional): if both M and W change by less than tol (in RMSE), then the iteration is stopped. Default: 1e-4 disp (bool, optional): whether or not to display optimization parameters. Default: True inner_max_iters (int, optional): Number of iterations to run in the scipy minimizer for M and W. Default: 400 normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True. Returns: M: genes x clusters - state centers W: clusters x cells - state mixing components for each cell ll: final log-likelihood """ genes, cells = data.shape # TODO: estimate ZIP parameter? if init_means is None: means, assignments = kmeans_pp(data, clusters) else: means = init_means.copy() clusters = means.shape[1] w_init = np.random.random(cells*clusters) if init_weights is not None: if len(init_weights.shape)==1: init_weights = initialize_from_assignments(init_weights, clusters) w_init = init_weights.reshape(cells*clusters) m_init = means.reshape(genes*clusters) # using zero-inflated parameters... 
L, Z = zip_fit_params_mle(data) # repeat steps 1 and 2 until convergence: ll = np.inf for i in range(max_iters): if disp: print('iter: {0}'.format(i)) w_bounds = [(0, 1.0) for x in w_init] m_bounds = [(0, None) for x in m_init] # step 1: given M, estimate W w_objective = _create_w_objective(means, data, Z) w_res = minimize(w_objective, w_init, method='L-BFGS-B', jac=True, bounds=w_bounds, options={'disp':disp, 'maxiter':inner_max_iters}) w_diff = np.sqrt(np.sum((w_res.x-w_init)**2))/w_init.size w_new = w_res.x.reshape((clusters, cells)) w_init = w_res.x # step 2: given W, update M m_objective = _create_m_objective(w_new, data, Z) # method could be 'L-BFGS-B' or 'SLSQP'... SLSQP gives a memory error... # or use TNC... m_res = minimize(m_objective, m_init, method='L-BFGS-B', jac=True, bounds=m_bounds, options={'disp':disp, 'maxiter':inner_max_iters}) ll = m_res.fun m_diff = np.sqrt(np.sum((m_res.x-m_init)**2))/m_init.size m_new = m_res.x.reshape((genes, clusters)) m_init = m_res.x means = m_new if w_diff < tol and m_diff < tol: break if normalize: w_new = w_new/w_new.sum(0) return m_new, w_new, ll
python
def zip_estimate_state(data, clusters, init_means=None, init_weights=None, max_iters=10, tol=1e-4, disp=True, inner_max_iters=400, normalize=True): """ Uses a Zero-inflated Poisson Mixture model to estimate cell states and cell state mixing weights. Args: data (array): genes x cells clusters (int): number of mixture components init_means (array, optional): initial centers - genes x clusters. Default: kmeans++ initializations init_weights (array, optional): initial weights - clusters x cells. Default: random(0,1) max_iters (int, optional): maximum number of iterations. Default: 10 tol (float, optional): if both M and W change by less than tol (in RMSE), then the iteration is stopped. Default: 1e-4 disp (bool, optional): whether or not to display optimization parameters. Default: True inner_max_iters (int, optional): Number of iterations to run in the scipy minimizer for M and W. Default: 400 normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True. Returns: M: genes x clusters - state centers W: clusters x cells - state mixing components for each cell ll: final log-likelihood """ genes, cells = data.shape # TODO: estimate ZIP parameter? if init_means is None: means, assignments = kmeans_pp(data, clusters) else: means = init_means.copy() clusters = means.shape[1] w_init = np.random.random(cells*clusters) if init_weights is not None: if len(init_weights.shape)==1: init_weights = initialize_from_assignments(init_weights, clusters) w_init = init_weights.reshape(cells*clusters) m_init = means.reshape(genes*clusters) # using zero-inflated parameters... 
L, Z = zip_fit_params_mle(data) # repeat steps 1 and 2 until convergence: ll = np.inf for i in range(max_iters): if disp: print('iter: {0}'.format(i)) w_bounds = [(0, 1.0) for x in w_init] m_bounds = [(0, None) for x in m_init] # step 1: given M, estimate W w_objective = _create_w_objective(means, data, Z) w_res = minimize(w_objective, w_init, method='L-BFGS-B', jac=True, bounds=w_bounds, options={'disp':disp, 'maxiter':inner_max_iters}) w_diff = np.sqrt(np.sum((w_res.x-w_init)**2))/w_init.size w_new = w_res.x.reshape((clusters, cells)) w_init = w_res.x # step 2: given W, update M m_objective = _create_m_objective(w_new, data, Z) # method could be 'L-BFGS-B' or 'SLSQP'... SLSQP gives a memory error... # or use TNC... m_res = minimize(m_objective, m_init, method='L-BFGS-B', jac=True, bounds=m_bounds, options={'disp':disp, 'maxiter':inner_max_iters}) ll = m_res.fun m_diff = np.sqrt(np.sum((m_res.x-m_init)**2))/m_init.size m_new = m_res.x.reshape((genes, clusters)) m_init = m_res.x means = m_new if w_diff < tol and m_diff < tol: break if normalize: w_new = w_new/w_new.sum(0) return m_new, w_new, ll
[ "def", "zip_estimate_state", "(", "data", ",", "clusters", ",", "init_means", "=", "None", ",", "init_weights", "=", "None", ",", "max_iters", "=", "10", ",", "tol", "=", "1e-4", ",", "disp", "=", "True", ",", "inner_max_iters", "=", "400", ",", "normali...
Uses a Zero-inflated Poisson Mixture model to estimate cell states and cell state mixing weights. Args: data (array): genes x cells clusters (int): number of mixture components init_means (array, optional): initial centers - genes x clusters. Default: kmeans++ initializations init_weights (array, optional): initial weights - clusters x cells. Default: random(0,1) max_iters (int, optional): maximum number of iterations. Default: 10 tol (float, optional): if both M and W change by less than tol (in RMSE), then the iteration is stopped. Default: 1e-4 disp (bool, optional): whether or not to display optimization parameters. Default: True inner_max_iters (int, optional): Number of iterations to run in the scipy minimizer for M and W. Default: 400 normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True. Returns: M: genes x clusters - state centers W: clusters x cells - state mixing components for each cell ll: final log-likelihood
[ "Uses", "a", "Zero", "-", "inflated", "Poisson", "Mixture", "model", "to", "estimate", "cell", "states", "and", "cell", "state", "mixing", "weights", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/zip_state_estimation.py#L64-L127
yjzhang/uncurl_python
uncurl/clustering.py
kmeans_pp
def kmeans_pp(data, k, centers=None): """ Generates kmeans++ initial centers. Args: data (array): A 2d array- genes x cells k (int): Number of clusters centers (array, optional): if provided, these are one or more known cluster centers. 2d array of genes x number of centers (<=k). Returns: centers - a genes x k array of cluster means. assignments - a cells x 1 array of cluster assignments """ # TODO: what if there is missing data for a given gene? # missing data could be if all the entires are -1. genes, cells = data.shape if sparse.issparse(data) and not sparse.isspmatrix_csc(data): data = sparse.csc_matrix(data) num_known_centers = 0 if centers is None: centers = np.zeros((genes, k)) else: num_known_centers = centers.shape[1] centers = np.concatenate((centers, np.zeros((genes, k-num_known_centers))), 1) distances = np.zeros((cells, k)) distances[:] = np.inf if num_known_centers == 0: init = np.random.randint(0, cells) if sparse.issparse(data): centers[:,0] = data[:, init].toarray().flatten() else: centers[:,0] = data[:, init] num_known_centers+=1 available_cells = list(range(cells)) for c in range(num_known_centers, k): c2 = c-1 # use different formulation for distance... if sparse, use lls # if not sparse, use poisson_dist if sparse.issparse(data): lls = poisson_ll(data, centers[:,c2:c2+1]).flatten() distances[:,c2] = 1 + lls.max() - lls distances[:,c2] /= distances[:,c2].max() else: for cell in range(cells): distances[cell, c2] = poisson_dist(data[:,cell], centers[:,c2]) # choose a new data point as center... 
probability proportional # to distance^2 min_distances = np.min(distances, 1) min_distances = min_distances**2 min_distances = min_distances[available_cells] # should be sampling without replacement min_dist = np.random.choice(available_cells, p=min_distances/min_distances.sum()) available_cells.pop(available_cells.index(min_dist)) if sparse.issparse(data): centers[:,c] = data[:, min_dist].toarray().flatten() else: centers[:,c] = data[:, min_dist] lls = poisson_ll(data, centers) new_assignments = np.argmax(lls, 1) centers[centers==0.0] = eps return centers, new_assignments
python
def kmeans_pp(data, k, centers=None): """ Generates kmeans++ initial centers. Args: data (array): A 2d array- genes x cells k (int): Number of clusters centers (array, optional): if provided, these are one or more known cluster centers. 2d array of genes x number of centers (<=k). Returns: centers - a genes x k array of cluster means. assignments - a cells x 1 array of cluster assignments """ # TODO: what if there is missing data for a given gene? # missing data could be if all the entires are -1. genes, cells = data.shape if sparse.issparse(data) and not sparse.isspmatrix_csc(data): data = sparse.csc_matrix(data) num_known_centers = 0 if centers is None: centers = np.zeros((genes, k)) else: num_known_centers = centers.shape[1] centers = np.concatenate((centers, np.zeros((genes, k-num_known_centers))), 1) distances = np.zeros((cells, k)) distances[:] = np.inf if num_known_centers == 0: init = np.random.randint(0, cells) if sparse.issparse(data): centers[:,0] = data[:, init].toarray().flatten() else: centers[:,0] = data[:, init] num_known_centers+=1 available_cells = list(range(cells)) for c in range(num_known_centers, k): c2 = c-1 # use different formulation for distance... if sparse, use lls # if not sparse, use poisson_dist if sparse.issparse(data): lls = poisson_ll(data, centers[:,c2:c2+1]).flatten() distances[:,c2] = 1 + lls.max() - lls distances[:,c2] /= distances[:,c2].max() else: for cell in range(cells): distances[cell, c2] = poisson_dist(data[:,cell], centers[:,c2]) # choose a new data point as center... 
probability proportional # to distance^2 min_distances = np.min(distances, 1) min_distances = min_distances**2 min_distances = min_distances[available_cells] # should be sampling without replacement min_dist = np.random.choice(available_cells, p=min_distances/min_distances.sum()) available_cells.pop(available_cells.index(min_dist)) if sparse.issparse(data): centers[:,c] = data[:, min_dist].toarray().flatten() else: centers[:,c] = data[:, min_dist] lls = poisson_ll(data, centers) new_assignments = np.argmax(lls, 1) centers[centers==0.0] = eps return centers, new_assignments
[ "def", "kmeans_pp", "(", "data", ",", "k", ",", "centers", "=", "None", ")", ":", "# TODO: what if there is missing data for a given gene?", "# missing data could be if all the entires are -1.", "genes", ",", "cells", "=", "data", ".", "shape", "if", "sparse", ".", "i...
Generates kmeans++ initial centers. Args: data (array): A 2d array- genes x cells k (int): Number of clusters centers (array, optional): if provided, these are one or more known cluster centers. 2d array of genes x number of centers (<=k). Returns: centers - a genes x k array of cluster means. assignments - a cells x 1 array of cluster assignments
[ "Generates", "kmeans", "++", "initial", "centers", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/clustering.py#L10-L71
yjzhang/uncurl_python
uncurl/clustering.py
poisson_cluster
def poisson_cluster(data, k, init=None, max_iters=100): """ Performs Poisson hard EM on the given data. Args: data (array): A 2d array- genes x cells. Can be dense or sparse; for best performance, sparse matrices should be in CSC format. k (int): Number of clusters init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++ max_iters (int, optional): Maximum number of iterations. Default: 100 Returns: a tuple of two arrays: a cells x 1 vector of cluster assignments, and a genes x k array of cluster means. """ # TODO: be able to use a combination of fixed and unknown starting points # e.g., have init values only for certain genes, have a row of all # zeros indicating that kmeans++ should be used for that row. genes, cells = data.shape #print 'starting: ', centers if sparse.issparse(data) and not sparse.isspmatrix_csc(data): data = sparse.csc_matrix(data) init, assignments = kmeans_pp(data, k, centers=init) centers = np.copy(init) assignments = np.zeros(cells) for it in range(max_iters): lls = poisson_ll(data, centers) #cluster_dists = np.zeros((cells, k)) new_assignments = np.argmax(lls, 1) if np.equal(assignments, new_assignments).all(): #print 'ending: ', centers return new_assignments, centers for c in range(k): if sparse.issparse(data): if data[:,new_assignments==c].shape[0]==0: # re-initialize centers? new_c, _ = kmeans_pp(data, k, centers[:,:c]) centers[:,c] = new_c[:,c] else: centers[:,c] = np.asarray(data[:,new_assignments==c].mean(1)).flatten() else: if len(data[:,new_assignments==c])==0: new_c, _ = kmeans_pp(data, k, centers[:,:c]) centers[:,c] = new_c[:,c] else: centers[:,c] = np.mean(data[:,new_assignments==c], 1) assignments = new_assignments return assignments, centers
python
def poisson_cluster(data, k, init=None, max_iters=100): """ Performs Poisson hard EM on the given data. Args: data (array): A 2d array- genes x cells. Can be dense or sparse; for best performance, sparse matrices should be in CSC format. k (int): Number of clusters init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++ max_iters (int, optional): Maximum number of iterations. Default: 100 Returns: a tuple of two arrays: a cells x 1 vector of cluster assignments, and a genes x k array of cluster means. """ # TODO: be able to use a combination of fixed and unknown starting points # e.g., have init values only for certain genes, have a row of all # zeros indicating that kmeans++ should be used for that row. genes, cells = data.shape #print 'starting: ', centers if sparse.issparse(data) and not sparse.isspmatrix_csc(data): data = sparse.csc_matrix(data) init, assignments = kmeans_pp(data, k, centers=init) centers = np.copy(init) assignments = np.zeros(cells) for it in range(max_iters): lls = poisson_ll(data, centers) #cluster_dists = np.zeros((cells, k)) new_assignments = np.argmax(lls, 1) if np.equal(assignments, new_assignments).all(): #print 'ending: ', centers return new_assignments, centers for c in range(k): if sparse.issparse(data): if data[:,new_assignments==c].shape[0]==0: # re-initialize centers? new_c, _ = kmeans_pp(data, k, centers[:,:c]) centers[:,c] = new_c[:,c] else: centers[:,c] = np.asarray(data[:,new_assignments==c].mean(1)).flatten() else: if len(data[:,new_assignments==c])==0: new_c, _ = kmeans_pp(data, k, centers[:,:c]) centers[:,c] = new_c[:,c] else: centers[:,c] = np.mean(data[:,new_assignments==c], 1) assignments = new_assignments return assignments, centers
[ "def", "poisson_cluster", "(", "data", ",", "k", ",", "init", "=", "None", ",", "max_iters", "=", "100", ")", ":", "# TODO: be able to use a combination of fixed and unknown starting points", "# e.g., have init values only for certain genes, have a row of all", "# zeros indicatin...
Performs Poisson hard EM on the given data. Args: data (array): A 2d array- genes x cells. Can be dense or sparse; for best performance, sparse matrices should be in CSC format. k (int): Number of clusters init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++ max_iters (int, optional): Maximum number of iterations. Default: 100 Returns: a tuple of two arrays: a cells x 1 vector of cluster assignments, and a genes x k array of cluster means.
[ "Performs", "Poisson", "hard", "EM", "on", "the", "given", "data", "." ]
train
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/clustering.py#L73-L119
moonso/loqusdb
loqusdb/commands/view.py
cases
def cases(ctx, case_id, to_json): """Display cases in the database.""" adapter = ctx.obj['adapter'] cases = [] if case_id: case_obj = adapter.case({'case_id':case_id}) if not case_obj: LOG.info("Case {0} does not exist in database".format(case_id)) return case_obj['_id'] = str(case_obj['_id']) cases.append(case_obj) else: cases = adapter.cases() if cases.count() == 0: LOG.info("No cases found in database") ctx.abort() if to_json: click.echo(json.dumps(cases)) return click.echo("#case_id\tvcf_path") for case_obj in cases: click.echo("{0}\t{1}".format(case_obj.get('case_id'), case_obj.get('vcf_path')))
python
def cases(ctx, case_id, to_json): """Display cases in the database.""" adapter = ctx.obj['adapter'] cases = [] if case_id: case_obj = adapter.case({'case_id':case_id}) if not case_obj: LOG.info("Case {0} does not exist in database".format(case_id)) return case_obj['_id'] = str(case_obj['_id']) cases.append(case_obj) else: cases = adapter.cases() if cases.count() == 0: LOG.info("No cases found in database") ctx.abort() if to_json: click.echo(json.dumps(cases)) return click.echo("#case_id\tvcf_path") for case_obj in cases: click.echo("{0}\t{1}".format(case_obj.get('case_id'), case_obj.get('vcf_path')))
[ "def", "cases", "(", "ctx", ",", "case_id", ",", "to_json", ")", ":", "adapter", "=", "ctx", ".", "obj", "[", "'adapter'", "]", "cases", "=", "[", "]", "if", "case_id", ":", "case_obj", "=", "adapter", ".", "case", "(", "{", "'case_id'", ":", "case...
Display cases in the database.
[ "Display", "cases", "in", "the", "database", "." ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/view.py#L19-L45
moonso/loqusdb
loqusdb/commands/view.py
variants
def variants(ctx, variant_id, chromosome, end_chromosome, start, end, variant_type, sv_type): """Display variants in the database.""" if sv_type: variant_type = 'sv' adapter = ctx.obj['adapter'] if (start or end): if not (chromosome and start and end): LOG.warning("Regions must be specified with chromosome, start and end") return if variant_id: variant = adapter.get_variant({'_id':variant_id}) if variant: click.echo(variant) else: LOG.info("Variant {0} does not exist in database".format(variant_id)) return if variant_type == 'snv': result = adapter.get_variants( chromosome=chromosome, start=start, end=end ) else: LOG.info("Search for svs") result = adapter.get_sv_variants( chromosome=chromosome, end_chromosome=end_chromosome, sv_type=sv_type, pos=start, end=end ) i = 0 for variant in result: i += 1 pp(variant) LOG.info("Number of variants found in database: %s", i)
python
def variants(ctx, variant_id, chromosome, end_chromosome, start, end, variant_type, sv_type): """Display variants in the database.""" if sv_type: variant_type = 'sv' adapter = ctx.obj['adapter'] if (start or end): if not (chromosome and start and end): LOG.warning("Regions must be specified with chromosome, start and end") return if variant_id: variant = adapter.get_variant({'_id':variant_id}) if variant: click.echo(variant) else: LOG.info("Variant {0} does not exist in database".format(variant_id)) return if variant_type == 'snv': result = adapter.get_variants( chromosome=chromosome, start=start, end=end ) else: LOG.info("Search for svs") result = adapter.get_sv_variants( chromosome=chromosome, end_chromosome=end_chromosome, sv_type=sv_type, pos=start, end=end ) i = 0 for variant in result: i += 1 pp(variant) LOG.info("Number of variants found in database: %s", i)
[ "def", "variants", "(", "ctx", ",", "variant_id", ",", "chromosome", ",", "end_chromosome", ",", "start", ",", "end", ",", "variant_type", ",", "sv_type", ")", ":", "if", "sv_type", ":", "variant_type", "=", "'sv'", "adapter", "=", "ctx", ".", "obj", "["...
Display variants in the database.
[ "Display", "variants", "in", "the", "database", "." ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/view.py#L77-L119
moonso/loqusdb
loqusdb/commands/view.py
index
def index(ctx, view): """Index the database.""" adapter = ctx.obj['adapter'] if view: click.echo(adapter.indexes()) return adapter.ensure_indexes()
python
def index(ctx, view): """Index the database.""" adapter = ctx.obj['adapter'] if view: click.echo(adapter.indexes()) return adapter.ensure_indexes()
[ "def", "index", "(", "ctx", ",", "view", ")", ":", "adapter", "=", "ctx", ".", "obj", "[", "'adapter'", "]", "if", "view", ":", "click", ".", "echo", "(", "adapter", ".", "indexes", "(", ")", ")", "return", "adapter", ".", "ensure_indexes", "(", ")...
Index the database.
[ "Index", "the", "database", "." ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/view.py#L127-L133
limix/numpy-sugar
numpy_sugar/linalg/dot.py
dotd
def dotd(A, B, out=None): r"""Diagonal of :math:`\mathrm A\mathrm B^\intercal`. If ``A`` is :math:`n\times p` and ``B`` is :math:`p\times n`, it is done in :math:`O(pn)`. Args: A (array_like): Left matrix. B (array_like): Right matrix. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: Resulting diagonal. """ A = asarray(A, float) B = asarray(B, float) if A.ndim == 1 and B.ndim == 1: if out is None: return dot(A, B) return dot(A, B, out) if out is None: out = empty((A.shape[0],), float) return einsum("ij,ji->i", A, B, out=out)
python
def dotd(A, B, out=None): r"""Diagonal of :math:`\mathrm A\mathrm B^\intercal`. If ``A`` is :math:`n\times p` and ``B`` is :math:`p\times n`, it is done in :math:`O(pn)`. Args: A (array_like): Left matrix. B (array_like): Right matrix. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: Resulting diagonal. """ A = asarray(A, float) B = asarray(B, float) if A.ndim == 1 and B.ndim == 1: if out is None: return dot(A, B) return dot(A, B, out) if out is None: out = empty((A.shape[0],), float) return einsum("ij,ji->i", A, B, out=out)
[ "def", "dotd", "(", "A", ",", "B", ",", "out", "=", "None", ")", ":", "A", "=", "asarray", "(", "A", ",", "float", ")", "B", "=", "asarray", "(", "B", ",", "float", ")", "if", "A", ".", "ndim", "==", "1", "and", "B", ".", "ndim", "==", "1...
r"""Diagonal of :math:`\mathrm A\mathrm B^\intercal`. If ``A`` is :math:`n\times p` and ``B`` is :math:`p\times n`, it is done in :math:`O(pn)`. Args: A (array_like): Left matrix. B (array_like): Right matrix. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: Resulting diagonal.
[ "r", "Diagonal", "of", ":", "math", ":", "\\", "mathrm", "A", "\\", "mathrm", "B^", "\\", "intercal", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/dot.py#L4-L26
limix/numpy-sugar
numpy_sugar/linalg/dot.py
ddot
def ddot(L, R, left=None, out=None): r"""Dot product of a matrix and a diagonal one. Args: L (array_like): Left matrix. R (array_like): Right matrix. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: Resulting matrix. """ L = asarray(L, float) R = asarray(R, float) if left is None: ok = min(L.ndim, R.ndim) == 1 and max(L.ndim, R.ndim) == 2 if not ok: msg = "Wrong array layout. One array should have" msg += " ndim=1 and the other one ndim=2." raise ValueError(msg) left = L.ndim == 1 if left: if out is None: out = copy(R) L = L.reshape(list(L.shape) + [1] * (R.ndim - 1)) return multiply(L, R, out=out) else: if out is None: out = copy(L) return multiply(L, R, out=out)
python
def ddot(L, R, left=None, out=None): r"""Dot product of a matrix and a diagonal one. Args: L (array_like): Left matrix. R (array_like): Right matrix. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: Resulting matrix. """ L = asarray(L, float) R = asarray(R, float) if left is None: ok = min(L.ndim, R.ndim) == 1 and max(L.ndim, R.ndim) == 2 if not ok: msg = "Wrong array layout. One array should have" msg += " ndim=1 and the other one ndim=2." raise ValueError(msg) left = L.ndim == 1 if left: if out is None: out = copy(R) L = L.reshape(list(L.shape) + [1] * (R.ndim - 1)) return multiply(L, R, out=out) else: if out is None: out = copy(L) return multiply(L, R, out=out)
[ "def", "ddot", "(", "L", ",", "R", ",", "left", "=", "None", ",", "out", "=", "None", ")", ":", "L", "=", "asarray", "(", "L", ",", "float", ")", "R", "=", "asarray", "(", "R", ",", "float", ")", "if", "left", "is", "None", ":", "ok", "=", ...
r"""Dot product of a matrix and a diagonal one. Args: L (array_like): Left matrix. R (array_like): Right matrix. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: Resulting matrix.
[ "r", "Dot", "product", "of", "a", "matrix", "and", "a", "diagonal", "one", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/dot.py#L29-L57
limix/numpy-sugar
numpy_sugar/linalg/dot.py
cdot
def cdot(L, out=None): r"""Product of a Cholesky matrix with itself transposed. Args: L (array_like): Cholesky matrix. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: :math:`\mathrm L\mathrm L^\intercal`. """ L = asarray(L, float) layout_error = "Wrong matrix layout." if L.ndim != 2: raise ValueError(layout_error) if L.shape[0] != L.shape[1]: raise ValueError(layout_error) if out is None: out = empty((L.shape[0], L.shape[1]), float) return einsum("ij,kj->ik", L, L, out=out)
python
def cdot(L, out=None): r"""Product of a Cholesky matrix with itself transposed. Args: L (array_like): Cholesky matrix. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: :math:`\mathrm L\mathrm L^\intercal`. """ L = asarray(L, float) layout_error = "Wrong matrix layout." if L.ndim != 2: raise ValueError(layout_error) if L.shape[0] != L.shape[1]: raise ValueError(layout_error) if out is None: out = empty((L.shape[0], L.shape[1]), float) return einsum("ij,kj->ik", L, L, out=out)
[ "def", "cdot", "(", "L", ",", "out", "=", "None", ")", ":", "L", "=", "asarray", "(", "L", ",", "float", ")", "layout_error", "=", "\"Wrong matrix layout.\"", "if", "L", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "layout_error", ")", "i...
r"""Product of a Cholesky matrix with itself transposed. Args: L (array_like): Cholesky matrix. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: :math:`\mathrm L\mathrm L^\intercal`.
[ "r", "Product", "of", "a", "Cholesky", "matrix", "with", "itself", "transposed", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/dot.py#L60-L83
limix/numpy-sugar
numpy_sugar/_rankdata.py
nanrankdata
def nanrankdata(a, axis=-1, inplace=False): """ Rank data for arrays contaning NaN values. Parameters ---------- X : array_like Array of values. axis : int, optional Axis value. Defaults to `1`. inplace : bool, optional Defaults to `False`. Returns ------- array_like Ranked array. Examples -------- .. doctest:: >>> from numpy_sugar import nanrankdata >>> from numpy import arange >>> >>> X = arange(15).reshape((5, 3)).astype(float) >>> print(nanrankdata(X)) [[1. 1. 1.] [2. 2. 2.] [3. 3. 3.] [4. 4. 4.] [5. 5. 5.]] """ from scipy.stats import rankdata if hasattr(a, "dtype") and issubdtype(a.dtype, integer): raise ValueError("Integer type is not supported.") if isinstance(a, (tuple, list)): if inplace: raise ValueError("Can't use `inplace=True` for {}.".format(type(a))) a = asarray(a, float) orig_shape = a.shape if a.ndim == 1: a = a.reshape(orig_shape + (1,)) if not inplace: a = a.copy() def rank1d(x): idx = ~isnan(x) x[idx] = rankdata(x[idx]) return x a = a.swapaxes(1, axis) a = apply_along_axis(rank1d, 0, a) a = a.swapaxes(1, axis) return a.reshape(orig_shape)
python
def nanrankdata(a, axis=-1, inplace=False): """ Rank data for arrays contaning NaN values. Parameters ---------- X : array_like Array of values. axis : int, optional Axis value. Defaults to `1`. inplace : bool, optional Defaults to `False`. Returns ------- array_like Ranked array. Examples -------- .. doctest:: >>> from numpy_sugar import nanrankdata >>> from numpy import arange >>> >>> X = arange(15).reshape((5, 3)).astype(float) >>> print(nanrankdata(X)) [[1. 1. 1.] [2. 2. 2.] [3. 3. 3.] [4. 4. 4.] [5. 5. 5.]] """ from scipy.stats import rankdata if hasattr(a, "dtype") and issubdtype(a.dtype, integer): raise ValueError("Integer type is not supported.") if isinstance(a, (tuple, list)): if inplace: raise ValueError("Can't use `inplace=True` for {}.".format(type(a))) a = asarray(a, float) orig_shape = a.shape if a.ndim == 1: a = a.reshape(orig_shape + (1,)) if not inplace: a = a.copy() def rank1d(x): idx = ~isnan(x) x[idx] = rankdata(x[idx]) return x a = a.swapaxes(1, axis) a = apply_along_axis(rank1d, 0, a) a = a.swapaxes(1, axis) return a.reshape(orig_shape)
[ "def", "nanrankdata", "(", "a", ",", "axis", "=", "-", "1", ",", "inplace", "=", "False", ")", ":", "from", "scipy", ".", "stats", "import", "rankdata", "if", "hasattr", "(", "a", ",", "\"dtype\"", ")", "and", "issubdtype", "(", "a", ".", "dtype", ...
Rank data for arrays contaning NaN values. Parameters ---------- X : array_like Array of values. axis : int, optional Axis value. Defaults to `1`. inplace : bool, optional Defaults to `False`. Returns ------- array_like Ranked array. Examples -------- .. doctest:: >>> from numpy_sugar import nanrankdata >>> from numpy import arange >>> >>> X = arange(15).reshape((5, 3)).astype(float) >>> print(nanrankdata(X)) [[1. 1. 1.] [2. 2. 2.] [3. 3. 3.] [4. 4. 4.] [5. 5. 5.]]
[ "Rank", "data", "for", "arrays", "contaning", "NaN", "values", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/_rankdata.py#L4-L64
limix/numpy-sugar
numpy_sugar/linalg/det.py
plogdet
def plogdet(K): r"""Log of the pseudo-determinant. It assumes that ``K`` is a positive semi-definite matrix. Args: K (array_like): matrix. Returns: float: log of the pseudo-determinant. """ egvals = eigvalsh(K) return npsum(log(egvals[egvals > epsilon]))
python
def plogdet(K): r"""Log of the pseudo-determinant. It assumes that ``K`` is a positive semi-definite matrix. Args: K (array_like): matrix. Returns: float: log of the pseudo-determinant. """ egvals = eigvalsh(K) return npsum(log(egvals[egvals > epsilon]))
[ "def", "plogdet", "(", "K", ")", ":", "egvals", "=", "eigvalsh", "(", "K", ")", "return", "npsum", "(", "log", "(", "egvals", "[", "egvals", ">", "epsilon", "]", ")", ")" ]
r"""Log of the pseudo-determinant. It assumes that ``K`` is a positive semi-definite matrix. Args: K (array_like): matrix. Returns: float: log of the pseudo-determinant.
[ "r", "Log", "of", "the", "pseudo", "-", "determinant", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/det.py#L8-L20
limix/numpy-sugar
numpy_sugar/linalg/qs.py
economic_qs
def economic_qs(K, epsilon=sqrt(finfo(float).eps)): r"""Economic eigen decomposition for symmetric matrices. A symmetric matrix ``K`` can be decomposed in :math:`\mathrm Q_0 \mathrm S_0 \mathrm Q_0^\intercal + \mathrm Q_1\ \mathrm S_1 \mathrm Q_1^ \intercal`, where :math:`\mathrm S_1` is a zero matrix with size determined by ``K``'s rank deficiency. Args: K (array_like): Symmetric matrix. epsilon (float): Eigen value threshold. Default is ``sqrt(finfo(float).eps)``. Returns: tuple: ``((Q0, Q1), S0)``. """ (S, Q) = eigh(K) nok = abs(max(Q[0].min(), Q[0].max(), key=abs)) < epsilon nok = nok and abs(max(K.min(), K.max(), key=abs)) >= epsilon if nok: from scipy.linalg import eigh as sp_eigh (S, Q) = sp_eigh(K) ok = S >= epsilon nok = logical_not(ok) S0 = S[ok] Q0 = Q[:, ok] Q1 = Q[:, nok] return ((Q0, Q1), S0)
python
def economic_qs(K, epsilon=sqrt(finfo(float).eps)): r"""Economic eigen decomposition for symmetric matrices. A symmetric matrix ``K`` can be decomposed in :math:`\mathrm Q_0 \mathrm S_0 \mathrm Q_0^\intercal + \mathrm Q_1\ \mathrm S_1 \mathrm Q_1^ \intercal`, where :math:`\mathrm S_1` is a zero matrix with size determined by ``K``'s rank deficiency. Args: K (array_like): Symmetric matrix. epsilon (float): Eigen value threshold. Default is ``sqrt(finfo(float).eps)``. Returns: tuple: ``((Q0, Q1), S0)``. """ (S, Q) = eigh(K) nok = abs(max(Q[0].min(), Q[0].max(), key=abs)) < epsilon nok = nok and abs(max(K.min(), K.max(), key=abs)) >= epsilon if nok: from scipy.linalg import eigh as sp_eigh (S, Q) = sp_eigh(K) ok = S >= epsilon nok = logical_not(ok) S0 = S[ok] Q0 = Q[:, ok] Q1 = Q[:, nok] return ((Q0, Q1), S0)
[ "def", "economic_qs", "(", "K", ",", "epsilon", "=", "sqrt", "(", "finfo", "(", "float", ")", ".", "eps", ")", ")", ":", "(", "S", ",", "Q", ")", "=", "eigh", "(", "K", ")", "nok", "=", "abs", "(", "max", "(", "Q", "[", "0", "]", ".", "mi...
r"""Economic eigen decomposition for symmetric matrices. A symmetric matrix ``K`` can be decomposed in :math:`\mathrm Q_0 \mathrm S_0 \mathrm Q_0^\intercal + \mathrm Q_1\ \mathrm S_1 \mathrm Q_1^ \intercal`, where :math:`\mathrm S_1` is a zero matrix with size determined by ``K``'s rank deficiency. Args: K (array_like): Symmetric matrix. epsilon (float): Eigen value threshold. Default is ``sqrt(finfo(float).eps)``. Returns: tuple: ``((Q0, Q1), S0)``.
[ "r", "Economic", "eigen", "decomposition", "for", "symmetric", "matrices", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/qs.py#L5-L36
limix/numpy-sugar
numpy_sugar/linalg/qs.py
economic_qs_linear
def economic_qs_linear(G): r"""Economic eigen decomposition for symmetric matrices ``dot(G, G.T)``. It is theoretically equivalent to ``economic_qs(dot(G, G.T))``. Refer to :func:`numpy_sugar.economic_qs` for further information. Args: G (array_like): Matrix. Returns: tuple: ``((Q0, Q1), S0)``. """ import dask.array as da if not isinstance(G, da.Array): G = asarray(G, float) if G.shape[0] > G.shape[1]: (Q, Ssq, _) = svd(G, full_matrices=True) S0 = Ssq ** 2 rank = len(S0) Q0, Q1 = Q[:, :rank], Q[:, rank:] return ((Q0, Q1), S0) return economic_qs(G.dot(G.T))
python
def economic_qs_linear(G): r"""Economic eigen decomposition for symmetric matrices ``dot(G, G.T)``. It is theoretically equivalent to ``economic_qs(dot(G, G.T))``. Refer to :func:`numpy_sugar.economic_qs` for further information. Args: G (array_like): Matrix. Returns: tuple: ``((Q0, Q1), S0)``. """ import dask.array as da if not isinstance(G, da.Array): G = asarray(G, float) if G.shape[0] > G.shape[1]: (Q, Ssq, _) = svd(G, full_matrices=True) S0 = Ssq ** 2 rank = len(S0) Q0, Q1 = Q[:, :rank], Q[:, rank:] return ((Q0, Q1), S0) return economic_qs(G.dot(G.T))
[ "def", "economic_qs_linear", "(", "G", ")", ":", "import", "dask", ".", "array", "as", "da", "if", "not", "isinstance", "(", "G", ",", "da", ".", "Array", ")", ":", "G", "=", "asarray", "(", "G", ",", "float", ")", "if", "G", ".", "shape", "[", ...
r"""Economic eigen decomposition for symmetric matrices ``dot(G, G.T)``. It is theoretically equivalent to ``economic_qs(dot(G, G.T))``. Refer to :func:`numpy_sugar.economic_qs` for further information. Args: G (array_like): Matrix. Returns: tuple: ``((Q0, Q1), S0)``.
[ "r", "Economic", "eigen", "decomposition", "for", "symmetric", "matrices", "dot", "(", "G", "G", ".", "T", ")", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/qs.py#L39-L63
limix/numpy-sugar
numpy_sugar/_array.py
cartesian
def cartesian(shape): r"""Cartesian indexing. Returns a sequence of n-tuples indexing each element of a hypothetical matrix of the given shape. Args: shape (tuple): tuple of dimensions. Returns: array_like: indices. Example ------- .. doctest:: >>> from numpy_sugar import cartesian >>> print(cartesian((2, 3))) [[0 0] [0 1] [0 2] [1 0] [1 1] [1 2]] Reference: [1] http://stackoverflow.com/a/27286794 """ n = len(shape) idx = [slice(0, s) for s in shape] g = rollaxis(mgrid[idx], 0, n + 1) return g.reshape((prod(shape), n))
python
def cartesian(shape): r"""Cartesian indexing. Returns a sequence of n-tuples indexing each element of a hypothetical matrix of the given shape. Args: shape (tuple): tuple of dimensions. Returns: array_like: indices. Example ------- .. doctest:: >>> from numpy_sugar import cartesian >>> print(cartesian((2, 3))) [[0 0] [0 1] [0 2] [1 0] [1 1] [1 2]] Reference: [1] http://stackoverflow.com/a/27286794 """ n = len(shape) idx = [slice(0, s) for s in shape] g = rollaxis(mgrid[idx], 0, n + 1) return g.reshape((prod(shape), n))
[ "def", "cartesian", "(", "shape", ")", ":", "n", "=", "len", "(", "shape", ")", "idx", "=", "[", "slice", "(", "0", ",", "s", ")", "for", "s", "in", "shape", "]", "g", "=", "rollaxis", "(", "mgrid", "[", "idx", "]", ",", "0", ",", "n", "+",...
r"""Cartesian indexing. Returns a sequence of n-tuples indexing each element of a hypothetical matrix of the given shape. Args: shape (tuple): tuple of dimensions. Returns: array_like: indices. Example ------- .. doctest:: >>> from numpy_sugar import cartesian >>> print(cartesian((2, 3))) [[0 0] [0 1] [0 2] [1 0] [1 1] [1 2]] Reference: [1] http://stackoverflow.com/a/27286794
[ "r", "Cartesian", "indexing", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/_array.py#L96-L129
limix/numpy-sugar
numpy_sugar/_array.py
unique
def unique(ar): r"""Find the unique elements of an array. It uses ``dask.array.unique`` if necessary. Args: ar (array_like): Input array. Returns: array_like: the sorted unique elements. """ import dask.array as da if isinstance(ar, da.core.Array): return da.unique(ar) return _unique(ar)
python
def unique(ar): r"""Find the unique elements of an array. It uses ``dask.array.unique`` if necessary. Args: ar (array_like): Input array. Returns: array_like: the sorted unique elements. """ import dask.array as da if isinstance(ar, da.core.Array): return da.unique(ar) return _unique(ar)
[ "def", "unique", "(", "ar", ")", ":", "import", "dask", ".", "array", "as", "da", "if", "isinstance", "(", "ar", ",", "da", ".", "core", ".", "Array", ")", ":", "return", "da", ".", "unique", "(", "ar", ")", "return", "_unique", "(", "ar", ")" ]
r"""Find the unique elements of an array. It uses ``dask.array.unique`` if necessary. Args: ar (array_like): Input array. Returns: array_like: the sorted unique elements.
[ "r", "Find", "the", "unique", "elements", "of", "an", "array", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/_array.py#L132-L149
limix/numpy-sugar
numpy_sugar/linalg/lu.py
lu_slogdet
def lu_slogdet(LU): r"""Natural logarithm of a LU decomposition. Args: LU (tuple): LU decomposition. Returns: tuple: sign and log-determinant. """ LU = (asarray(LU[0], float), asarray(LU[1], float)) adet = _sum(log(_abs(LU[0].diagonal()))) s = prod(sign(LU[0].diagonal())) nrows_exchange = LU[1].size - _sum(LU[1] == arange(LU[1].size, dtype="int32")) odd = nrows_exchange % 2 == 1 if odd: s *= -1.0 return (s, adet)
python
def lu_slogdet(LU): r"""Natural logarithm of a LU decomposition. Args: LU (tuple): LU decomposition. Returns: tuple: sign and log-determinant. """ LU = (asarray(LU[0], float), asarray(LU[1], float)) adet = _sum(log(_abs(LU[0].diagonal()))) s = prod(sign(LU[0].diagonal())) nrows_exchange = LU[1].size - _sum(LU[1] == arange(LU[1].size, dtype="int32")) odd = nrows_exchange % 2 == 1 if odd: s *= -1.0 return (s, adet)
[ "def", "lu_slogdet", "(", "LU", ")", ":", "LU", "=", "(", "asarray", "(", "LU", "[", "0", "]", ",", "float", ")", ",", "asarray", "(", "LU", "[", "1", "]", ",", "float", ")", ")", "adet", "=", "_sum", "(", "log", "(", "_abs", "(", "LU", "["...
r"""Natural logarithm of a LU decomposition. Args: LU (tuple): LU decomposition. Returns: tuple: sign and log-determinant.
[ "r", "Natural", "logarithm", "of", "a", "LU", "decomposition", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/lu.py#L6-L26
limix/numpy-sugar
numpy_sugar/linalg/lu.py
lu_solve
def lu_solve(LU, b): r"""Solve for LU decomposition. Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`, given the LU factorization of :math:`\mathrm A`. Args: LU (array_like): LU decomposition. b (array_like): Right-hand side. Returns: :class:`numpy.ndarray`: The solution to the system :math:`\mathrm A \mathbf x = \mathbf b`. See Also -------- scipy.linalg.lu_factor : LU decomposition. scipy.linalg.lu_solve : Solve linear equations given LU factorization. """ from scipy.linalg import lu_solve as sp_lu_solve LU = (asarray(LU[0], float), asarray(LU[1], float)) b = asarray(b, float) return sp_lu_solve(LU, b, check_finite=False)
python
def lu_solve(LU, b): r"""Solve for LU decomposition. Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`, given the LU factorization of :math:`\mathrm A`. Args: LU (array_like): LU decomposition. b (array_like): Right-hand side. Returns: :class:`numpy.ndarray`: The solution to the system :math:`\mathrm A \mathbf x = \mathbf b`. See Also -------- scipy.linalg.lu_factor : LU decomposition. scipy.linalg.lu_solve : Solve linear equations given LU factorization. """ from scipy.linalg import lu_solve as sp_lu_solve LU = (asarray(LU[0], float), asarray(LU[1], float)) b = asarray(b, float) return sp_lu_solve(LU, b, check_finite=False)
[ "def", "lu_solve", "(", "LU", ",", "b", ")", ":", "from", "scipy", ".", "linalg", "import", "lu_solve", "as", "sp_lu_solve", "LU", "=", "(", "asarray", "(", "LU", "[", "0", "]", ",", "float", ")", ",", "asarray", "(", "LU", "[", "1", "]", ",", ...
r"""Solve for LU decomposition. Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`, given the LU factorization of :math:`\mathrm A`. Args: LU (array_like): LU decomposition. b (array_like): Right-hand side. Returns: :class:`numpy.ndarray`: The solution to the system :math:`\mathrm A \mathbf x = \mathbf b`. See Also -------- scipy.linalg.lu_factor : LU decomposition. scipy.linalg.lu_solve : Solve linear equations given LU factorization.
[ "r", "Solve", "for", "LU", "decomposition", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/lu.py#L29-L52
limix/numpy-sugar
numpy_sugar/linalg/lstsq.py
lstsq
def lstsq(A, b): r"""Return the least-squares solution to a linear matrix equation. Args: A (array_like): Coefficient matrix. b (array_like): Ordinate values. Returns: :class:`numpy.ndarray`: Least-squares solution. """ A = asarray(A, float) b = asarray(b, float) if A.ndim == 1: A = A[:, newaxis] if A.shape[1] == 1: return dot(A.T, b) / squeeze(dot(A.T, A)) rcond = finfo(double).eps * max(*A.shape) return npy_lstsq(A, b, rcond=rcond)[0]
python
def lstsq(A, b): r"""Return the least-squares solution to a linear matrix equation. Args: A (array_like): Coefficient matrix. b (array_like): Ordinate values. Returns: :class:`numpy.ndarray`: Least-squares solution. """ A = asarray(A, float) b = asarray(b, float) if A.ndim == 1: A = A[:, newaxis] if A.shape[1] == 1: return dot(A.T, b) / squeeze(dot(A.T, A)) rcond = finfo(double).eps * max(*A.shape) return npy_lstsq(A, b, rcond=rcond)[0]
[ "def", "lstsq", "(", "A", ",", "b", ")", ":", "A", "=", "asarray", "(", "A", ",", "float", ")", "b", "=", "asarray", "(", "b", ",", "float", ")", "if", "A", ".", "ndim", "==", "1", ":", "A", "=", "A", "[", ":", ",", "newaxis", "]", "if", ...
r"""Return the least-squares solution to a linear matrix equation. Args: A (array_like): Coefficient matrix. b (array_like): Ordinate values. Returns: :class:`numpy.ndarray`: Least-squares solution.
[ "r", "Return", "the", "least", "-", "squares", "solution", "to", "a", "linear", "matrix", "equation", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/lstsq.py#L6-L26
limix/numpy-sugar
numpy_sugar/ma/dot.py
dotd
def dotd(A, B): r"""Diagonal of :math:`\mathrm A\mathrm B^\intercal`. If ``A`` is :math:`n\times p` and ``B`` is :math:`p\times n`, it is done in :math:`O(pn)`. Args: A (array_like): Left matrix. B (array_like): Right matrix. Returns: :class:`numpy.ndarray`: Resulting diagonal. """ A = asarray(A, float) B = asarray(B, float) if A.ndim == 1 and B.ndim == 1: return dot(A, B) out = empty((A.shape[0],), float) out[:] = sum(A * B.T, axis=1) return out
python
def dotd(A, B): r"""Diagonal of :math:`\mathrm A\mathrm B^\intercal`. If ``A`` is :math:`n\times p` and ``B`` is :math:`p\times n`, it is done in :math:`O(pn)`. Args: A (array_like): Left matrix. B (array_like): Right matrix. Returns: :class:`numpy.ndarray`: Resulting diagonal. """ A = asarray(A, float) B = asarray(B, float) if A.ndim == 1 and B.ndim == 1: return dot(A, B) out = empty((A.shape[0],), float) out[:] = sum(A * B.T, axis=1) return out
[ "def", "dotd", "(", "A", ",", "B", ")", ":", "A", "=", "asarray", "(", "A", ",", "float", ")", "B", "=", "asarray", "(", "B", ",", "float", ")", "if", "A", ".", "ndim", "==", "1", "and", "B", ".", "ndim", "==", "1", ":", "return", "dot", ...
r"""Diagonal of :math:`\mathrm A\mathrm B^\intercal`. If ``A`` is :math:`n\times p` and ``B`` is :math:`p\times n`, it is done in :math:`O(pn)`. Args: A (array_like): Left matrix. B (array_like): Right matrix. Returns: :class:`numpy.ndarray`: Resulting diagonal.
[ "r", "Diagonal", "of", ":", "math", ":", "\\", "mathrm", "A", "\\", "mathrm", "B^", "\\", "intercal", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/ma/dot.py#L4-L25
limix/numpy-sugar
numpy_sugar/linalg/svd.py
economic_svd
def economic_svd(G, epsilon=sqrt(finfo(float).eps)):
    r"""Economic Singular Value Decomposition.

    Computes the thin SVD of ``G`` and discards every singular triplet
    whose singular value falls below ``epsilon``.

    Args:
        G (array_like): Matrix to be factorized.
        epsilon (float): Threshold on the singular values. Default is
            ``sqrt(finfo(float).eps)``.

    Returns:
        :class:`numpy.ndarray`: Unitary matrix (left singular vectors).
        :class:`numpy.ndarray`: Singular values.
        :class:`numpy.ndarray`: Unitary matrix (right singular vectors).

    See Also
    --------
    numpy.linalg.svd : Singular value decomposition.
    scipy.linalg.svd : Singular value decomposition.
    """
    from scipy.linalg import svd

    G = asarray(G, float)
    U, S, V = svd(G, full_matrices=False, check_finite=False)
    # Keep only the numerically significant singular triplets.
    keep = S >= epsilon
    return (U[:, keep], S[keep], V[keep, :])
python
def economic_svd(G, epsilon=sqrt(finfo(float).eps)): r"""Economic Singular Value Decomposition. Args: G (array_like): Matrix to be factorized. epsilon (float): Threshold on the square root of the eigen values. Default is ``sqrt(finfo(float).eps)``. Returns: :class:`numpy.ndarray`: Unitary matrix. :class:`numpy.ndarray`: Singular values. :class:`numpy.ndarray`: Unitary matrix. See Also -------- numpy.linalg.svd : Cholesky decomposition. scipy.linalg.svd : Cholesky decomposition. """ from scipy.linalg import svd G = asarray(G, float) (U, S, V) = svd(G, full_matrices=False, check_finite=False) ok = S >= epsilon S = S[ok] U = U[:, ok] V = V[ok, :] return (U, S, V)
[ "def", "economic_svd", "(", "G", ",", "epsilon", "=", "sqrt", "(", "finfo", "(", "float", ")", ".", "eps", ")", ")", ":", "from", "scipy", ".", "linalg", "import", "svd", "G", "=", "asarray", "(", "G", ",", "float", ")", "(", "U", ",", "S", ","...
r"""Economic Singular Value Decomposition. Args: G (array_like): Matrix to be factorized. epsilon (float): Threshold on the square root of the eigen values. Default is ``sqrt(finfo(float).eps)``. Returns: :class:`numpy.ndarray`: Unitary matrix. :class:`numpy.ndarray`: Singular values. :class:`numpy.ndarray`: Unitary matrix. See Also -------- numpy.linalg.svd : Cholesky decomposition. scipy.linalg.svd : Cholesky decomposition.
[ "r", "Economic", "Singular", "Value", "Decomposition", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/svd.py#L4-L30
limix/numpy-sugar
numpy_sugar/linalg/solve.py
hsolve
def hsolve(A, y):
    r"""Solver for the linear equations of two variables and equations only.

    It uses Householder reductions to solve ``Ax = y`` in a robust manner.

    Parameters
    ----------
    A : array_like
        Coefficient matrix.
    y : array_like
        Ordinate values.

    Returns
    -------
    :class:`numpy.ndarray`
        Solution ``x``.
    """
    # First Householder reflection B = I - 2 v v^T, chosen to map the first
    # column of A onto (n, 0).
    n = _norm(A[0, 0], A[1, 0])
    u0 = A[0, 0] - n
    u1 = A[1, 0]
    nu = _norm(u0, u1)
    # When the column already equals (n, 0), nu == 0 and u/nu is 0/0;
    # nan_to_num maps the NaN to 0 so the reflection degenerates to the
    # identity instead of propagating NaNs.
    with errstate(invalid="ignore", divide="ignore"):
        v0 = nan_to_num(u0 / nu)
        v1 = nan_to_num(u1 / nu)
    # B is symmetric; only the three distinct entries are kept.
    B00 = 1 - 2 * v0 * v0
    B01 = 0 - 2 * v0 * v1
    B11 = 1 - 2 * v1 * v1
    # D = B A has a zero (1, 0) entry by construction, so it is not stored.
    D00 = B00 * A[0, 0] + B01 * A[1, 0]
    D01 = B00 * A[0, 1] + B01 * A[1, 1]
    D11 = B01 * A[0, 1] + B11 * A[1, 1]
    # b = B y, expanded elementwise.
    b0 = y[0] - 2 * y[0] * v0 * v0 - 2 * y[1] * v0 * v1
    b1 = y[1] - 2 * y[0] * v1 * v0 - 2 * y[1] * v1 * v1
    # Second reflection E = I - 2 v v^T, built from the first ROW of D so
    # that (right-)applying it eliminates the D01 entry; same 0/0 guard.
    n = _norm(D00, D01)
    u0 = D00 - n
    u1 = D01
    nu = _norm(u0, u1)
    with errstate(invalid="ignore", divide="ignore"):
        v0 = nan_to_num(u0 / nu)
        v1 = nan_to_num(u1 / nu)
    E00 = 1 - 2 * v0 * v0
    E01 = 0 - 2 * v0 * v1
    E11 = 1 - 2 * v1 * v1
    # F = D E is triangular; F01 here holds its (1, 0) entry (the (0, 1)
    # entry is zero by the choice of E and is not computed).
    F00 = E00 * D00 + E01 * D01
    F01 = E01 * D11
    F11 = E11 * D11
    # Flush a numerically tiny pivot to exactly zero so the pseudo-inverse
    # below treats the system as rank-deficient.
    F11 = (npy_abs(F11) > epsilon.small) * F11
    with errstate(divide="ignore", invalid="ignore"):
        # x/x/x equals 1/x for nonzero x but yields NaN (not inf) for a
        # zero pivot, which nan_to_num then maps to 0 — i.e. a Moore-Penrose
        # style pseudo-inverse of the triangular F.
        Fi00 = nan_to_num(F00 / F00 / F00)
        Fi11 = nan_to_num(F11 / F11 / F11)
        Fi10 = nan_to_num(-(F01 / F00) * Fi11)
    # Back-substitute: c = F^+ b, then undo the second reflection (E is
    # symmetric, so applying it again recovers x).
    c0 = Fi00 * b0
    c1 = Fi10 * b0 + Fi11 * b1
    x0 = E00 * c0 + E01 * c1
    x1 = E01 * c0 + E11 * c1
    return array([x0, x1])
python
def hsolve(A, y): r"""Solver for the linear equations of two variables and equations only. It uses Householder reductions to solve ``Ax = y`` in a robust manner. Parameters ---------- A : array_like Coefficient matrix. y : array_like Ordinate values. Returns ------- :class:`numpy.ndarray` Solution ``x``. """ n = _norm(A[0, 0], A[1, 0]) u0 = A[0, 0] - n u1 = A[1, 0] nu = _norm(u0, u1) with errstate(invalid="ignore", divide="ignore"): v0 = nan_to_num(u0 / nu) v1 = nan_to_num(u1 / nu) B00 = 1 - 2 * v0 * v0 B01 = 0 - 2 * v0 * v1 B11 = 1 - 2 * v1 * v1 D00 = B00 * A[0, 0] + B01 * A[1, 0] D01 = B00 * A[0, 1] + B01 * A[1, 1] D11 = B01 * A[0, 1] + B11 * A[1, 1] b0 = y[0] - 2 * y[0] * v0 * v0 - 2 * y[1] * v0 * v1 b1 = y[1] - 2 * y[0] * v1 * v0 - 2 * y[1] * v1 * v1 n = _norm(D00, D01) u0 = D00 - n u1 = D01 nu = _norm(u0, u1) with errstate(invalid="ignore", divide="ignore"): v0 = nan_to_num(u0 / nu) v1 = nan_to_num(u1 / nu) E00 = 1 - 2 * v0 * v0 E01 = 0 - 2 * v0 * v1 E11 = 1 - 2 * v1 * v1 F00 = E00 * D00 + E01 * D01 F01 = E01 * D11 F11 = E11 * D11 F11 = (npy_abs(F11) > epsilon.small) * F11 with errstate(divide="ignore", invalid="ignore"): Fi00 = nan_to_num(F00 / F00 / F00) Fi11 = nan_to_num(F11 / F11 / F11) Fi10 = nan_to_num(-(F01 / F00) * Fi11) c0 = Fi00 * b0 c1 = Fi10 * b0 + Fi11 * b1 x0 = E00 * c0 + E01 * c1 x1 = E01 * c0 + E11 * c1 return array([x0, x1])
[ "def", "hsolve", "(", "A", ",", "y", ")", ":", "n", "=", "_norm", "(", "A", "[", "0", ",", "0", "]", ",", "A", "[", "1", ",", "0", "]", ")", "u0", "=", "A", "[", "0", ",", "0", "]", "-", "n", "u1", "=", "A", "[", "1", ",", "0", "]...
r"""Solver for the linear equations of two variables and equations only. It uses Householder reductions to solve ``Ax = y`` in a robust manner. Parameters ---------- A : array_like Coefficient matrix. y : array_like Ordinate values. Returns ------- :class:`numpy.ndarray` Solution ``x``.
[ "r", "Solver", "for", "the", "linear", "equations", "of", "two", "variables", "and", "equations", "only", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/solve.py#L31-L98
limix/numpy-sugar
numpy_sugar/linalg/solve.py
solve
def solve(A, b):
    r"""Solve for the linear equations :math:`\mathrm A \mathbf x = \mathbf b`.

    Systems of one or two equations are solved directly by inverting the
    1x1 or 2x2 coefficient matrix in closed form; larger systems are
    delegated to the generic solver.

    Args:
        A (array_like): Coefficient matrix.
        b (array_like): Ordinate values.

    Returns:
        :class:`numpy.ndarray`: Solution ``x``.

    Raises:
        numpy.linalg.LinAlgError: If the closed-form inverse is not finite
            (singular matrix).
    """
    A = asarray(A, float)
    b = asarray(b, float)
    size = A.shape[0]

    if size == 1:
        with errstate(divide="ignore"):
            inv = array([[1.0 / A[0, 0]]])
        if not isfinite(inv[0, 0]):
            raise LinAlgError("Division error.")
        return dot(inv, b)

    if size == 2:
        # Closed-form 2x2 inverse: adjugate divided by the determinant.
        a00, a01 = A[0, 0], A[0, 1]
        a10, a11 = A[1, 0], A[1, 1]
        adj = array([[a11, -a01], [-a10, a00]])
        with errstate(divide="ignore"):
            adj /= a00 * a11 - a01 * a10
        if not npy_all(isfinite(adj)):
            raise LinAlgError("Division error.")
        return dot(adj, b)

    return _solve(A, b)
python
def solve(A, b): r"""Solve for the linear equations :math:`\mathrm A \mathbf x = \mathbf b`. Args: A (array_like): Coefficient matrix. b (array_like): Ordinate values. Returns: :class:`numpy.ndarray`: Solution ``x``. """ A = asarray(A, float) b = asarray(b, float) if A.shape[0] == 1: with errstate(divide="ignore"): A_ = array([[1.0 / A[0, 0]]]) if not isfinite(A_[0, 0]): raise LinAlgError("Division error.") return dot(A_, b) elif A.shape[0] == 2: a = A[0, 0] b_ = A[0, 1] c = A[1, 0] d = A[1, 1] A_ = array([[d, -b_], [-c, a]]) with errstate(divide="ignore"): A_ /= a * d - b_ * c if not npy_all(isfinite(A_)): raise LinAlgError("Division error.") return dot(A_, b) return _solve(A, b)
[ "def", "solve", "(", "A", ",", "b", ")", ":", "A", "=", "asarray", "(", "A", ",", "float", ")", "b", "=", "asarray", "(", "b", ",", "float", ")", "if", "A", ".", "shape", "[", "0", "]", "==", "1", ":", "with", "errstate", "(", "divide", "="...
r"""Solve for the linear equations :math:`\mathrm A \mathbf x = \mathbf b`. Args: A (array_like): Coefficient matrix. b (array_like): Ordinate values. Returns: :class:`numpy.ndarray`: Solution ``x``.
[ "r", "Solve", "for", "the", "linear", "equations", ":", "math", ":", "\\", "mathrm", "A", "\\", "mathbf", "x", "=", "\\", "mathbf", "b", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/solve.py#L101-L136
limix/numpy-sugar
numpy_sugar/linalg/solve.py
rsolve
def rsolve(A, b, epsilon=_epsilon):
    r"""Robust solve for the linear equations.

    Attempts a rank-revealing least-squares solve first.  If the matrix is
    numerically rank zero the zero vector is returned; if the least-squares
    routine fails outright, the generic solver is used as a fallback.

    Args:
        A (array_like): Coefficient matrix.
        b (array_like): Ordinate values.

    Returns:
        :class:`numpy.ndarray`: Solution ``x``.
    """
    A = asarray(A, float)
    b = asarray(b, float)

    # Degenerate shapes: no equations, or no unknowns.
    if A.shape[0] == 0:
        return zeros((A.shape[1],))
    if A.shape[1] == 0:
        return zeros((0,))

    try:
        result = lstsq(A, b, rcond=epsilon)
        # result[3] holds the singular values; count those above threshold.
        rank = sum(result[3] > epsilon)
        if rank == 0:
            # Numerically rank-zero system: minimum-norm answer is zero.
            return zeros(A.shape[1])
        return result[0]
    except (ValueError, LinAlgError) as e:
        warnings.warn(str(e), RuntimeWarning)
        return solve(A, b)
python
def rsolve(A, b, epsilon=_epsilon): r"""Robust solve for the linear equations. Args: A (array_like): Coefficient matrix. b (array_like): Ordinate values. Returns: :class:`numpy.ndarray`: Solution ``x``. """ A = asarray(A, float) b = asarray(b, float) if A.shape[0] == 0: return zeros((A.shape[1],)) if A.shape[1] == 0: return zeros((0,)) try: x = lstsq(A, b, rcond=epsilon) r = sum(x[3] > epsilon) if r == 0: return zeros(A.shape[1]) return x[0] except (ValueError, LinAlgError) as e: warnings.warn(str(e), RuntimeWarning) return solve(A, b)
[ "def", "rsolve", "(", "A", ",", "b", ",", "epsilon", "=", "_epsilon", ")", ":", "A", "=", "asarray", "(", "A", ",", "float", ")", "b", "=", "asarray", "(", "b", ",", "float", ")", "if", "A", ".", "shape", "[", "0", "]", "==", "0", ":", "ret...
r"""Robust solve for the linear equations. Args: A (array_like): Coefficient matrix. b (array_like): Ordinate values. Returns: :class:`numpy.ndarray`: Solution ``x``.
[ "r", "Robust", "solve", "for", "the", "linear", "equations", "." ]
train
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/solve.py#L139-L163