repository_name stringlengths 7 55 | func_path_in_repository stringlengths 4 223 | func_name stringlengths 1 134 | whole_func_string stringlengths 75 104k | language stringclasses 1
value | func_code_string stringlengths 75 104k | func_code_tokens listlengths 19 28.4k | func_documentation_string stringlengths 1 46.9k | func_documentation_tokens listlengths 1 1.97k | split_name stringclasses 1
value | func_code_url stringlengths 87 315 |
|---|---|---|---|---|---|---|---|---|---|---|
moonso/loqusdb | loqusdb/plugins/mongo/variant.py | VariantMixin.add_variants | def add_variants(self, variants):
"""Add a bulk of variants
This could be used for faster inserts
Args:
variants(iterable(dict))
"""
operations = []
nr_inserted = 0
for i,variant in enumerate(variants, 1):
# We need to check if there was any information returned
# The variant could be excluded based on low gq or if no individiual was called
# in the particular case
if not variant:
continue
nr_inserted += 1
update = self._get_update(variant)
operations.append(
UpdateOne(
{'_id': variant['_id']},
update,
upsert=True
)
)
if i % 10000 == 0:
self.db.variant.bulk_write(operations, ordered=False)
operations = []
if len(operations) > 0:
self.db.variant.bulk_write(operations, ordered=False)
return nr_inserted | python | def add_variants(self, variants):
"""Add a bulk of variants
This could be used for faster inserts
Args:
variants(iterable(dict))
"""
operations = []
nr_inserted = 0
for i,variant in enumerate(variants, 1):
# We need to check if there was any information returned
# The variant could be excluded based on low gq or if no individiual was called
# in the particular case
if not variant:
continue
nr_inserted += 1
update = self._get_update(variant)
operations.append(
UpdateOne(
{'_id': variant['_id']},
update,
upsert=True
)
)
if i % 10000 == 0:
self.db.variant.bulk_write(operations, ordered=False)
operations = []
if len(operations) > 0:
self.db.variant.bulk_write(operations, ordered=False)
return nr_inserted | [
"def",
"add_variants",
"(",
"self",
",",
"variants",
")",
":",
"operations",
"=",
"[",
"]",
"nr_inserted",
"=",
"0",
"for",
"i",
",",
"variant",
"in",
"enumerate",
"(",
"variants",
",",
"1",
")",
":",
"# We need to check if there was any information returned",
... | Add a bulk of variants
This could be used for faster inserts
Args:
variants(iterable(dict)) | [
"Add",
"a",
"bulk",
"of",
"variants",
"This",
"could",
"be",
"used",
"for",
"faster",
"inserts",
"Args",
":",
"variants",
"(",
"iterable",
"(",
"dict",
"))"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/variant.py#L70-L104 |
moonso/loqusdb | loqusdb/plugins/mongo/variant.py | VariantMixin.search_variants | def search_variants(self, variant_ids):
"""Make a batch search for variants in the database
Args:
variant_ids(list(str)): List of variant ids
Returns:
res(pymngo.Cursor(variant_obj)): The result
"""
query = {'_id': {'$in': variant_ids}}
return self.db.variant.find(query) | python | def search_variants(self, variant_ids):
"""Make a batch search for variants in the database
Args:
variant_ids(list(str)): List of variant ids
Returns:
res(pymngo.Cursor(variant_obj)): The result
"""
query = {'_id': {'$in': variant_ids}}
return self.db.variant.find(query) | [
"def",
"search_variants",
"(",
"self",
",",
"variant_ids",
")",
":",
"query",
"=",
"{",
"'_id'",
":",
"{",
"'$in'",
":",
"variant_ids",
"}",
"}",
"return",
"self",
".",
"db",
".",
"variant",
".",
"find",
"(",
"query",
")"
] | Make a batch search for variants in the database
Args:
variant_ids(list(str)): List of variant ids
Returns:
res(pymngo.Cursor(variant_obj)): The result | [
"Make",
"a",
"batch",
"search",
"for",
"variants",
"in",
"the",
"database",
"Args",
":",
"variant_ids",
"(",
"list",
"(",
"str",
"))",
":",
"List",
"of",
"variant",
"ids",
"Returns",
":",
"res",
"(",
"pymngo",
".",
"Cursor",
"(",
"variant_obj",
"))",
"... | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/variant.py#L123-L134 |
moonso/loqusdb | loqusdb/plugins/mongo/variant.py | VariantMixin.get_variants | def get_variants(self, chromosome=None, start=None, end=None):
"""Return all variants in the database
If no region is specified all variants will be returned.
Args:
chromosome(str)
start(int)
end(int)
Returns:
variants(Iterable(Variant))
"""
query = {}
if chromosome:
query['chrom'] = chromosome
if start:
query['start'] = {'$lte': end}
query['end'] = {'$gte': start}
LOG.info("Find all variants {}".format(query))
return self.db.variant.find(query).sort([('start', ASCENDING)]) | python | def get_variants(self, chromosome=None, start=None, end=None):
"""Return all variants in the database
If no region is specified all variants will be returned.
Args:
chromosome(str)
start(int)
end(int)
Returns:
variants(Iterable(Variant))
"""
query = {}
if chromosome:
query['chrom'] = chromosome
if start:
query['start'] = {'$lte': end}
query['end'] = {'$gte': start}
LOG.info("Find all variants {}".format(query))
return self.db.variant.find(query).sort([('start', ASCENDING)]) | [
"def",
"get_variants",
"(",
"self",
",",
"chromosome",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"query",
"=",
"{",
"}",
"if",
"chromosome",
":",
"query",
"[",
"'chrom'",
"]",
"=",
"chromosome",
"if",
"start",
":",
... | Return all variants in the database
If no region is specified all variants will be returned.
Args:
chromosome(str)
start(int)
end(int)
Returns:
variants(Iterable(Variant)) | [
"Return",
"all",
"variants",
"in",
"the",
"database",
"If",
"no",
"region",
"is",
"specified",
"all",
"variants",
"will",
"be",
"returned",
"."
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/variant.py#L137-L157 |
moonso/loqusdb | loqusdb/plugins/mongo/variant.py | VariantMixin.delete_variant | def delete_variant(self, variant):
"""Delete observation in database
This means that we take down the observations variable with one.
If 'observations' == 1 we remove the variant. If variant was homozygote
we decrease 'homozygote' with one.
Also remove the family from array 'families'.
Args:
variant (dict): A variant dictionary
"""
mongo_variant = self.get_variant(variant)
if mongo_variant:
if mongo_variant['observations'] == 1:
LOG.debug("Removing variant {0}".format(
mongo_variant.get('_id')
))
message = self.db.variant.delete_one({'_id': variant['_id']})
else:
LOG.debug("Decreasing observations for {0}".format(
mongo_variant.get('_id')
))
message = self.db.variant.update_one({
'_id': mongo_variant['_id']
},{
'$inc': {
'observations': -1,
'homozygote': - (variant.get('homozygote', 0)),
'hemizygote': - (variant.get('hemizygote', 0)),
},
'$pull': {
'families': variant.get('case_id')
}
}, upsert=False)
return | python | def delete_variant(self, variant):
"""Delete observation in database
This means that we take down the observations variable with one.
If 'observations' == 1 we remove the variant. If variant was homozygote
we decrease 'homozygote' with one.
Also remove the family from array 'families'.
Args:
variant (dict): A variant dictionary
"""
mongo_variant = self.get_variant(variant)
if mongo_variant:
if mongo_variant['observations'] == 1:
LOG.debug("Removing variant {0}".format(
mongo_variant.get('_id')
))
message = self.db.variant.delete_one({'_id': variant['_id']})
else:
LOG.debug("Decreasing observations for {0}".format(
mongo_variant.get('_id')
))
message = self.db.variant.update_one({
'_id': mongo_variant['_id']
},{
'$inc': {
'observations': -1,
'homozygote': - (variant.get('homozygote', 0)),
'hemizygote': - (variant.get('hemizygote', 0)),
},
'$pull': {
'families': variant.get('case_id')
}
}, upsert=False)
return | [
"def",
"delete_variant",
"(",
"self",
",",
"variant",
")",
":",
"mongo_variant",
"=",
"self",
".",
"get_variant",
"(",
"variant",
")",
"if",
"mongo_variant",
":",
"if",
"mongo_variant",
"[",
"'observations'",
"]",
"==",
"1",
":",
"LOG",
".",
"debug",
"(",
... | Delete observation in database
This means that we take down the observations variable with one.
If 'observations' == 1 we remove the variant. If variant was homozygote
we decrease 'homozygote' with one.
Also remove the family from array 'families'.
Args:
variant (dict): A variant dictionary | [
"Delete",
"observation",
"in",
"database"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/variant.py#L159-L196 |
moonso/loqusdb | loqusdb/plugins/mongo/variant.py | VariantMixin.get_chromosomes | def get_chromosomes(self, sv=False):
"""Return a list of all chromosomes found in database
Args:
sv(bool): if sv variants should be choosen
Returns:
res(iterable(str)): An iterable with all chromosomes in the database
"""
if sv:
res = self.db.structural_variant.distinct('chrom')
else:
res = self.db.variant.distinct('chrom')
return res | python | def get_chromosomes(self, sv=False):
"""Return a list of all chromosomes found in database
Args:
sv(bool): if sv variants should be choosen
Returns:
res(iterable(str)): An iterable with all chromosomes in the database
"""
if sv:
res = self.db.structural_variant.distinct('chrom')
else:
res = self.db.variant.distinct('chrom')
return res | [
"def",
"get_chromosomes",
"(",
"self",
",",
"sv",
"=",
"False",
")",
":",
"if",
"sv",
":",
"res",
"=",
"self",
".",
"db",
".",
"structural_variant",
".",
"distinct",
"(",
"'chrom'",
")",
"else",
":",
"res",
"=",
"self",
".",
"db",
".",
"variant",
"... | Return a list of all chromosomes found in database
Args:
sv(bool): if sv variants should be choosen
Returns:
res(iterable(str)): An iterable with all chromosomes in the database | [
"Return",
"a",
"list",
"of",
"all",
"chromosomes",
"found",
"in",
"database",
"Args",
":",
"sv",
"(",
"bool",
")",
":",
"if",
"sv",
"variants",
"should",
"be",
"choosen",
"Returns",
":",
"res",
"(",
"iterable",
"(",
"str",
"))",
":",
"An",
"iterable",
... | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/variant.py#L199-L213 |
moonso/loqusdb | loqusdb/plugins/mongo/variant.py | VariantMixin.get_max_position | def get_max_position(self, chrom):
"""Get the last position observed on a chromosome in the database
Args:
chrom(str)
Returns:
end(int): The largest end position found
"""
res = self.db.variant.find({'chrom':chrom}, {'_id':0, 'end':1}).sort([('end', DESCENDING)]).limit(1)
end = 0
for variant in res:
end = variant['end']
return end | python | def get_max_position(self, chrom):
"""Get the last position observed on a chromosome in the database
Args:
chrom(str)
Returns:
end(int): The largest end position found
"""
res = self.db.variant.find({'chrom':chrom}, {'_id':0, 'end':1}).sort([('end', DESCENDING)]).limit(1)
end = 0
for variant in res:
end = variant['end']
return end | [
"def",
"get_max_position",
"(",
"self",
",",
"chrom",
")",
":",
"res",
"=",
"self",
".",
"db",
".",
"variant",
".",
"find",
"(",
"{",
"'chrom'",
":",
"chrom",
"}",
",",
"{",
"'_id'",
":",
"0",
",",
"'end'",
":",
"1",
"}",
")",
".",
"sort",
"(",... | Get the last position observed on a chromosome in the database
Args:
chrom(str)
Returns:
end(int): The largest end position found | [
"Get",
"the",
"last",
"position",
"observed",
"on",
"a",
"chromosome",
"in",
"the",
"database",
"Args",
":",
"chrom",
"(",
"str",
")",
"Returns",
":",
"end",
"(",
"int",
")",
":",
"The",
"largest",
"end",
"position",
"found"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/variant.py#L215-L229 |
moonso/loqusdb | loqusdb/commands/restore.py | restore | def restore(ctx, filename):
"""Restore the database from a zipped file.
Default is to restore from db dump in loqusdb/resources/
"""
filename = filename or background_path
if not os.path.isfile(filename):
LOG.warning("File {} does not exist. Please point to a valid file".format(filename))
ctx.abort()
call = ['mongorestore', '--gzip', '--db', 'loqusdb', '--archive={}'.format(filename)]
LOG.info('Restoring database from %s', filename)
start_time = datetime.now()
try:
completed = subprocess.run(call, check=True)
except subprocess.CalledProcessError as err:
LOG.warning(err)
ctx.abort()
LOG.info('Database restored succesfully')
LOG.info('Time to restore database: {0}'.format(datetime.now()-start_time)) | python | def restore(ctx, filename):
"""Restore the database from a zipped file.
Default is to restore from db dump in loqusdb/resources/
"""
filename = filename or background_path
if not os.path.isfile(filename):
LOG.warning("File {} does not exist. Please point to a valid file".format(filename))
ctx.abort()
call = ['mongorestore', '--gzip', '--db', 'loqusdb', '--archive={}'.format(filename)]
LOG.info('Restoring database from %s', filename)
start_time = datetime.now()
try:
completed = subprocess.run(call, check=True)
except subprocess.CalledProcessError as err:
LOG.warning(err)
ctx.abort()
LOG.info('Database restored succesfully')
LOG.info('Time to restore database: {0}'.format(datetime.now()-start_time)) | [
"def",
"restore",
"(",
"ctx",
",",
"filename",
")",
":",
"filename",
"=",
"filename",
"or",
"background_path",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"LOG",
".",
"warning",
"(",
"\"File {} does not exist. Please point to a va... | Restore the database from a zipped file.
Default is to restore from db dump in loqusdb/resources/ | [
"Restore",
"the",
"database",
"from",
"a",
"zipped",
"file",
".",
"Default",
"is",
"to",
"restore",
"from",
"db",
"dump",
"in",
"loqusdb",
"/",
"resources",
"/"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/restore.py#L21-L42 |
yjzhang/uncurl_python | uncurl/sampling.py | downsample | def downsample(data, percent):
"""
downsample the data by removing a given percentage of the reads.
Args:
data: genes x cells array or sparse matrix
percent: float between 0 and 1
"""
n_genes = data.shape[0]
n_cells = data.shape[1]
new_data = data.copy()
total_count = float(data.sum())
to_remove = total_count*percent
# sum of read counts per cell
cell_sums = data.sum(0).astype(float)
# probability of selecting genes per cell
cell_gene_probs = data/cell_sums
# probability of selecting cells
cell_probs = np.array(cell_sums/total_count).flatten()
cells_selected = np.random.multinomial(to_remove, pvals=cell_probs)
for i, num_selected in enumerate(cells_selected):
cell_gene = np.array(cell_gene_probs[:,i]).flatten()
genes_selected = np.random.multinomial(num_selected, pvals=cell_gene)
if sparse.issparse(data):
genes_selected = sparse.csc_matrix(genes_selected).T
new_data[:,i] -= genes_selected
new_data[new_data < 0] = 0
return new_data | python | def downsample(data, percent):
"""
downsample the data by removing a given percentage of the reads.
Args:
data: genes x cells array or sparse matrix
percent: float between 0 and 1
"""
n_genes = data.shape[0]
n_cells = data.shape[1]
new_data = data.copy()
total_count = float(data.sum())
to_remove = total_count*percent
# sum of read counts per cell
cell_sums = data.sum(0).astype(float)
# probability of selecting genes per cell
cell_gene_probs = data/cell_sums
# probability of selecting cells
cell_probs = np.array(cell_sums/total_count).flatten()
cells_selected = np.random.multinomial(to_remove, pvals=cell_probs)
for i, num_selected in enumerate(cells_selected):
cell_gene = np.array(cell_gene_probs[:,i]).flatten()
genes_selected = np.random.multinomial(num_selected, pvals=cell_gene)
if sparse.issparse(data):
genes_selected = sparse.csc_matrix(genes_selected).T
new_data[:,i] -= genes_selected
new_data[new_data < 0] = 0
return new_data | [
"def",
"downsample",
"(",
"data",
",",
"percent",
")",
":",
"n_genes",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"n_cells",
"=",
"data",
".",
"shape",
"[",
"1",
"]",
"new_data",
"=",
"data",
".",
"copy",
"(",
")",
"total_count",
"=",
"float",
"(",
... | downsample the data by removing a given percentage of the reads.
Args:
data: genes x cells array or sparse matrix
percent: float between 0 and 1 | [
"downsample",
"the",
"data",
"by",
"removing",
"a",
"given",
"percentage",
"of",
"the",
"reads",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/sampling.py#L7-L34 |
yjzhang/uncurl_python | uncurl/nb_state_estimation.py | _create_w_objective | def _create_w_objective(m, X, R):
"""
Creates an objective function and its derivative for W, given M and X (data)
Args:
m (array): genes x clusters
X (array): genes x cells
R (array): 1 x genes
"""
genes, clusters = m.shape
cells = X.shape[1]
R1 = R.reshape((genes, 1)).dot(np.ones((1, cells)))
def objective(w):
# convert w into a matrix first... because it's a vector for
# optimization purposes
w = w.reshape((m.shape[1], X.shape[1]))
d = m.dot(w)+eps
return np.sum((X + R1)*np.log(d + R1) - X*np.log(d))/genes
def deriv(w):
# derivative of objective wrt all elements of w
# for w_{ij}, the derivative is... m_j1+...+m_jn sum over genes minus
# x_ij
w2 = w.reshape((m.shape[1], X.shape[1]))
d = m.dot(w2)+eps
temp = X/d
temp2 = (X+R1)/(d+R1)
m1 = m.T.dot(temp2)
m2 = m.T.dot(temp)
deriv = m1 - m2
return deriv.flatten()/genes
return objective, deriv | python | def _create_w_objective(m, X, R):
"""
Creates an objective function and its derivative for W, given M and X (data)
Args:
m (array): genes x clusters
X (array): genes x cells
R (array): 1 x genes
"""
genes, clusters = m.shape
cells = X.shape[1]
R1 = R.reshape((genes, 1)).dot(np.ones((1, cells)))
def objective(w):
# convert w into a matrix first... because it's a vector for
# optimization purposes
w = w.reshape((m.shape[1], X.shape[1]))
d = m.dot(w)+eps
return np.sum((X + R1)*np.log(d + R1) - X*np.log(d))/genes
def deriv(w):
# derivative of objective wrt all elements of w
# for w_{ij}, the derivative is... m_j1+...+m_jn sum over genes minus
# x_ij
w2 = w.reshape((m.shape[1], X.shape[1]))
d = m.dot(w2)+eps
temp = X/d
temp2 = (X+R1)/(d+R1)
m1 = m.T.dot(temp2)
m2 = m.T.dot(temp)
deriv = m1 - m2
return deriv.flatten()/genes
return objective, deriv | [
"def",
"_create_w_objective",
"(",
"m",
",",
"X",
",",
"R",
")",
":",
"genes",
",",
"clusters",
"=",
"m",
".",
"shape",
"cells",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
"R1",
"=",
"R",
".",
"reshape",
"(",
"(",
"genes",
",",
"1",
")",
")",
"."... | Creates an objective function and its derivative for W, given M and X (data)
Args:
m (array): genes x clusters
X (array): genes x cells
R (array): 1 x genes | [
"Creates",
"an",
"objective",
"function",
"and",
"its",
"derivative",
"for",
"W",
"given",
"M",
"and",
"X",
"(",
"data",
")"
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_state_estimation.py#L12-L42 |
yjzhang/uncurl_python | uncurl/nb_state_estimation.py | nb_estimate_state | def nb_estimate_state(data, clusters, R=None, init_means=None, init_weights=None, max_iters=10, tol=1e-4, disp=True, inner_max_iters=400, normalize=True):
"""
Uses a Negative Binomial Mixture model to estimate cell states and
cell state mixing weights.
If some of the genes do not fit a negative binomial distribution
(mean > var), then the genes are discarded from the analysis.
Args:
data (array): genes x cells
clusters (int): number of mixture components
R (array, optional): vector of length genes containing the dispersion estimates for each gene. Default: use nb_fit
init_means (array, optional): initial centers - genes x clusters. Default: kmeans++ initializations
init_weights (array, optional): initial weights - clusters x cells. Default: random(0,1)
max_iters (int, optional): maximum number of iterations. Default: 10
tol (float, optional): if both M and W change by less than tol (in RMSE), then the iteration is stopped. Default: 1e-4
disp (bool, optional): whether or not to display optimization parameters. Default: True
inner_max_iters (int, optional): Number of iterations to run in the scipy minimizer for M and W. Default: 400
normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True.
Returns:
M (array): genes x clusters - state centers
W (array): clusters x cells - state mixing components for each cell
R (array): 1 x genes - NB dispersion parameter for each gene
ll (float): Log-likelihood of final iteration
"""
# TODO: deal with non-NB data... just ignore it? or do something else?
data_subset = data.copy()
genes, cells = data_subset.shape
# 1. use nb_fit to get inital Rs
if R is None:
nb_indices = find_nb_genes(data)
data_subset = data[nb_indices, :]
if init_means is not None and len(init_means) > sum(nb_indices):
init_means = init_means[nb_indices, :]
genes, cells = data_subset.shape
R = np.zeros(genes)
P, R = nb_fit(data_subset)
if init_means is None:
means, assignments = kmeans_pp(data_subset, clusters)
else:
means = init_means.copy()
clusters = means.shape[1]
w_init = np.random.random(cells*clusters)
if init_weights is not None:
if len(init_weights.shape)==1:
init_weights = initialize_from_assignments(init_weights, clusters)
w_init = init_weights.reshape(cells*clusters)
m_init = means.reshape(genes*clusters)
ll = np.inf
# repeat steps 1 and 2 until convergence:
for i in range(max_iters):
if disp:
print('iter: {0}'.format(i))
w_bounds = [(0, 1.0) for x in w_init]
m_bounds = [(0, None) for x in m_init]
# step 1: given M, estimate W
w_objective, w_deriv = _create_w_objective(means, data_subset, R)
w_res = minimize(w_objective, w_init, method='L-BFGS-B', jac=w_deriv, bounds=w_bounds, options={'disp':disp, 'maxiter':inner_max_iters})
w_diff = np.sqrt(np.sum((w_res.x-w_init)**2))/w_init.size
w_new = w_res.x.reshape((clusters, cells))
w_init = w_res.x
# step 2: given W, update M
m_objective, m_deriv = _create_m_objective(w_new, data_subset, R)
# method could be 'L-BFGS-B' or 'SLSQP'... SLSQP gives a memory error...
# or use TNC...
m_res = minimize(m_objective, m_init, method='L-BFGS-B', jac=m_deriv, bounds=m_bounds, options={'disp':disp, 'maxiter':inner_max_iters})
m_diff = np.sqrt(np.sum((m_res.x-m_init)**2))/m_init.size
m_new = m_res.x.reshape((genes, clusters))
m_init = m_res.x
ll = m_res.fun
means = m_new
if w_diff < tol and m_diff < tol:
break
if normalize:
w_new = w_new/w_new.sum(0)
return m_new, w_new, R, ll | python | def nb_estimate_state(data, clusters, R=None, init_means=None, init_weights=None, max_iters=10, tol=1e-4, disp=True, inner_max_iters=400, normalize=True):
"""
Uses a Negative Binomial Mixture model to estimate cell states and
cell state mixing weights.
If some of the genes do not fit a negative binomial distribution
(mean > var), then the genes are discarded from the analysis.
Args:
data (array): genes x cells
clusters (int): number of mixture components
R (array, optional): vector of length genes containing the dispersion estimates for each gene. Default: use nb_fit
init_means (array, optional): initial centers - genes x clusters. Default: kmeans++ initializations
init_weights (array, optional): initial weights - clusters x cells. Default: random(0,1)
max_iters (int, optional): maximum number of iterations. Default: 10
tol (float, optional): if both M and W change by less than tol (in RMSE), then the iteration is stopped. Default: 1e-4
disp (bool, optional): whether or not to display optimization parameters. Default: True
inner_max_iters (int, optional): Number of iterations to run in the scipy minimizer for M and W. Default: 400
normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True.
Returns:
M (array): genes x clusters - state centers
W (array): clusters x cells - state mixing components for each cell
R (array): 1 x genes - NB dispersion parameter for each gene
ll (float): Log-likelihood of final iteration
"""
# TODO: deal with non-NB data... just ignore it? or do something else?
data_subset = data.copy()
genes, cells = data_subset.shape
# 1. use nb_fit to get inital Rs
if R is None:
nb_indices = find_nb_genes(data)
data_subset = data[nb_indices, :]
if init_means is not None and len(init_means) > sum(nb_indices):
init_means = init_means[nb_indices, :]
genes, cells = data_subset.shape
R = np.zeros(genes)
P, R = nb_fit(data_subset)
if init_means is None:
means, assignments = kmeans_pp(data_subset, clusters)
else:
means = init_means.copy()
clusters = means.shape[1]
w_init = np.random.random(cells*clusters)
if init_weights is not None:
if len(init_weights.shape)==1:
init_weights = initialize_from_assignments(init_weights, clusters)
w_init = init_weights.reshape(cells*clusters)
m_init = means.reshape(genes*clusters)
ll = np.inf
# repeat steps 1 and 2 until convergence:
for i in range(max_iters):
if disp:
print('iter: {0}'.format(i))
w_bounds = [(0, 1.0) for x in w_init]
m_bounds = [(0, None) for x in m_init]
# step 1: given M, estimate W
w_objective, w_deriv = _create_w_objective(means, data_subset, R)
w_res = minimize(w_objective, w_init, method='L-BFGS-B', jac=w_deriv, bounds=w_bounds, options={'disp':disp, 'maxiter':inner_max_iters})
w_diff = np.sqrt(np.sum((w_res.x-w_init)**2))/w_init.size
w_new = w_res.x.reshape((clusters, cells))
w_init = w_res.x
# step 2: given W, update M
m_objective, m_deriv = _create_m_objective(w_new, data_subset, R)
# method could be 'L-BFGS-B' or 'SLSQP'... SLSQP gives a memory error...
# or use TNC...
m_res = minimize(m_objective, m_init, method='L-BFGS-B', jac=m_deriv, bounds=m_bounds, options={'disp':disp, 'maxiter':inner_max_iters})
m_diff = np.sqrt(np.sum((m_res.x-m_init)**2))/m_init.size
m_new = m_res.x.reshape((genes, clusters))
m_init = m_res.x
ll = m_res.fun
means = m_new
if w_diff < tol and m_diff < tol:
break
if normalize:
w_new = w_new/w_new.sum(0)
return m_new, w_new, R, ll | [
"def",
"nb_estimate_state",
"(",
"data",
",",
"clusters",
",",
"R",
"=",
"None",
",",
"init_means",
"=",
"None",
",",
"init_weights",
"=",
"None",
",",
"max_iters",
"=",
"10",
",",
"tol",
"=",
"1e-4",
",",
"disp",
"=",
"True",
",",
"inner_max_iters",
"... | Uses a Negative Binomial Mixture model to estimate cell states and
cell state mixing weights.
If some of the genes do not fit a negative binomial distribution
(mean > var), then the genes are discarded from the analysis.
Args:
data (array): genes x cells
clusters (int): number of mixture components
R (array, optional): vector of length genes containing the dispersion estimates for each gene. Default: use nb_fit
init_means (array, optional): initial centers - genes x clusters. Default: kmeans++ initializations
init_weights (array, optional): initial weights - clusters x cells. Default: random(0,1)
max_iters (int, optional): maximum number of iterations. Default: 10
tol (float, optional): if both M and W change by less than tol (in RMSE), then the iteration is stopped. Default: 1e-4
disp (bool, optional): whether or not to display optimization parameters. Default: True
inner_max_iters (int, optional): Number of iterations to run in the scipy minimizer for M and W. Default: 400
normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True.
Returns:
M (array): genes x clusters - state centers
W (array): clusters x cells - state mixing components for each cell
R (array): 1 x genes - NB dispersion parameter for each gene
ll (float): Log-likelihood of final iteration | [
"Uses",
"a",
"Negative",
"Binomial",
"Mixture",
"model",
"to",
"estimate",
"cell",
"states",
"and",
"cell",
"state",
"mixing",
"weights",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_state_estimation.py#L71-L147 |
yjzhang/uncurl_python | uncurl/lightlda_utils.py | poisson_objective | def poisson_objective(X, m, w):
"""
Creates an objective function and its derivative for M, given W and X
Args:
w (array): clusters x cells
X (array): genes x cells
selected_genes (array): array of ints - genes to be selected
"""
clusters, cells = w.shape
genes = X.shape[0]
#m = m.reshape((X.shape[0], w.shape[0]))
d = m.dot(w)+eps
#temp = X/d
#w_sum = w.sum(1)
#w2 = w.dot(temp.T)
#deriv = w_sum - w2.T
return np.sum(d - X*np.log(d))/genes | python | def poisson_objective(X, m, w):
"""
Creates an objective function and its derivative for M, given W and X
Args:
w (array): clusters x cells
X (array): genes x cells
selected_genes (array): array of ints - genes to be selected
"""
clusters, cells = w.shape
genes = X.shape[0]
#m = m.reshape((X.shape[0], w.shape[0]))
d = m.dot(w)+eps
#temp = X/d
#w_sum = w.sum(1)
#w2 = w.dot(temp.T)
#deriv = w_sum - w2.T
return np.sum(d - X*np.log(d))/genes | [
"def",
"poisson_objective",
"(",
"X",
",",
"m",
",",
"w",
")",
":",
"clusters",
",",
"cells",
"=",
"w",
".",
"shape",
"genes",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"#m = m.reshape((X.shape[0], w.shape[0]))",
"d",
"=",
"m",
".",
"dot",
"(",
"w",
")"... | Creates an objective function and its derivative for M, given W and X
Args:
w (array): clusters x cells
X (array): genes x cells
selected_genes (array): array of ints - genes to be selected | [
"Creates",
"an",
"objective",
"function",
"and",
"its",
"derivative",
"for",
"M",
"given",
"W",
"and",
"X",
"Args",
":",
"w",
"(",
"array",
")",
":",
"clusters",
"x",
"cells",
"X",
"(",
"array",
")",
":",
"genes",
"x",
"cells",
"selected_genes",
"(",
... | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/lightlda_utils.py#L114-L130 |
yjzhang/uncurl_python | uncurl/lightlda_utils.py | lightlda_estimate_state | def lightlda_estimate_state(data, k, input_folder="data1/LightLDA_input", threads=8, max_iters=250, prepare_data=True, init_means=None, init_weights=None, lightlda_folder=None, data_capacity=1000):
"""
Runs LDA on the given dataset (can be an 2-D array of any form - sparse
or dense, as long as it can be indexed). If the data has not already been
prepared into LDA format, set "prepare_data" to TRUE. If "prepare_data" is
FALSE, the method assumes that the data has already been preprocessed into
LightLDA format and is located at the given "input_folder".
"""
if lightlda_folder is None:
lightlda_folder = LIGHTLDA_FOLDER
if prepare_data:
prepare_lightlda_data(data, input_folder, lightlda_folder)
# Check if initializations for M/W were provided.
if ((init_means is not None) and (init_weights is None)) or ((init_means is None) and (init_weights is not None)):
raise ValueError("LightLDA requires that either both M and W be initialized, or neither. You initialized one but not the other.")
warm_start = False
# If we have initial M/W matrices, write to the model and doc-topic files
if (init_means is not None) and (init_weights is not None):
warm_start = True
init_means = init_means/init_means.sum(0)
init_weights = init_weights/init_weights.sum(0)
create_model_file("server_0_table_0.model", init_means)
create_model_file("doc_topic.0", init_weights.T)
print(init_means)
print("init_means")
# Run LightLDA
print("TRAINING")
# TODO: argument for data capacity
train_args = (os.path.join(lightlda_folder, "bin/lightlda"), "-num_vocabs", str(data.shape[0]), "-num_topics",
str(k), "-num_iterations", str(max_iters), "-alpha", "0.05", "-beta", "0.01", "-mh_steps", "2",
"-num_local_workers", str(threads), "-num_blocks", "1", "-max_num_document", str(data.shape[1]),
"-input_dir", input_folder, "-data_capacity", str(data_capacity))
if warm_start:
print("warm start")
train_args = train_args + ("-warm_start",)
# Call LightLDA
subprocess.call(train_args)
# Parse final model and doc-topic files to obtain M/W
print("data shape")
print(data.shape)
M = parse_model_file("server_0_table_0.model", k, data.shape[0])
W = parse_result_file("doc_topic.0", k)
# Not sure if normalization is correct
M = M * (np.mean(data) / np.mean(M))
W = W/W.sum(0)
print("shapes")
print(M.shape)
print(W.shape)
# TODO: poisson_objective doesn't work for sparse matrices
if sparse.issparse(data):
ll = 0
else:
ll = poisson_objective(data, M, W)
#M = M * (5./np.mean(M))
return M, W, ll | python | def lightlda_estimate_state(data, k, input_folder="data1/LightLDA_input", threads=8, max_iters=250, prepare_data=True, init_means=None, init_weights=None, lightlda_folder=None, data_capacity=1000):
"""
Runs LDA on the given dataset (can be an 2-D array of any form - sparse
or dense, as long as it can be indexed). If the data has not already been
prepared into LDA format, set "prepare_data" to TRUE. If "prepare_data" is
FALSE, the method assumes that the data has already been preprocessed into
LightLDA format and is located at the given "input_folder".
"""
if lightlda_folder is None:
lightlda_folder = LIGHTLDA_FOLDER
if prepare_data:
prepare_lightlda_data(data, input_folder, lightlda_folder)
# Check if initializations for M/W were provided.
if ((init_means is not None) and (init_weights is None)) or ((init_means is None) and (init_weights is not None)):
raise ValueError("LightLDA requires that either both M and W be initialized, or neither. You initialized one but not the other.")
warm_start = False
# If we have initial M/W matrices, write to the model and doc-topic files
if (init_means is not None) and (init_weights is not None):
warm_start = True
init_means = init_means/init_means.sum(0)
init_weights = init_weights/init_weights.sum(0)
create_model_file("server_0_table_0.model", init_means)
create_model_file("doc_topic.0", init_weights.T)
print(init_means)
print("init_means")
# Run LightLDA
print("TRAINING")
# TODO: argument for data capacity
train_args = (os.path.join(lightlda_folder, "bin/lightlda"), "-num_vocabs", str(data.shape[0]), "-num_topics",
str(k), "-num_iterations", str(max_iters), "-alpha", "0.05", "-beta", "0.01", "-mh_steps", "2",
"-num_local_workers", str(threads), "-num_blocks", "1", "-max_num_document", str(data.shape[1]),
"-input_dir", input_folder, "-data_capacity", str(data_capacity))
if warm_start:
print("warm start")
train_args = train_args + ("-warm_start",)
# Call LightLDA
subprocess.call(train_args)
# Parse final model and doc-topic files to obtain M/W
print("data shape")
print(data.shape)
M = parse_model_file("server_0_table_0.model", k, data.shape[0])
W = parse_result_file("doc_topic.0", k)
# Not sure if normalization is correct
M = M * (np.mean(data) / np.mean(M))
W = W/W.sum(0)
print("shapes")
print(M.shape)
print(W.shape)
# TODO: poisson_objective doesn't work for sparse matrices
if sparse.issparse(data):
ll = 0
else:
ll = poisson_objective(data, M, W)
#M = M * (5./np.mean(M))
return M, W, ll | [
"def",
"lightlda_estimate_state",
"(",
"data",
",",
"k",
",",
"input_folder",
"=",
"\"data1/LightLDA_input\"",
",",
"threads",
"=",
"8",
",",
"max_iters",
"=",
"250",
",",
"prepare_data",
"=",
"True",
",",
"init_means",
"=",
"None",
",",
"init_weights",
"=",
... | Runs LDA on the given dataset (can be an 2-D array of any form - sparse
or dense, as long as it can be indexed). If the data has not already been
prepared into LDA format, set "prepare_data" to TRUE. If "prepare_data" is
FALSE, the method assumes that the data has already been preprocessed into
LightLDA format and is located at the given "input_folder". | [
"Runs",
"LDA",
"on",
"the",
"given",
"dataset",
"(",
"can",
"be",
"an",
"2",
"-",
"D",
"array",
"of",
"any",
"form",
"-",
"sparse",
"or",
"dense",
"as",
"long",
"as",
"it",
"can",
"be",
"indexed",
")",
".",
"If",
"the",
"data",
"has",
"not",
"alr... | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/lightlda_utils.py#L133-L195 |
moonso/loqusdb | scripts/load_files.py | cli | def cli(ctx, directory, uri, verbose, count):
"""Load all files in a directory."""
# configure root logger to print to STDERR
loglevel = "INFO"
if verbose:
loglevel = "DEBUG"
coloredlogs.install(level=loglevel)
p = Path(directory)
if not p.is_dir():
LOG.warning("{0} is not a valid directory".format(directory))
ctx.abort()
start_time = datetime.now()
# Make sure that the database is indexed
index_call = ['loqusdb', 'index']
base_call = ['loqusdb']
if uri:
base_call.append('--uri')
base_call.append(uri)
index_call.append('--uri')
index_call.append(uri)
subprocess.run(index_call)
base_call.append('load')
nr_files = 0
for nr_files,file_name in enumerate(list(p.glob('*.vcf')),1):
call = deepcopy(base_call)
case_id = file_name.stem.split('.')[0]
call.append('--sv-variants')
call.append(str(file_name))
call.append('--case-id')
call.append(case_id)
if count:
continue
try:
subprocess.run(call, check=True)
except subprocess.CalledProcessError as err:
LOG.warning(err)
LOG.warning("Failed to load file %s", filename)
LOG.info("Continue with files...")
if nr_files % 100:
LOG.info("%s files loaded", nr_files)
LOG.info("%s files inserted", nr_files)
LOG.info("Time to insert files: {}".format(datetime.now()-start_time)) | python | def cli(ctx, directory, uri, verbose, count):
"""Load all files in a directory."""
# configure root logger to print to STDERR
loglevel = "INFO"
if verbose:
loglevel = "DEBUG"
coloredlogs.install(level=loglevel)
p = Path(directory)
if not p.is_dir():
LOG.warning("{0} is not a valid directory".format(directory))
ctx.abort()
start_time = datetime.now()
# Make sure that the database is indexed
index_call = ['loqusdb', 'index']
base_call = ['loqusdb']
if uri:
base_call.append('--uri')
base_call.append(uri)
index_call.append('--uri')
index_call.append(uri)
subprocess.run(index_call)
base_call.append('load')
nr_files = 0
for nr_files,file_name in enumerate(list(p.glob('*.vcf')),1):
call = deepcopy(base_call)
case_id = file_name.stem.split('.')[0]
call.append('--sv-variants')
call.append(str(file_name))
call.append('--case-id')
call.append(case_id)
if count:
continue
try:
subprocess.run(call, check=True)
except subprocess.CalledProcessError as err:
LOG.warning(err)
LOG.warning("Failed to load file %s", filename)
LOG.info("Continue with files...")
if nr_files % 100:
LOG.info("%s files loaded", nr_files)
LOG.info("%s files inserted", nr_files)
LOG.info("Time to insert files: {}".format(datetime.now()-start_time)) | [
"def",
"cli",
"(",
"ctx",
",",
"directory",
",",
"uri",
",",
"verbose",
",",
"count",
")",
":",
"# configure root logger to print to STDERR",
"loglevel",
"=",
"\"INFO\"",
"if",
"verbose",
":",
"loglevel",
"=",
"\"DEBUG\"",
"coloredlogs",
".",
"install",
"(",
"... | Load all files in a directory. | [
"Load",
"all",
"files",
"in",
"a",
"directory",
"."
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/scripts/load_files.py#L24-L72 |
yjzhang/uncurl_python | uncurl/nmf_wrapper.py | nmf_init | def nmf_init(data, clusters, k, init='enhanced'):
"""
Generates initial M and W given a data set and an array of cluster labels.
There are 3 options for init:
enhanced - uses EIn-NMF from Gong 2013
basic - uses means for M, assigns W such that the chosen cluster for a given cell has value 0.75 and all others have 0.25/(k-1).
nmf - uses means for M, and assigns W using the NMF objective while holding M constant.
"""
init_m = np.zeros((data.shape[0], k))
if sparse.issparse(data):
for i in range(k):
if data[:,clusters==i].shape[1]==0:
point = np.random.randint(0, data.shape[1])
init_m[:,i] = data[:,point].toarray().flatten()
else:
init_m[:,i] = np.array(data[:,clusters==i].mean(1)).flatten()
else:
for i in range(k):
if data[:,clusters==i].shape[1]==0:
point = np.random.randint(0, data.shape[1])
init_m[:,i] = data[:,point].flatten()
else:
init_m[:,i] = data[:,clusters==i].mean(1)
init_w = np.zeros((k, data.shape[1]))
if init == 'enhanced':
distances = np.zeros((k, data.shape[1]))
for i in range(k):
for j in range(data.shape[1]):
distances[i,j] = np.sqrt(((data[:,j] - init_m[:,i])**2).sum())
for i in range(k):
for j in range(data.shape[1]):
init_w[i,j] = 1/((distances[:,j]/distances[i,j])**(-2)).sum()
elif init == 'basic':
init_w = initialize_from_assignments(clusters, k)
elif init == 'nmf':
init_w_, _, n_iter = non_negative_factorization(data.T, n_components=k, init='custom', update_W=False, W=init_m.T)
init_w = init_w_.T
return init_m, init_w | python | def nmf_init(data, clusters, k, init='enhanced'):
"""
Generates initial M and W given a data set and an array of cluster labels.
There are 3 options for init:
enhanced - uses EIn-NMF from Gong 2013
basic - uses means for M, assigns W such that the chosen cluster for a given cell has value 0.75 and all others have 0.25/(k-1).
nmf - uses means for M, and assigns W using the NMF objective while holding M constant.
"""
init_m = np.zeros((data.shape[0], k))
if sparse.issparse(data):
for i in range(k):
if data[:,clusters==i].shape[1]==0:
point = np.random.randint(0, data.shape[1])
init_m[:,i] = data[:,point].toarray().flatten()
else:
init_m[:,i] = np.array(data[:,clusters==i].mean(1)).flatten()
else:
for i in range(k):
if data[:,clusters==i].shape[1]==0:
point = np.random.randint(0, data.shape[1])
init_m[:,i] = data[:,point].flatten()
else:
init_m[:,i] = data[:,clusters==i].mean(1)
init_w = np.zeros((k, data.shape[1]))
if init == 'enhanced':
distances = np.zeros((k, data.shape[1]))
for i in range(k):
for j in range(data.shape[1]):
distances[i,j] = np.sqrt(((data[:,j] - init_m[:,i])**2).sum())
for i in range(k):
for j in range(data.shape[1]):
init_w[i,j] = 1/((distances[:,j]/distances[i,j])**(-2)).sum()
elif init == 'basic':
init_w = initialize_from_assignments(clusters, k)
elif init == 'nmf':
init_w_, _, n_iter = non_negative_factorization(data.T, n_components=k, init='custom', update_W=False, W=init_m.T)
init_w = init_w_.T
return init_m, init_w | [
"def",
"nmf_init",
"(",
"data",
",",
"clusters",
",",
"k",
",",
"init",
"=",
"'enhanced'",
")",
":",
"init_m",
"=",
"np",
".",
"zeros",
"(",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
",",
"k",
")",
")",
"if",
"sparse",
".",
"issparse",
"(",
"da... | Generates initial M and W given a data set and an array of cluster labels.
There are 3 options for init:
enhanced - uses EIn-NMF from Gong 2013
basic - uses means for M, assigns W such that the chosen cluster for a given cell has value 0.75 and all others have 0.25/(k-1).
nmf - uses means for M, and assigns W using the NMF objective while holding M constant. | [
"Generates",
"initial",
"M",
"and",
"W",
"given",
"a",
"data",
"set",
"and",
"an",
"array",
"of",
"cluster",
"labels",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nmf_wrapper.py#L10-L48 |
yjzhang/uncurl_python | uncurl/nmf_wrapper.py | log_norm_nmf | def log_norm_nmf(data, k, normalize_w=True, return_cost=True, init_weights=None, init_means=None, write_progress_file=None, **kwargs):
"""
Args:
data (array): dense or sparse array with shape (genes, cells)
k (int): number of cell types
normalize_w (bool, optional): True if W should be normalized (so that each column sums to 1). Default: True
return_cost (bool, optional): True if the NMF objective value (squared error) should be returned. Default: True
init_weights (array, optional): Initial value for W. Default: None
init_means (array, optional): Initial value for M. Default: None
**kwargs: misc arguments to NMF
Returns:
Two matrices M of shape (genes, k) and W of shape (k, cells). They correspond to M and M in Poisson state estimation. If return_cost is True (which it is by default), then the cost will also be returned. This might be prohibitably costly
"""
init = None
data = log1p(cell_normalize(data))
if init_weights is not None or init_means is not None:
init = 'custom'
if init_weights is None:
init_weights_, _, n_iter = non_negative_factorization(data.T, n_components=k, init='custom', update_W=False, W=init_means.T)
init_weights = init_weights_.T
elif init_means is None:
init_means, _, n_iter = non_negative_factorization(data, n_components=k, init='custom', update_W=False, W=init_weights)
init_means = init_means.copy(order='C')
init_weights = init_weights.copy(order='C')
nmf = NMF(k, init=init, **kwargs)
if write_progress_file is not None:
progress = open(write_progress_file, 'w')
progress.write(str(0))
progress.close()
M = nmf.fit_transform(data, W=init_means, H=init_weights)
W = nmf.components_
if normalize_w:
W = W/W.sum(0)
if return_cost:
cost = 0
if sparse.issparse(data):
ws = sparse.csr_matrix(M)
hs = sparse.csr_matrix(W)
cost = 0.5*((data - ws.dot(hs)).power(2)).sum()
else:
cost = 0.5*((data - M.dot(W))**2).sum()
return M, W, cost
else:
return M, W | python | def log_norm_nmf(data, k, normalize_w=True, return_cost=True, init_weights=None, init_means=None, write_progress_file=None, **kwargs):
"""
Args:
data (array): dense or sparse array with shape (genes, cells)
k (int): number of cell types
normalize_w (bool, optional): True if W should be normalized (so that each column sums to 1). Default: True
return_cost (bool, optional): True if the NMF objective value (squared error) should be returned. Default: True
init_weights (array, optional): Initial value for W. Default: None
init_means (array, optional): Initial value for M. Default: None
**kwargs: misc arguments to NMF
Returns:
Two matrices M of shape (genes, k) and W of shape (k, cells). They correspond to M and M in Poisson state estimation. If return_cost is True (which it is by default), then the cost will also be returned. This might be prohibitably costly
"""
init = None
data = log1p(cell_normalize(data))
if init_weights is not None or init_means is not None:
init = 'custom'
if init_weights is None:
init_weights_, _, n_iter = non_negative_factorization(data.T, n_components=k, init='custom', update_W=False, W=init_means.T)
init_weights = init_weights_.T
elif init_means is None:
init_means, _, n_iter = non_negative_factorization(data, n_components=k, init='custom', update_W=False, W=init_weights)
init_means = init_means.copy(order='C')
init_weights = init_weights.copy(order='C')
nmf = NMF(k, init=init, **kwargs)
if write_progress_file is not None:
progress = open(write_progress_file, 'w')
progress.write(str(0))
progress.close()
M = nmf.fit_transform(data, W=init_means, H=init_weights)
W = nmf.components_
if normalize_w:
W = W/W.sum(0)
if return_cost:
cost = 0
if sparse.issparse(data):
ws = sparse.csr_matrix(M)
hs = sparse.csr_matrix(W)
cost = 0.5*((data - ws.dot(hs)).power(2)).sum()
else:
cost = 0.5*((data - M.dot(W))**2).sum()
return M, W, cost
else:
return M, W | [
"def",
"log_norm_nmf",
"(",
"data",
",",
"k",
",",
"normalize_w",
"=",
"True",
",",
"return_cost",
"=",
"True",
",",
"init_weights",
"=",
"None",
",",
"init_means",
"=",
"None",
",",
"write_progress_file",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
... | Args:
data (array): dense or sparse array with shape (genes, cells)
k (int): number of cell types
normalize_w (bool, optional): True if W should be normalized (so that each column sums to 1). Default: True
return_cost (bool, optional): True if the NMF objective value (squared error) should be returned. Default: True
init_weights (array, optional): Initial value for W. Default: None
init_means (array, optional): Initial value for M. Default: None
**kwargs: misc arguments to NMF
Returns:
Two matrices M of shape (genes, k) and W of shape (k, cells). They correspond to M and M in Poisson state estimation. If return_cost is True (which it is by default), then the cost will also be returned. This might be prohibitably costly | [
"Args",
":",
"data",
"(",
"array",
")",
":",
"dense",
"or",
"sparse",
"array",
"with",
"shape",
"(",
"genes",
"cells",
")",
"k",
"(",
"int",
")",
":",
"number",
"of",
"cell",
"types",
"normalize_w",
"(",
"bool",
"optional",
")",
":",
"True",
"if",
... | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nmf_wrapper.py#L51-L95 |
moonso/loqusdb | loqusdb/build_models/variant.py | check_par | def check_par(chrom, pos):
"""Check if a coordinate is in the PAR region
Args:
chrom(str)
pos(int)
Returns:
par(bool)
"""
par = False
for interval in PAR.get(chrom,[]):
if (pos >= interval[0] and pos <= interval[1]):
par = True
return par | python | def check_par(chrom, pos):
"""Check if a coordinate is in the PAR region
Args:
chrom(str)
pos(int)
Returns:
par(bool)
"""
par = False
for interval in PAR.get(chrom,[]):
if (pos >= interval[0] and pos <= interval[1]):
par = True
return par | [
"def",
"check_par",
"(",
"chrom",
",",
"pos",
")",
":",
"par",
"=",
"False",
"for",
"interval",
"in",
"PAR",
".",
"get",
"(",
"chrom",
",",
"[",
"]",
")",
":",
"if",
"(",
"pos",
">=",
"interval",
"[",
"0",
"]",
"and",
"pos",
"<=",
"interval",
"... | Check if a coordinate is in the PAR region
Args:
chrom(str)
pos(int)
Returns:
par(bool) | [
"Check",
"if",
"a",
"coordinate",
"is",
"in",
"the",
"PAR",
"region",
"Args",
":",
"chrom",
"(",
"str",
")",
"pos",
"(",
"int",
")",
"Returns",
":",
"par",
"(",
"bool",
")"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/build_models/variant.py#L16-L32 |
moonso/loqusdb | loqusdb/build_models/variant.py | get_variant_id | def get_variant_id(variant):
"""Get a variant id on the format chrom_pos_ref_alt"""
variant_id = '_'.join([
str(variant.CHROM),
str(variant.POS),
str(variant.REF),
str(variant.ALT[0])
]
)
return variant_id | python | def get_variant_id(variant):
"""Get a variant id on the format chrom_pos_ref_alt"""
variant_id = '_'.join([
str(variant.CHROM),
str(variant.POS),
str(variant.REF),
str(variant.ALT[0])
]
)
return variant_id | [
"def",
"get_variant_id",
"(",
"variant",
")",
":",
"variant_id",
"=",
"'_'",
".",
"join",
"(",
"[",
"str",
"(",
"variant",
".",
"CHROM",
")",
",",
"str",
"(",
"variant",
".",
"POS",
")",
",",
"str",
"(",
"variant",
".",
"REF",
")",
",",
"str",
"(... | Get a variant id on the format chrom_pos_ref_alt | [
"Get",
"a",
"variant",
"id",
"on",
"the",
"format",
"chrom_pos_ref_alt"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/build_models/variant.py#L35-L44 |
moonso/loqusdb | loqusdb/build_models/variant.py | is_greater | def is_greater(a,b):
"""Check if position a is greater than position b
This will look at chromosome and position.
For example a position where chrom = 2 and pos = 300 is greater than a position where
chrom = 1 and pos = 1000
If any of the chromosomes is outside [1-22,X,Y,MT] we can not say which is biggest.
Args:
a,b(Position)
Returns:
bool: True if a is greater than b
"""
a_chrom = CHROM_TO_INT.get(a.chrom,0)
b_chrom = CHROM_TO_INT.get(b.chrom,0)
if (a_chrom == 0 or b_chrom == 0):
return False
if a_chrom > b_chrom:
return True
if a_chrom == b_chrom:
if a.pos > b.pos:
return True
return False | python | def is_greater(a,b):
"""Check if position a is greater than position b
This will look at chromosome and position.
For example a position where chrom = 2 and pos = 300 is greater than a position where
chrom = 1 and pos = 1000
If any of the chromosomes is outside [1-22,X,Y,MT] we can not say which is biggest.
Args:
a,b(Position)
Returns:
bool: True if a is greater than b
"""
a_chrom = CHROM_TO_INT.get(a.chrom,0)
b_chrom = CHROM_TO_INT.get(b.chrom,0)
if (a_chrom == 0 or b_chrom == 0):
return False
if a_chrom > b_chrom:
return True
if a_chrom == b_chrom:
if a.pos > b.pos:
return True
return False | [
"def",
"is_greater",
"(",
"a",
",",
"b",
")",
":",
"a_chrom",
"=",
"CHROM_TO_INT",
".",
"get",
"(",
"a",
".",
"chrom",
",",
"0",
")",
"b_chrom",
"=",
"CHROM_TO_INT",
".",
"get",
"(",
"b",
".",
"chrom",
",",
"0",
")",
"if",
"(",
"a_chrom",
"==",
... | Check if position a is greater than position b
This will look at chromosome and position.
For example a position where chrom = 2 and pos = 300 is greater than a position where
chrom = 1 and pos = 1000
If any of the chromosomes is outside [1-22,X,Y,MT] we can not say which is biggest.
Args:
a,b(Position)
Returns:
bool: True if a is greater than b | [
"Check",
"if",
"position",
"a",
"is",
"greater",
"than",
"position",
"b",
"This",
"will",
"look",
"at",
"chromosome",
"and",
"position",
".",
"For",
"example",
"a",
"position",
"where",
"chrom",
"=",
"2",
"and",
"pos",
"=",
"300",
"is",
"greater",
"than"... | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/build_models/variant.py#L46-L75 |
moonso/loqusdb | loqusdb/build_models/variant.py | get_coords | def get_coords(variant):
"""Returns a dictionary with position information
Args:
variant(cyvcf2.Variant)
Returns:
coordinates(dict)
"""
coordinates = {
'chrom': None,
'end_chrom': None,
'sv_length': None,
'sv_type': None,
'pos': None,
'end': None,
}
chrom = variant.CHROM
if chrom.startswith(('chr', 'CHR', 'Chr')):
chrom = chrom[3:]
coordinates['chrom'] = chrom
end_chrom = chrom
pos = int(variant.POS)
alt = variant.ALT[0]
# Get the end position
# This will be None for non-svs
end_pos = variant.INFO.get('END')
if end_pos:
end = int(end_pos)
else:
end = int(variant.end)
coordinates['end'] = end
sv_type = variant.INFO.get('SVTYPE')
length = variant.INFO.get('SVLEN')
if length:
sv_len = abs(length)
else:
sv_len = end - pos
# Translocations will sometimes have a end chrom that differs from chrom
if sv_type == 'BND':
other_coordinates = alt.strip('ACGTN[]').split(':')
end_chrom = other_coordinates[0]
if end_chrom.startswith(('chr', 'CHR', 'Chr')):
end_chrom = end_chrom[3:]
end = int(other_coordinates[1])
#Set 'infinity' to length if translocation
sv_len = float('inf')
# Insertions often have length 0 in VCF
if (sv_len == 0 and alt != '<INS>'):
sv_len = len(alt)
if (pos == end) and (sv_len > 0):
end = pos + sv_len
position = Position(chrom, pos)
end_position = Position(end_chrom, end)
# If 'start' is greater than 'end', switch positions
if is_greater(position, end_position):
end_chrom = position.chrom
end = position.pos
chrom = end_position.chrom
pos = end_position.pos
coordinates['end_chrom'] = end_chrom
coordinates['pos'] = pos
coordinates['end'] = end
coordinates['sv_length'] = sv_len
coordinates['sv_type'] = sv_type
return coordinates | python | def get_coords(variant):
"""Returns a dictionary with position information
Args:
variant(cyvcf2.Variant)
Returns:
coordinates(dict)
"""
coordinates = {
'chrom': None,
'end_chrom': None,
'sv_length': None,
'sv_type': None,
'pos': None,
'end': None,
}
chrom = variant.CHROM
if chrom.startswith(('chr', 'CHR', 'Chr')):
chrom = chrom[3:]
coordinates['chrom'] = chrom
end_chrom = chrom
pos = int(variant.POS)
alt = variant.ALT[0]
# Get the end position
# This will be None for non-svs
end_pos = variant.INFO.get('END')
if end_pos:
end = int(end_pos)
else:
end = int(variant.end)
coordinates['end'] = end
sv_type = variant.INFO.get('SVTYPE')
length = variant.INFO.get('SVLEN')
if length:
sv_len = abs(length)
else:
sv_len = end - pos
# Translocations will sometimes have a end chrom that differs from chrom
if sv_type == 'BND':
other_coordinates = alt.strip('ACGTN[]').split(':')
end_chrom = other_coordinates[0]
if end_chrom.startswith(('chr', 'CHR', 'Chr')):
end_chrom = end_chrom[3:]
end = int(other_coordinates[1])
#Set 'infinity' to length if translocation
sv_len = float('inf')
# Insertions often have length 0 in VCF
if (sv_len == 0 and alt != '<INS>'):
sv_len = len(alt)
if (pos == end) and (sv_len > 0):
end = pos + sv_len
position = Position(chrom, pos)
end_position = Position(end_chrom, end)
# If 'start' is greater than 'end', switch positions
if is_greater(position, end_position):
end_chrom = position.chrom
end = position.pos
chrom = end_position.chrom
pos = end_position.pos
coordinates['end_chrom'] = end_chrom
coordinates['pos'] = pos
coordinates['end'] = end
coordinates['sv_length'] = sv_len
coordinates['sv_type'] = sv_type
return coordinates | [
"def",
"get_coords",
"(",
"variant",
")",
":",
"coordinates",
"=",
"{",
"'chrom'",
":",
"None",
",",
"'end_chrom'",
":",
"None",
",",
"'sv_length'",
":",
"None",
",",
"'sv_type'",
":",
"None",
",",
"'pos'",
":",
"None",
",",
"'end'",
":",
"None",
",",
... | Returns a dictionary with position information
Args:
variant(cyvcf2.Variant)
Returns:
coordinates(dict) | [
"Returns",
"a",
"dictionary",
"with",
"position",
"information",
"Args",
":",
"variant",
"(",
"cyvcf2",
".",
"Variant",
")",
"Returns",
":",
"coordinates",
"(",
"dict",
")"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/build_models/variant.py#L78-L156 |
moonso/loqusdb | loqusdb/build_models/variant.py | build_variant | def build_variant(variant, case_obj, case_id=None, gq_treshold=None):
"""Return a Variant object
Take a cyvcf2 formated variant line and return a models.Variant.
If criterias are not fullfilled, eg. variant have no gt call or quality
is below gq treshold then return None.
Args:
variant(cyvcf2.Variant)
case_obj(Case): We need the case object to check individuals sex
case_id(str): The case id
gq_treshold(int): Genotype Quality treshold
Return:
formated_variant(models.Variant): A variant dictionary
"""
variant_obj = None
sv = False
# Let cyvcf2 tell if it is a Structural Variant or not
if variant.var_type == 'sv':
sv = True
# chrom_pos_ref_alt
variant_id = get_variant_id(variant)
ref = variant.REF
# ALT is an array in cyvcf2
# We allways assume splitted and normalized VCFs
alt = variant.ALT[0]
coordinates = get_coords(variant)
chrom = coordinates['chrom']
pos = coordinates['pos']
# These are integers that will be used when uploading
found_homozygote = 0
found_hemizygote = 0
# Only look at genotypes for the present individuals
if sv:
found_variant = True
else:
found_variant = False
for ind_obj in case_obj['individuals']:
ind_id = ind_obj['ind_id']
# Get the index position for the individual in the VCF
ind_pos = ind_obj['ind_index']
gq = int(variant.gt_quals[ind_pos])
if (gq_treshold and gq < gq_treshold):
continue
genotype = GENOTYPE_MAP[variant.gt_types[ind_pos]]
if genotype in ['het', 'hom_alt']:
LOG.debug("Found variant")
found_variant = True
# If variant in X or Y and individual is male,
# we need to check hemizygosity
if chrom in ['X','Y'] and ind_obj['sex'] == 1:
if not check_par(chrom, pos):
LOG.debug("Found hemizygous variant")
found_hemizygote = 1
if genotype == 'hom_alt':
LOG.debug("Found homozygote alternative variant")
found_homozygote = 1
if found_variant:
variant_obj = Variant(
variant_id=variant_id,
chrom=chrom,
pos=pos,
end=coordinates['end'],
ref=ref,
alt=alt,
end_chrom=coordinates['end_chrom'],
sv_type = coordinates['sv_type'],
sv_len = coordinates['sv_length'],
case_id = case_id,
homozygote = found_homozygote,
hemizygote = found_hemizygote,
is_sv = sv,
id_column = variant.ID,
)
return variant_obj | python | def build_variant(variant, case_obj, case_id=None, gq_treshold=None):
"""Return a Variant object
Take a cyvcf2 formated variant line and return a models.Variant.
If criterias are not fullfilled, eg. variant have no gt call or quality
is below gq treshold then return None.
Args:
variant(cyvcf2.Variant)
case_obj(Case): We need the case object to check individuals sex
case_id(str): The case id
gq_treshold(int): Genotype Quality treshold
Return:
formated_variant(models.Variant): A variant dictionary
"""
variant_obj = None
sv = False
# Let cyvcf2 tell if it is a Structural Variant or not
if variant.var_type == 'sv':
sv = True
# chrom_pos_ref_alt
variant_id = get_variant_id(variant)
ref = variant.REF
# ALT is an array in cyvcf2
# We allways assume splitted and normalized VCFs
alt = variant.ALT[0]
coordinates = get_coords(variant)
chrom = coordinates['chrom']
pos = coordinates['pos']
# These are integers that will be used when uploading
found_homozygote = 0
found_hemizygote = 0
# Only look at genotypes for the present individuals
if sv:
found_variant = True
else:
found_variant = False
for ind_obj in case_obj['individuals']:
ind_id = ind_obj['ind_id']
# Get the index position for the individual in the VCF
ind_pos = ind_obj['ind_index']
gq = int(variant.gt_quals[ind_pos])
if (gq_treshold and gq < gq_treshold):
continue
genotype = GENOTYPE_MAP[variant.gt_types[ind_pos]]
if genotype in ['het', 'hom_alt']:
LOG.debug("Found variant")
found_variant = True
# If variant in X or Y and individual is male,
# we need to check hemizygosity
if chrom in ['X','Y'] and ind_obj['sex'] == 1:
if not check_par(chrom, pos):
LOG.debug("Found hemizygous variant")
found_hemizygote = 1
if genotype == 'hom_alt':
LOG.debug("Found homozygote alternative variant")
found_homozygote = 1
if found_variant:
variant_obj = Variant(
variant_id=variant_id,
chrom=chrom,
pos=pos,
end=coordinates['end'],
ref=ref,
alt=alt,
end_chrom=coordinates['end_chrom'],
sv_type = coordinates['sv_type'],
sv_len = coordinates['sv_length'],
case_id = case_id,
homozygote = found_homozygote,
hemizygote = found_hemizygote,
is_sv = sv,
id_column = variant.ID,
)
return variant_obj | [
"def",
"build_variant",
"(",
"variant",
",",
"case_obj",
",",
"case_id",
"=",
"None",
",",
"gq_treshold",
"=",
"None",
")",
":",
"variant_obj",
"=",
"None",
"sv",
"=",
"False",
"# Let cyvcf2 tell if it is a Structural Variant or not",
"if",
"variant",
".",
"var_ty... | Return a Variant object
Take a cyvcf2 formated variant line and return a models.Variant.
If criterias are not fullfilled, eg. variant have no gt call or quality
is below gq treshold then return None.
Args:
variant(cyvcf2.Variant)
case_obj(Case): We need the case object to check individuals sex
case_id(str): The case id
gq_treshold(int): Genotype Quality treshold
Return:
formated_variant(models.Variant): A variant dictionary | [
"Return",
"a",
"Variant",
"object"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/build_models/variant.py#L158-L247 |
moonso/loqusdb | loqusdb/commands/migrate.py | migrate | def migrate(ctx,):
"""Migrate an old loqusdb instance to 1.0
"""
adapter = ctx.obj['adapter']
start_time = datetime.now()
nr_updated = migrate_database(adapter)
LOG.info("All variants updated, time to complete migration: {}".format(
datetime.now() - start_time))
LOG.info("Nr variants that where updated: %s", nr_updated) | python | def migrate(ctx,):
"""Migrate an old loqusdb instance to 1.0
"""
adapter = ctx.obj['adapter']
start_time = datetime.now()
nr_updated = migrate_database(adapter)
LOG.info("All variants updated, time to complete migration: {}".format(
datetime.now() - start_time))
LOG.info("Nr variants that where updated: %s", nr_updated) | [
"def",
"migrate",
"(",
"ctx",
",",
")",
":",
"adapter",
"=",
"ctx",
".",
"obj",
"[",
"'adapter'",
"]",
"start_time",
"=",
"datetime",
".",
"now",
"(",
")",
"nr_updated",
"=",
"migrate_database",
"(",
"adapter",
")",
"LOG",
".",
"info",
"(",
"\"All vari... | Migrate an old loqusdb instance to 1.0 | [
"Migrate",
"an",
"old",
"loqusdb",
"instance",
"to",
"1",
".",
"0"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/migrate.py#L14-L25 |
moonso/loqusdb | loqusdb/commands/update.py | update | def update(ctx, variant_file, sv_variants, family_file, family_type, skip_case_id, gq_treshold,
case_id, ensure_index, max_window):
"""Load the variants of a case
A variant is loaded if it is observed in any individual of a case
If no family file is provided all individuals in vcf file will be considered.
"""
if not (family_file or case_id):
LOG.warning("Please provide a family file or a case id")
ctx.abort()
if not (variant_file or sv_variants):
LOG.warning("Please provide a VCF file")
ctx.abort()
variant_path = None
if variant_file:
variant_path = os.path.abspath(variant_file)
variant_sv_path = None
if sv_variants:
variant_sv_path = os.path.abspath(sv_variants)
adapter = ctx.obj['adapter']
start_inserting = datetime.now()
try:
nr_inserted = update_database(
adapter=adapter,
variant_file=variant_path,
sv_file=variant_sv_path,
family_file=family_file,
family_type=family_type,
skip_case_id=skip_case_id,
case_id=case_id,
gq_treshold=gq_treshold,
max_window=max_window,
)
except (SyntaxError, CaseError, IOError, VcfError) as error:
LOG.warning(error)
ctx.abort()
LOG.info("Nr variants inserted: %s", nr_inserted)
LOG.info("Time to insert variants: {0}".format(
datetime.now() - start_inserting))
if ensure_index:
adapter.ensure_indexes()
else:
adapter.check_indexes() | python | def update(ctx, variant_file, sv_variants, family_file, family_type, skip_case_id, gq_treshold,
case_id, ensure_index, max_window):
"""Load the variants of a case
A variant is loaded if it is observed in any individual of a case
If no family file is provided all individuals in vcf file will be considered.
"""
if not (family_file or case_id):
LOG.warning("Please provide a family file or a case id")
ctx.abort()
if not (variant_file or sv_variants):
LOG.warning("Please provide a VCF file")
ctx.abort()
variant_path = None
if variant_file:
variant_path = os.path.abspath(variant_file)
variant_sv_path = None
if sv_variants:
variant_sv_path = os.path.abspath(sv_variants)
adapter = ctx.obj['adapter']
start_inserting = datetime.now()
try:
nr_inserted = update_database(
adapter=adapter,
variant_file=variant_path,
sv_file=variant_sv_path,
family_file=family_file,
family_type=family_type,
skip_case_id=skip_case_id,
case_id=case_id,
gq_treshold=gq_treshold,
max_window=max_window,
)
except (SyntaxError, CaseError, IOError, VcfError) as error:
LOG.warning(error)
ctx.abort()
LOG.info("Nr variants inserted: %s", nr_inserted)
LOG.info("Time to insert variants: {0}".format(
datetime.now() - start_inserting))
if ensure_index:
adapter.ensure_indexes()
else:
adapter.check_indexes() | [
"def",
"update",
"(",
"ctx",
",",
"variant_file",
",",
"sv_variants",
",",
"family_file",
",",
"family_type",
",",
"skip_case_id",
",",
"gq_treshold",
",",
"case_id",
",",
"ensure_index",
",",
"max_window",
")",
":",
"if",
"not",
"(",
"family_file",
"or",
"c... | Load the variants of a case
A variant is loaded if it is observed in any individual of a case
If no family file is provided all individuals in vcf file will be considered. | [
"Load",
"the",
"variants",
"of",
"a",
"case"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/update.py#L62-L114 |
moonso/loqusdb | loqusdb/commands/export.py | export | def export(ctx, outfile, variant_type):
"""Export the variants of a loqus db
The variants are exported to a vcf file
"""
adapter = ctx.obj['adapter']
version = ctx.obj['version']
LOG.info("Export the variants from {0}".format(adapter))
nr_cases = 0
is_sv = variant_type == 'sv'
existing_chromosomes = set(adapter.get_chromosomes(sv=is_sv))
ordered_chromosomes = []
for chrom in CHROMOSOME_ORDER:
if chrom in existing_chromosomes:
ordered_chromosomes.append(chrom)
existing_chromosomes.remove(chrom)
for chrom in existing_chromosomes:
ordered_chromosomes.append(chrom)
nr_cases = adapter.cases().count()
LOG.info("Found {0} cases in database".format(nr_cases))
head = HeaderParser()
head.add_fileformat("VCFv4.3")
head.add_meta_line("NrCases", nr_cases)
head.add_info("Obs", '1', 'Integer', "The number of observations for the variant")
head.add_info("Hom", '1', 'Integer', "The number of observed homozygotes")
head.add_info("Hem", '1', 'Integer', "The number of observed hemizygotes")
head.add_version_tracking("loqusdb", version, datetime.now().strftime("%Y-%m-%d %H:%M"))
if variant_type == 'sv':
head.add_info("END", '1', 'Integer', "End position of the variant")
head.add_info("SVTYPE", '1', 'String', "Type of structural variant")
head.add_info("SVLEN", '1', 'Integer', "Length of structural variant")
for chrom in ordered_chromosomes:
length = adapter.get_max_position(chrom)
head.add_contig(contig_id=chrom, length=str(length))
print_headers(head, outfile=outfile)
for chrom in ordered_chromosomes:
if variant_type == 'snv':
LOG.info("Collecting all SNV variants")
variants = adapter.get_variants(chromosome=chrom)
else:
LOG.info("Collecting all SV variants")
variants = adapter.get_sv_variants(chromosome=chrom)
LOG.info("{} variants found".format(variants.count()))
for variant in variants:
variant_line = format_variant(variant, variant_type=variant_type)
# chrom = variant['chrom']
# pos = variant['start']
# ref = variant['ref']
# alt = variant['alt']
# observations = variant['observations']
# homozygotes = variant['homozygote']
# hemizygotes = variant['hemizygote']
# info = "Obs={0}".format(observations)
# if homozygotes:
# info += ";Hom={0}".format(homozygotes)
# if hemizygotes:
# info += ";Hem={0}".format(hemizygotes)
# variant_line = "{0}\t{1}\t.\t{2}\t{3}\t.\t.\t{4}\n".format(
# chrom, pos, ref, alt, info)
print_variant(variant_line=variant_line, outfile=outfile) | python | def export(ctx, outfile, variant_type):
"""Export the variants of a loqus db
The variants are exported to a vcf file
"""
adapter = ctx.obj['adapter']
version = ctx.obj['version']
LOG.info("Export the variants from {0}".format(adapter))
nr_cases = 0
is_sv = variant_type == 'sv'
existing_chromosomes = set(adapter.get_chromosomes(sv=is_sv))
ordered_chromosomes = []
for chrom in CHROMOSOME_ORDER:
if chrom in existing_chromosomes:
ordered_chromosomes.append(chrom)
existing_chromosomes.remove(chrom)
for chrom in existing_chromosomes:
ordered_chromosomes.append(chrom)
nr_cases = adapter.cases().count()
LOG.info("Found {0} cases in database".format(nr_cases))
head = HeaderParser()
head.add_fileformat("VCFv4.3")
head.add_meta_line("NrCases", nr_cases)
head.add_info("Obs", '1', 'Integer', "The number of observations for the variant")
head.add_info("Hom", '1', 'Integer', "The number of observed homozygotes")
head.add_info("Hem", '1', 'Integer', "The number of observed hemizygotes")
head.add_version_tracking("loqusdb", version, datetime.now().strftime("%Y-%m-%d %H:%M"))
if variant_type == 'sv':
head.add_info("END", '1', 'Integer', "End position of the variant")
head.add_info("SVTYPE", '1', 'String', "Type of structural variant")
head.add_info("SVLEN", '1', 'Integer', "Length of structural variant")
for chrom in ordered_chromosomes:
length = adapter.get_max_position(chrom)
head.add_contig(contig_id=chrom, length=str(length))
print_headers(head, outfile=outfile)
for chrom in ordered_chromosomes:
if variant_type == 'snv':
LOG.info("Collecting all SNV variants")
variants = adapter.get_variants(chromosome=chrom)
else:
LOG.info("Collecting all SV variants")
variants = adapter.get_sv_variants(chromosome=chrom)
LOG.info("{} variants found".format(variants.count()))
for variant in variants:
variant_line = format_variant(variant, variant_type=variant_type)
# chrom = variant['chrom']
# pos = variant['start']
# ref = variant['ref']
# alt = variant['alt']
# observations = variant['observations']
# homozygotes = variant['homozygote']
# hemizygotes = variant['hemizygote']
# info = "Obs={0}".format(observations)
# if homozygotes:
# info += ";Hom={0}".format(homozygotes)
# if hemizygotes:
# info += ";Hem={0}".format(hemizygotes)
# variant_line = "{0}\t{1}\t.\t{2}\t{3}\t.\t.\t{4}\n".format(
# chrom, pos, ref, alt, info)
print_variant(variant_line=variant_line, outfile=outfile) | [
"def",
"export",
"(",
"ctx",
",",
"outfile",
",",
"variant_type",
")",
":",
"adapter",
"=",
"ctx",
".",
"obj",
"[",
"'adapter'",
"]",
"version",
"=",
"ctx",
".",
"obj",
"[",
"'version'",
"]",
"LOG",
".",
"info",
"(",
"\"Export the variants from {0}\"",
"... | Export the variants of a loqus db
The variants are exported to a vcf file | [
"Export",
"the",
"variants",
"of",
"a",
"loqus",
"db",
"The",
"variants",
"are",
"exported",
"to",
"a",
"vcf",
"file"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/export.py#L28-L97 |
moonso/loqusdb | loqusdb/utils/load.py | load_database | def load_database(adapter, variant_file=None, sv_file=None, family_file=None,
family_type='ped', skip_case_id=False, gq_treshold=None,
case_id=None, max_window = 3000, profile_file=None,
hard_threshold=0.95, soft_threshold=0.9):
"""Load the database with a case and its variants
Args:
adapter: Connection to database
variant_file(str): Path to variant file
sv_file(str): Path to sv variant file
family_file(str): Path to family file
family_type(str): Format of family file
skip_case_id(bool): If no case information should be added to variants
gq_treshold(int): If only quality variants should be considered
case_id(str): If different case id than the one in family file should be used
max_window(int): Specify the max size for sv windows
check_profile(bool): Does profile check if True
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
nr_inserted(int)
"""
vcf_files = []
nr_variants = None
vcf_individuals = None
if variant_file:
vcf_info = check_vcf(variant_file)
nr_variants = vcf_info['nr_variants']
variant_type = vcf_info['variant_type']
vcf_files.append(variant_file)
# Get the indivuduals that are present in vcf file
vcf_individuals = vcf_info['individuals']
nr_sv_variants = None
sv_individuals = None
if sv_file:
vcf_info = check_vcf(sv_file, 'sv')
nr_sv_variants = vcf_info['nr_variants']
vcf_files.append(sv_file)
sv_individuals = vcf_info['individuals']
profiles = None
matches = None
if profile_file:
profiles = get_profiles(adapter, profile_file)
###Check if any profile already exists
matches = profile_match(adapter,
profiles,
hard_threshold=hard_threshold,
soft_threshold=soft_threshold)
# If a gq treshold is used the variants needs to have GQ
for _vcf_file in vcf_files:
# Get a cyvcf2.VCF object
vcf = get_vcf(_vcf_file)
if gq_treshold:
if not vcf.contains('GQ'):
LOG.warning('Set gq-treshold to 0 or add info to vcf {0}'.format(_vcf_file))
raise SyntaxError('GQ is not defined in vcf header')
# Get a ped_parser.Family object from family file
family = None
family_id = None
if family_file:
LOG.info("Loading family from %s", family_file)
with open(family_file, 'r') as family_lines:
family = get_case(
family_lines=family_lines,
family_type=family_type
)
family_id = family.family_id
# There has to be a case_id or a family at this stage.
case_id = case_id or family_id
# Convert infromation to a loqusdb Case object
case_obj = build_case(
case=family,
case_id=case_id,
vcf_path=variant_file,
vcf_individuals=vcf_individuals,
nr_variants=nr_variants,
vcf_sv_path=sv_file,
sv_individuals=sv_individuals,
nr_sv_variants=nr_sv_variants,
profiles=profiles,
matches=matches,
profile_path=profile_file
)
# Build and load a new case, or update an existing one
load_case(
adapter=adapter,
case_obj=case_obj,
)
nr_inserted = 0
# If case was succesfully added we can store the variants
for file_type in ['vcf_path','vcf_sv_path']:
variant_type = 'snv'
if file_type == 'vcf_sv_path':
variant_type = 'sv'
if case_obj.get(file_type) is None:
continue
vcf_obj = get_vcf(case_obj[file_type])
try:
nr_inserted += load_variants(
adapter=adapter,
vcf_obj=vcf_obj,
case_obj=case_obj,
skip_case_id=skip_case_id,
gq_treshold=gq_treshold,
max_window=max_window,
variant_type=variant_type,
)
except Exception as err:
# If something went wrong do a rollback
LOG.warning(err)
delete(
adapter=adapter,
case_obj=case_obj,
)
raise err
return nr_inserted | python | def load_database(adapter, variant_file=None, sv_file=None, family_file=None,
family_type='ped', skip_case_id=False, gq_treshold=None,
case_id=None, max_window = 3000, profile_file=None,
hard_threshold=0.95, soft_threshold=0.9):
"""Load the database with a case and its variants
Args:
adapter: Connection to database
variant_file(str): Path to variant file
sv_file(str): Path to sv variant file
family_file(str): Path to family file
family_type(str): Format of family file
skip_case_id(bool): If no case information should be added to variants
gq_treshold(int): If only quality variants should be considered
case_id(str): If different case id than the one in family file should be used
max_window(int): Specify the max size for sv windows
check_profile(bool): Does profile check if True
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
nr_inserted(int)
"""
vcf_files = []
nr_variants = None
vcf_individuals = None
if variant_file:
vcf_info = check_vcf(variant_file)
nr_variants = vcf_info['nr_variants']
variant_type = vcf_info['variant_type']
vcf_files.append(variant_file)
# Get the indivuduals that are present in vcf file
vcf_individuals = vcf_info['individuals']
nr_sv_variants = None
sv_individuals = None
if sv_file:
vcf_info = check_vcf(sv_file, 'sv')
nr_sv_variants = vcf_info['nr_variants']
vcf_files.append(sv_file)
sv_individuals = vcf_info['individuals']
profiles = None
matches = None
if profile_file:
profiles = get_profiles(adapter, profile_file)
###Check if any profile already exists
matches = profile_match(adapter,
profiles,
hard_threshold=hard_threshold,
soft_threshold=soft_threshold)
# If a gq treshold is used the variants needs to have GQ
for _vcf_file in vcf_files:
# Get a cyvcf2.VCF object
vcf = get_vcf(_vcf_file)
if gq_treshold:
if not vcf.contains('GQ'):
LOG.warning('Set gq-treshold to 0 or add info to vcf {0}'.format(_vcf_file))
raise SyntaxError('GQ is not defined in vcf header')
# Get a ped_parser.Family object from family file
family = None
family_id = None
if family_file:
LOG.info("Loading family from %s", family_file)
with open(family_file, 'r') as family_lines:
family = get_case(
family_lines=family_lines,
family_type=family_type
)
family_id = family.family_id
# There has to be a case_id or a family at this stage.
case_id = case_id or family_id
# Convert infromation to a loqusdb Case object
case_obj = build_case(
case=family,
case_id=case_id,
vcf_path=variant_file,
vcf_individuals=vcf_individuals,
nr_variants=nr_variants,
vcf_sv_path=sv_file,
sv_individuals=sv_individuals,
nr_sv_variants=nr_sv_variants,
profiles=profiles,
matches=matches,
profile_path=profile_file
)
# Build and load a new case, or update an existing one
load_case(
adapter=adapter,
case_obj=case_obj,
)
nr_inserted = 0
# If case was succesfully added we can store the variants
for file_type in ['vcf_path','vcf_sv_path']:
variant_type = 'snv'
if file_type == 'vcf_sv_path':
variant_type = 'sv'
if case_obj.get(file_type) is None:
continue
vcf_obj = get_vcf(case_obj[file_type])
try:
nr_inserted += load_variants(
adapter=adapter,
vcf_obj=vcf_obj,
case_obj=case_obj,
skip_case_id=skip_case_id,
gq_treshold=gq_treshold,
max_window=max_window,
variant_type=variant_type,
)
except Exception as err:
# If something went wrong do a rollback
LOG.warning(err)
delete(
adapter=adapter,
case_obj=case_obj,
)
raise err
return nr_inserted | [
"def",
"load_database",
"(",
"adapter",
",",
"variant_file",
"=",
"None",
",",
"sv_file",
"=",
"None",
",",
"family_file",
"=",
"None",
",",
"family_type",
"=",
"'ped'",
",",
"skip_case_id",
"=",
"False",
",",
"gq_treshold",
"=",
"None",
",",
"case_id",
"=... | Load the database with a case and its variants
Args:
adapter: Connection to database
variant_file(str): Path to variant file
sv_file(str): Path to sv variant file
family_file(str): Path to family file
family_type(str): Format of family file
skip_case_id(bool): If no case information should be added to variants
gq_treshold(int): If only quality variants should be considered
case_id(str): If different case id than the one in family file should be used
max_window(int): Specify the max size for sv windows
check_profile(bool): Does profile check if True
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
nr_inserted(int) | [
"Load",
"the",
"database",
"with",
"a",
"case",
"and",
"its",
"variants"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/load.py#L26-L151 |
moonso/loqusdb | loqusdb/utils/load.py | load_case | def load_case(adapter, case_obj, update=False):
"""Load a case to the database
Args:
adapter: Connection to database
case_obj: dict
update(bool): If existing case should be updated
Returns:
case_obj(models.Case)
"""
# Check if the case already exists in database.
existing_case = adapter.case(case_obj)
if existing_case:
if not update:
raise CaseError("Case {0} already exists in database".format(case_obj['case_id']))
case_obj = update_case(case_obj, existing_case)
# Add the case to database
try:
adapter.add_case(case_obj, update=update)
except CaseError as err:
raise err
return case_obj | python | def load_case(adapter, case_obj, update=False):
"""Load a case to the database
Args:
adapter: Connection to database
case_obj: dict
update(bool): If existing case should be updated
Returns:
case_obj(models.Case)
"""
# Check if the case already exists in database.
existing_case = adapter.case(case_obj)
if existing_case:
if not update:
raise CaseError("Case {0} already exists in database".format(case_obj['case_id']))
case_obj = update_case(case_obj, existing_case)
# Add the case to database
try:
adapter.add_case(case_obj, update=update)
except CaseError as err:
raise err
return case_obj | [
"def",
"load_case",
"(",
"adapter",
",",
"case_obj",
",",
"update",
"=",
"False",
")",
":",
"# Check if the case already exists in database.",
"existing_case",
"=",
"adapter",
".",
"case",
"(",
"case_obj",
")",
"if",
"existing_case",
":",
"if",
"not",
"update",
... | Load a case to the database
Args:
adapter: Connection to database
case_obj: dict
update(bool): If existing case should be updated
Returns:
case_obj(models.Case) | [
"Load",
"a",
"case",
"to",
"the",
"database"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/load.py#L153-L177 |
moonso/loqusdb | loqusdb/utils/load.py | load_variants | def load_variants(adapter, vcf_obj, case_obj, skip_case_id=False, gq_treshold=None,
max_window=3000, variant_type='snv'):
"""Load variants for a family into the database.
Args:
adapter (loqusdb.plugins.Adapter): initialized plugin
case_obj(Case): dict with case information
nr_variants(int)
skip_case_id (bool): whether to include the case id on variant level
or not
gq_treshold(int)
max_window(int): Specify the max size for sv windows
variant_type(str): 'sv' or 'snv'
Returns:
nr_inserted(int)
"""
if variant_type == 'snv':
nr_variants = case_obj['nr_variants']
else:
nr_variants = case_obj['nr_sv_variants']
nr_inserted = 0
case_id = case_obj['case_id']
if skip_case_id:
case_id = None
# Loop over the variants in the vcf
with click.progressbar(vcf_obj, label="Inserting variants",length=nr_variants) as bar:
variants = (build_variant(variant,case_obj,case_id, gq_treshold) for variant in bar)
if variant_type == 'sv':
for sv_variant in variants:
if not sv_variant:
continue
adapter.add_structural_variant(variant=sv_variant, max_window=max_window)
nr_inserted += 1
if variant_type == 'snv':
nr_inserted = adapter.add_variants(variants)
LOG.info("Inserted %s variants of type %s", nr_inserted, variant_type)
return nr_inserted | python | def load_variants(adapter, vcf_obj, case_obj, skip_case_id=False, gq_treshold=None,
max_window=3000, variant_type='snv'):
"""Load variants for a family into the database.
Args:
adapter (loqusdb.plugins.Adapter): initialized plugin
case_obj(Case): dict with case information
nr_variants(int)
skip_case_id (bool): whether to include the case id on variant level
or not
gq_treshold(int)
max_window(int): Specify the max size for sv windows
variant_type(str): 'sv' or 'snv'
Returns:
nr_inserted(int)
"""
if variant_type == 'snv':
nr_variants = case_obj['nr_variants']
else:
nr_variants = case_obj['nr_sv_variants']
nr_inserted = 0
case_id = case_obj['case_id']
if skip_case_id:
case_id = None
# Loop over the variants in the vcf
with click.progressbar(vcf_obj, label="Inserting variants",length=nr_variants) as bar:
variants = (build_variant(variant,case_obj,case_id, gq_treshold) for variant in bar)
if variant_type == 'sv':
for sv_variant in variants:
if not sv_variant:
continue
adapter.add_structural_variant(variant=sv_variant, max_window=max_window)
nr_inserted += 1
if variant_type == 'snv':
nr_inserted = adapter.add_variants(variants)
LOG.info("Inserted %s variants of type %s", nr_inserted, variant_type)
return nr_inserted | [
"def",
"load_variants",
"(",
"adapter",
",",
"vcf_obj",
",",
"case_obj",
",",
"skip_case_id",
"=",
"False",
",",
"gq_treshold",
"=",
"None",
",",
"max_window",
"=",
"3000",
",",
"variant_type",
"=",
"'snv'",
")",
":",
"if",
"variant_type",
"==",
"'snv'",
"... | Load variants for a family into the database.
Args:
adapter (loqusdb.plugins.Adapter): initialized plugin
case_obj(Case): dict with case information
nr_variants(int)
skip_case_id (bool): whether to include the case id on variant level
or not
gq_treshold(int)
max_window(int): Specify the max size for sv windows
variant_type(str): 'sv' or 'snv'
Returns:
nr_inserted(int) | [
"Load",
"variants",
"for",
"a",
"family",
"into",
"the",
"database",
"."
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/load.py#L179-L222 |
yjzhang/uncurl_python | uncurl/preprocessing.py | sparse_mean_var | def sparse_mean_var(data):
"""
Calculates the variance for each row of a sparse matrix,
using the relationship Var = E[x^2] - E[x]^2.
Returns:
pair of matrices mean, variance.
"""
data = sparse.csc_matrix(data)
return sparse_means_var_csc(data.data,
data.indices,
data.indptr,
data.shape[1],
data.shape[0]) | python | def sparse_mean_var(data):
"""
Calculates the variance for each row of a sparse matrix,
using the relationship Var = E[x^2] - E[x]^2.
Returns:
pair of matrices mean, variance.
"""
data = sparse.csc_matrix(data)
return sparse_means_var_csc(data.data,
data.indices,
data.indptr,
data.shape[1],
data.shape[0]) | [
"def",
"sparse_mean_var",
"(",
"data",
")",
":",
"data",
"=",
"sparse",
".",
"csc_matrix",
"(",
"data",
")",
"return",
"sparse_means_var_csc",
"(",
"data",
".",
"data",
",",
"data",
".",
"indices",
",",
"data",
".",
"indptr",
",",
"data",
".",
"shape",
... | Calculates the variance for each row of a sparse matrix,
using the relationship Var = E[x^2] - E[x]^2.
Returns:
pair of matrices mean, variance. | [
"Calculates",
"the",
"variance",
"for",
"each",
"row",
"of",
"a",
"sparse",
"matrix",
"using",
"the",
"relationship",
"Var",
"=",
"E",
"[",
"x^2",
"]",
"-",
"E",
"[",
"x",
"]",
"^2",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/preprocessing.py#L10-L23 |
yjzhang/uncurl_python | uncurl/preprocessing.py | max_variance_genes | def max_variance_genes(data, nbins=5, frac=0.2):
"""
This function identifies the genes that have the max variance
across a number of bins sorted by mean.
Args:
data (array): genes x cells
nbins (int): number of bins to sort genes by mean expression level. Default: 10.
frac (float): fraction of genes to return per bin - between 0 and 1. Default: 0.1
Returns:
list of gene indices (list of ints)
"""
# TODO: profile, make more efficient for large matrices
# 8000 cells: 0.325 seconds
# top time: sparse.csc_tocsr, csc_matvec, astype, copy, mul_scalar
# 73233 cells: 5.347 seconds, 4.762 s in sparse_var
# csc_tocsr: 1.736 s
# copy: 1.028 s
# astype: 0.999 s
# there is almost certainly something superlinear in this method
# maybe it's to_csr?
indices = []
if sparse.issparse(data):
means, var = sparse_mean_var(data)
else:
means = data.mean(1)
var = data.var(1)
mean_indices = means.argsort()
n_elements = int(data.shape[0]/nbins)
frac_elements = int(n_elements*frac)
for i in range(nbins):
bin_i = mean_indices[i*n_elements : (i+1)*n_elements]
if i==nbins-1:
bin_i = mean_indices[i*n_elements :]
var_i = var[bin_i]
var_sorted = var_i.argsort()
top_var_indices = var_sorted[len(bin_i) - frac_elements:]
ind = bin_i[top_var_indices]
# filter out genes with zero variance
ind = [index for index in ind if var[index]>0]
indices.extend(ind)
return indices | python | def max_variance_genes(data, nbins=5, frac=0.2):
"""
This function identifies the genes that have the max variance
across a number of bins sorted by mean.
Args:
data (array): genes x cells
nbins (int): number of bins to sort genes by mean expression level. Default: 10.
frac (float): fraction of genes to return per bin - between 0 and 1. Default: 0.1
Returns:
list of gene indices (list of ints)
"""
# TODO: profile, make more efficient for large matrices
# 8000 cells: 0.325 seconds
# top time: sparse.csc_tocsr, csc_matvec, astype, copy, mul_scalar
# 73233 cells: 5.347 seconds, 4.762 s in sparse_var
# csc_tocsr: 1.736 s
# copy: 1.028 s
# astype: 0.999 s
# there is almost certainly something superlinear in this method
# maybe it's to_csr?
indices = []
if sparse.issparse(data):
means, var = sparse_mean_var(data)
else:
means = data.mean(1)
var = data.var(1)
mean_indices = means.argsort()
n_elements = int(data.shape[0]/nbins)
frac_elements = int(n_elements*frac)
for i in range(nbins):
bin_i = mean_indices[i*n_elements : (i+1)*n_elements]
if i==nbins-1:
bin_i = mean_indices[i*n_elements :]
var_i = var[bin_i]
var_sorted = var_i.argsort()
top_var_indices = var_sorted[len(bin_i) - frac_elements:]
ind = bin_i[top_var_indices]
# filter out genes with zero variance
ind = [index for index in ind if var[index]>0]
indices.extend(ind)
return indices | [
"def",
"max_variance_genes",
"(",
"data",
",",
"nbins",
"=",
"5",
",",
"frac",
"=",
"0.2",
")",
":",
"# TODO: profile, make more efficient for large matrices",
"# 8000 cells: 0.325 seconds",
"# top time: sparse.csc_tocsr, csc_matvec, astype, copy, mul_scalar",
"# 73233 cells: 5.347... | This function identifies the genes that have the max variance
across a number of bins sorted by mean.
Args:
data (array): genes x cells
nbins (int): number of bins to sort genes by mean expression level. Default: 10.
frac (float): fraction of genes to return per bin - between 0 and 1. Default: 0.1
Returns:
list of gene indices (list of ints) | [
"This",
"function",
"identifies",
"the",
"genes",
"that",
"have",
"the",
"max",
"variance",
"across",
"a",
"number",
"of",
"bins",
"sorted",
"by",
"mean",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/preprocessing.py#L25-L67 |
yjzhang/uncurl_python | uncurl/preprocessing.py | cell_normalize | def cell_normalize(data):
"""
Returns the data where the expression is normalized so that the total
count per cell is equal.
"""
if sparse.issparse(data):
data = sparse.csc_matrix(data.astype(float))
# normalize in-place
sparse_cell_normalize(data.data,
data.indices,
data.indptr,
data.shape[1],
data.shape[0])
return data
data_norm = data.astype(float)
total_umis = []
for i in range(data.shape[1]):
di = data_norm[:,i]
total_umis.append(di.sum())
di /= total_umis[i]
med = np.median(total_umis)
data_norm *= med
return data_norm | python | def cell_normalize(data):
"""
Returns the data where the expression is normalized so that the total
count per cell is equal.
"""
if sparse.issparse(data):
data = sparse.csc_matrix(data.astype(float))
# normalize in-place
sparse_cell_normalize(data.data,
data.indices,
data.indptr,
data.shape[1],
data.shape[0])
return data
data_norm = data.astype(float)
total_umis = []
for i in range(data.shape[1]):
di = data_norm[:,i]
total_umis.append(di.sum())
di /= total_umis[i]
med = np.median(total_umis)
data_norm *= med
return data_norm | [
"def",
"cell_normalize",
"(",
"data",
")",
":",
"if",
"sparse",
".",
"issparse",
"(",
"data",
")",
":",
"data",
"=",
"sparse",
".",
"csc_matrix",
"(",
"data",
".",
"astype",
"(",
"float",
")",
")",
"# normalize in-place",
"sparse_cell_normalize",
"(",
"dat... | Returns the data where the expression is normalized so that the total
count per cell is equal. | [
"Returns",
"the",
"data",
"where",
"the",
"expression",
"is",
"normalized",
"so",
"that",
"the",
"total",
"count",
"per",
"cell",
"is",
"equal",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/preprocessing.py#L69-L91 |
yjzhang/uncurl_python | uncurl/preprocessing.py | log1p | def log1p(data):
"""
Returns ln(data+1), whether the original data is dense or sparse.
"""
if sparse.issparse(data):
return data.log1p()
else:
return np.log1p(data) | python | def log1p(data):
"""
Returns ln(data+1), whether the original data is dense or sparse.
"""
if sparse.issparse(data):
return data.log1p()
else:
return np.log1p(data) | [
"def",
"log1p",
"(",
"data",
")",
":",
"if",
"sparse",
".",
"issparse",
"(",
"data",
")",
":",
"return",
"data",
".",
"log1p",
"(",
")",
"else",
":",
"return",
"np",
".",
"log1p",
"(",
"data",
")"
] | Returns ln(data+1), whether the original data is dense or sparse. | [
"Returns",
"ln",
"(",
"data",
"+",
"1",
")",
"whether",
"the",
"original",
"data",
"is",
"dense",
"or",
"sparse",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/preprocessing.py#L93-L100 |
moonso/loqusdb | loqusdb/build_models/case.py | get_individual_positions | def get_individual_positions(individuals):
"""Return a dictionary with individual positions
Args:
individuals(list): A list with vcf individuals in correct order
Returns:
ind_pos(dict): Map from ind_id -> index position
"""
ind_pos = {}
if individuals:
for i, ind in enumerate(individuals):
ind_pos[ind] = i
return ind_pos | python | def get_individual_positions(individuals):
"""Return a dictionary with individual positions
Args:
individuals(list): A list with vcf individuals in correct order
Returns:
ind_pos(dict): Map from ind_id -> index position
"""
ind_pos = {}
if individuals:
for i, ind in enumerate(individuals):
ind_pos[ind] = i
return ind_pos | [
"def",
"get_individual_positions",
"(",
"individuals",
")",
":",
"ind_pos",
"=",
"{",
"}",
"if",
"individuals",
":",
"for",
"i",
",",
"ind",
"in",
"enumerate",
"(",
"individuals",
")",
":",
"ind_pos",
"[",
"ind",
"]",
"=",
"i",
"return",
"ind_pos"
] | Return a dictionary with individual positions
Args:
individuals(list): A list with vcf individuals in correct order
Returns:
ind_pos(dict): Map from ind_id -> index position | [
"Return",
"a",
"dictionary",
"with",
"individual",
"positions"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/build_models/case.py#L8-L21 |
moonso/loqusdb | loqusdb/build_models/case.py | build_case | def build_case(case, vcf_individuals=None, case_id=None, vcf_path=None, sv_individuals=None,
vcf_sv_path=None, nr_variants=None, nr_sv_variants=None, profiles=None,
matches=None, profile_path=None):
"""Build a Case from the given information
Args:
case(ped_parser.Family): A family object
vcf_individuals(list): Show the order of inds in vcf file
case_id(str): If another name than the one in family file should be used
vcf_path(str)
sv_individuals(list): Show the order of inds in vcf file
vcf_sv_path(str)
nr_variants(int)
nr_sv_variants(int)
profiles(dict): The profiles for each sample in vcf
matches(dict(list)): list of similar samples for each sample in vcf.
Returns:
case_obj(models.Case)
"""
# Create a dict that maps the ind ids to the position they have in vcf
individual_positions = get_individual_positions(vcf_individuals)
sv_individual_positions = get_individual_positions(sv_individuals)
family_id = None
if case:
if not case.affected_individuals:
LOG.warning("No affected individuals could be found in ped file")
family_id = case.family_id
# If case id is given manually we use that one
case_id = case_id or family_id
if case_id is None:
raise CaseError
case_obj = Case(
case_id=case_id,
)
if vcf_path:
case_obj['vcf_path'] = vcf_path
case_obj['nr_variants'] = nr_variants
if vcf_sv_path:
case_obj['vcf_sv_path'] = vcf_sv_path
case_obj['nr_sv_variants'] = nr_sv_variants
if profile_path:
case_obj['profile_path'] = profile_path
ind_objs = []
if case:
if individual_positions:
_ind_pos = individual_positions
else:
_ind_pos = sv_individual_positions
for ind_id in case.individuals:
individual = case.individuals[ind_id]
try:
#If a profile dict exists, get the profile for ind_id
profile = profiles[ind_id] if profiles else None
#If matching samples are found, get these samples for ind_id
similar_samples = matches[ind_id] if matches else None
ind_obj = Individual(
ind_id=ind_id,
case_id=case_id,
ind_index=_ind_pos[ind_id],
sex=individual.sex,
profile=profile,
similar_samples=similar_samples
)
ind_objs.append(dict(ind_obj))
except KeyError:
raise CaseError("Ind %s in ped file does not exist in VCF", ind_id)
else:
# If there where no family file we can create individuals from what we know
for ind_id in individual_positions:
profile = profiles[ind_id] if profiles else None
similar_samples = matches[ind_id] if matches else None
ind_obj = Individual(
ind_id = ind_id,
case_id = case_id,
ind_index=individual_positions[ind_id],
profile=profile,
similar_samples=similar_samples
)
ind_objs.append(dict(ind_obj))
# Add individuals to the correct variant type
for ind_obj in ind_objs:
if vcf_sv_path:
case_obj['sv_individuals'].append(dict(ind_obj))
case_obj['_sv_inds'][ind_obj['ind_id']] = dict(ind_obj)
if vcf_path:
case_obj['individuals'].append(dict(ind_obj))
case_obj['_inds'][ind_obj['ind_id']] = dict(ind_obj)
return case_obj | python | def build_case(case, vcf_individuals=None, case_id=None, vcf_path=None, sv_individuals=None,
vcf_sv_path=None, nr_variants=None, nr_sv_variants=None, profiles=None,
matches=None, profile_path=None):
"""Build a Case from the given information
Args:
case(ped_parser.Family): A family object
vcf_individuals(list): Show the order of inds in vcf file
case_id(str): If another name than the one in family file should be used
vcf_path(str)
sv_individuals(list): Show the order of inds in vcf file
vcf_sv_path(str)
nr_variants(int)
nr_sv_variants(int)
profiles(dict): The profiles for each sample in vcf
matches(dict(list)): list of similar samples for each sample in vcf.
Returns:
case_obj(models.Case)
"""
# Create a dict that maps the ind ids to the position they have in vcf
individual_positions = get_individual_positions(vcf_individuals)
sv_individual_positions = get_individual_positions(sv_individuals)
family_id = None
if case:
if not case.affected_individuals:
LOG.warning("No affected individuals could be found in ped file")
family_id = case.family_id
# If case id is given manually we use that one
case_id = case_id or family_id
if case_id is None:
raise CaseError
case_obj = Case(
case_id=case_id,
)
if vcf_path:
case_obj['vcf_path'] = vcf_path
case_obj['nr_variants'] = nr_variants
if vcf_sv_path:
case_obj['vcf_sv_path'] = vcf_sv_path
case_obj['nr_sv_variants'] = nr_sv_variants
if profile_path:
case_obj['profile_path'] = profile_path
ind_objs = []
if case:
if individual_positions:
_ind_pos = individual_positions
else:
_ind_pos = sv_individual_positions
for ind_id in case.individuals:
individual = case.individuals[ind_id]
try:
#If a profile dict exists, get the profile for ind_id
profile = profiles[ind_id] if profiles else None
#If matching samples are found, get these samples for ind_id
similar_samples = matches[ind_id] if matches else None
ind_obj = Individual(
ind_id=ind_id,
case_id=case_id,
ind_index=_ind_pos[ind_id],
sex=individual.sex,
profile=profile,
similar_samples=similar_samples
)
ind_objs.append(dict(ind_obj))
except KeyError:
raise CaseError("Ind %s in ped file does not exist in VCF", ind_id)
else:
# If there where no family file we can create individuals from what we know
for ind_id in individual_positions:
profile = profiles[ind_id] if profiles else None
similar_samples = matches[ind_id] if matches else None
ind_obj = Individual(
ind_id = ind_id,
case_id = case_id,
ind_index=individual_positions[ind_id],
profile=profile,
similar_samples=similar_samples
)
ind_objs.append(dict(ind_obj))
# Add individuals to the correct variant type
for ind_obj in ind_objs:
if vcf_sv_path:
case_obj['sv_individuals'].append(dict(ind_obj))
case_obj['_sv_inds'][ind_obj['ind_id']] = dict(ind_obj)
if vcf_path:
case_obj['individuals'].append(dict(ind_obj))
case_obj['_inds'][ind_obj['ind_id']] = dict(ind_obj)
return case_obj | [
"def",
"build_case",
"(",
"case",
",",
"vcf_individuals",
"=",
"None",
",",
"case_id",
"=",
"None",
",",
"vcf_path",
"=",
"None",
",",
"sv_individuals",
"=",
"None",
",",
"vcf_sv_path",
"=",
"None",
",",
"nr_variants",
"=",
"None",
",",
"nr_sv_variants",
"... | Build a Case from the given information
Args:
case(ped_parser.Family): A family object
vcf_individuals(list): Show the order of inds in vcf file
case_id(str): If another name than the one in family file should be used
vcf_path(str)
sv_individuals(list): Show the order of inds in vcf file
vcf_sv_path(str)
nr_variants(int)
nr_sv_variants(int)
profiles(dict): The profiles for each sample in vcf
matches(dict(list)): list of similar samples for each sample in vcf.
Returns:
case_obj(models.Case) | [
"Build",
"a",
"Case",
"from",
"the",
"given",
"information"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/build_models/case.py#L23-L122 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_poisson_data | def generate_poisson_data(centers, n_cells, cluster_probs=None):
"""
Generates poisson-distributed data, given a set of means for each cluster.
Args:
centers (array): genes x clusters matrix
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels
"""
genes, clusters = centers.shape
output = np.zeros((genes, n_cells))
if cluster_probs is None:
cluster_probs = np.ones(clusters)/clusters
labels = []
for i in range(n_cells):
c = np.random.choice(range(clusters), p=cluster_probs)
labels.append(c)
output[:,i] = np.random.poisson(centers[:,c])
return output, np.array(labels) | python | def generate_poisson_data(centers, n_cells, cluster_probs=None):
"""
Generates poisson-distributed data, given a set of means for each cluster.
Args:
centers (array): genes x clusters matrix
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels
"""
genes, clusters = centers.shape
output = np.zeros((genes, n_cells))
if cluster_probs is None:
cluster_probs = np.ones(clusters)/clusters
labels = []
for i in range(n_cells):
c = np.random.choice(range(clusters), p=cluster_probs)
labels.append(c)
output[:,i] = np.random.poisson(centers[:,c])
return output, np.array(labels) | [
"def",
"generate_poisson_data",
"(",
"centers",
",",
"n_cells",
",",
"cluster_probs",
"=",
"None",
")",
":",
"genes",
",",
"clusters",
"=",
"centers",
".",
"shape",
"output",
"=",
"np",
".",
"zeros",
"(",
"(",
"genes",
",",
"n_cells",
")",
")",
"if",
"... | Generates poisson-distributed data, given a set of means for each cluster.
Args:
centers (array): genes x clusters matrix
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels | [
"Generates",
"poisson",
"-",
"distributed",
"data",
"given",
"a",
"set",
"of",
"means",
"for",
"each",
"cluster",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L5-L28 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_zip_data | def generate_zip_data(M, L, n_cells, cluster_probs=None):
"""
Generates zero-inflated poisson-distributed data, given a set of means and zero probs for each cluster.
Args:
M (array): genes x clusters matrix
L (array): genes x clusters matrix - zero-inflation parameters
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels
"""
genes, clusters = M.shape
output = np.zeros((genes, n_cells))
if cluster_probs is None:
cluster_probs = np.ones(clusters)/clusters
zip_p = np.random.random((genes, n_cells))
labels = []
for i in range(n_cells):
c = np.random.choice(range(clusters), p=cluster_probs)
labels.append(c)
output[:,i] = np.where(zip_p[:,i] < L[:,c], 0, np.random.poisson(M[:,c]))
return output, np.array(labels) | python | def generate_zip_data(M, L, n_cells, cluster_probs=None):
"""
Generates zero-inflated poisson-distributed data, given a set of means and zero probs for each cluster.
Args:
M (array): genes x clusters matrix
L (array): genes x clusters matrix - zero-inflation parameters
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels
"""
genes, clusters = M.shape
output = np.zeros((genes, n_cells))
if cluster_probs is None:
cluster_probs = np.ones(clusters)/clusters
zip_p = np.random.random((genes, n_cells))
labels = []
for i in range(n_cells):
c = np.random.choice(range(clusters), p=cluster_probs)
labels.append(c)
output[:,i] = np.where(zip_p[:,i] < L[:,c], 0, np.random.poisson(M[:,c]))
return output, np.array(labels) | [
"def",
"generate_zip_data",
"(",
"M",
",",
"L",
",",
"n_cells",
",",
"cluster_probs",
"=",
"None",
")",
":",
"genes",
",",
"clusters",
"=",
"M",
".",
"shape",
"output",
"=",
"np",
".",
"zeros",
"(",
"(",
"genes",
",",
"n_cells",
")",
")",
"if",
"cl... | Generates zero-inflated poisson-distributed data, given a set of means and zero probs for each cluster.
Args:
M (array): genes x clusters matrix
L (array): genes x clusters matrix - zero-inflation parameters
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels | [
"Generates",
"zero",
"-",
"inflated",
"poisson",
"-",
"distributed",
"data",
"given",
"a",
"set",
"of",
"means",
"and",
"zero",
"probs",
"for",
"each",
"cluster",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L30-L55 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_state_data | def generate_state_data(means, weights):
"""
Generates data according to the Poisson Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
Returns:
data matrix - genes x cells
"""
x_true = np.dot(means, weights)
sample = np.random.poisson(x_true)
return sample.astype(float) | python | def generate_state_data(means, weights):
"""
Generates data according to the Poisson Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
Returns:
data matrix - genes x cells
"""
x_true = np.dot(means, weights)
sample = np.random.poisson(x_true)
return sample.astype(float) | [
"def",
"generate_state_data",
"(",
"means",
",",
"weights",
")",
":",
"x_true",
"=",
"np",
".",
"dot",
"(",
"means",
",",
"weights",
")",
"sample",
"=",
"np",
".",
"random",
".",
"poisson",
"(",
"x_true",
")",
"return",
"sample",
".",
"astype",
"(",
... | Generates data according to the Poisson Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
Returns:
data matrix - genes x cells | [
"Generates",
"data",
"according",
"to",
"the",
"Poisson",
"Convex",
"Mixture",
"Model",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L58-L71 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_zip_state_data | def generate_zip_state_data(means, weights, z):
"""
Generates data according to the Zero-inflated Poisson Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
z (float): zero-inflation parameter
Returns:
data matrix - genes x cells
"""
x_true = np.dot(means, weights)
sample = np.random.poisson(x_true)
random = np.random.random(x_true.shape)
x_true[random < z] = 0
return sample.astype(float) | python | def generate_zip_state_data(means, weights, z):
"""
Generates data according to the Zero-inflated Poisson Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
z (float): zero-inflation parameter
Returns:
data matrix - genes x cells
"""
x_true = np.dot(means, weights)
sample = np.random.poisson(x_true)
random = np.random.random(x_true.shape)
x_true[random < z] = 0
return sample.astype(float) | [
"def",
"generate_zip_state_data",
"(",
"means",
",",
"weights",
",",
"z",
")",
":",
"x_true",
"=",
"np",
".",
"dot",
"(",
"means",
",",
"weights",
")",
"sample",
"=",
"np",
".",
"random",
".",
"poisson",
"(",
"x_true",
")",
"random",
"=",
"np",
".",
... | Generates data according to the Zero-inflated Poisson Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
z (float): zero-inflation parameter
Returns:
data matrix - genes x cells | [
"Generates",
"data",
"according",
"to",
"the",
"Zero",
"-",
"inflated",
"Poisson",
"Convex",
"Mixture",
"Model",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L73-L89 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_nb_state_data | def generate_nb_state_data(means, weights, R):
"""
Generates data according to the Negative Binomial Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
R (array): dispersion parameter - 1 x genes
Returns:
data matrix - genes x cells
"""
cells = weights.shape[1]
# x_true = true means
x_true = np.dot(means, weights)
# convert means into P
R_ = np.tile(R, (cells, 1)).T
P_true = x_true/(R_ + x_true)
sample = np.random.negative_binomial(np.tile(R, (cells, 1)).T, P_true)
return sample.astype(float) | python | def generate_nb_state_data(means, weights, R):
"""
Generates data according to the Negative Binomial Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
R (array): dispersion parameter - 1 x genes
Returns:
data matrix - genes x cells
"""
cells = weights.shape[1]
# x_true = true means
x_true = np.dot(means, weights)
# convert means into P
R_ = np.tile(R, (cells, 1)).T
P_true = x_true/(R_ + x_true)
sample = np.random.negative_binomial(np.tile(R, (cells, 1)).T, P_true)
return sample.astype(float) | [
"def",
"generate_nb_state_data",
"(",
"means",
",",
"weights",
",",
"R",
")",
":",
"cells",
"=",
"weights",
".",
"shape",
"[",
"1",
"]",
"# x_true = true means",
"x_true",
"=",
"np",
".",
"dot",
"(",
"means",
",",
"weights",
")",
"# convert means into P",
... | Generates data according to the Negative Binomial Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
R (array): dispersion parameter - 1 x genes
Returns:
data matrix - genes x cells | [
"Generates",
"data",
"according",
"to",
"the",
"Negative",
"Binomial",
"Convex",
"Mixture",
"Model",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L91-L110 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_nb_states | def generate_nb_states(n_states, n_cells, n_genes):
"""
Generates means and weights for the Negative Binomial Mixture Model.
Weights are distributed Dirichlet(1,1,...), means are rand(0, 1).
Returned values can be passed to generate_state_data(M, W).
Args:
n_states (int): number of states or clusters
n_cells (int): number of cells
n_genes (int): number of genes
Returns:
M - genes x clusters
W - clusters x cells
R - genes x 1 - randint(1, 100)
"""
W = np.random.dirichlet([1]*n_states, size=(n_cells,))
W = W.T
M = np.random.random((n_genes, n_states))*100
R = np.random.randint(1, 100, n_genes)
return M, W, R | python | def generate_nb_states(n_states, n_cells, n_genes):
"""
Generates means and weights for the Negative Binomial Mixture Model.
Weights are distributed Dirichlet(1,1,...), means are rand(0, 1).
Returned values can be passed to generate_state_data(M, W).
Args:
n_states (int): number of states or clusters
n_cells (int): number of cells
n_genes (int): number of genes
Returns:
M - genes x clusters
W - clusters x cells
R - genes x 1 - randint(1, 100)
"""
W = np.random.dirichlet([1]*n_states, size=(n_cells,))
W = W.T
M = np.random.random((n_genes, n_states))*100
R = np.random.randint(1, 100, n_genes)
return M, W, R | [
"def",
"generate_nb_states",
"(",
"n_states",
",",
"n_cells",
",",
"n_genes",
")",
":",
"W",
"=",
"np",
".",
"random",
".",
"dirichlet",
"(",
"[",
"1",
"]",
"*",
"n_states",
",",
"size",
"=",
"(",
"n_cells",
",",
")",
")",
"W",
"=",
"W",
".",
"T"... | Generates means and weights for the Negative Binomial Mixture Model.
Weights are distributed Dirichlet(1,1,...), means are rand(0, 1).
Returned values can be passed to generate_state_data(M, W).
Args:
n_states (int): number of states or clusters
n_cells (int): number of cells
n_genes (int): number of genes
Returns:
M - genes x clusters
W - clusters x cells
R - genes x 1 - randint(1, 100) | [
"Generates",
"means",
"and",
"weights",
"for",
"the",
"Negative",
"Binomial",
"Mixture",
"Model",
".",
"Weights",
"are",
"distributed",
"Dirichlet",
"(",
"1",
"1",
"...",
")",
"means",
"are",
"rand",
"(",
"0",
"1",
")",
".",
"Returned",
"values",
"can",
... | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L112-L132 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_poisson_states | def generate_poisson_states(n_states, n_cells, n_genes):
"""
Generates means and weights for the Poisson Convex Mixture Model.
Weights are distributed Dirichlet(1,1,...), means are rand(0, 100).
Returned values can be passed to generate_state_data(M, W).
Args:
n_states (int): number of states or clusters
n_cells (int): number of cells
n_genes (int): number of genes
Returns:
M - genes x clusters
W - clusters x cells
"""
W = np.random.dirichlet([1]*n_states, size=(n_cells,))
W = W.T
M = np.random.random((n_genes, n_states))*100
return M, W | python | def generate_poisson_states(n_states, n_cells, n_genes):
"""
Generates means and weights for the Poisson Convex Mixture Model.
Weights are distributed Dirichlet(1,1,...), means are rand(0, 100).
Returned values can be passed to generate_state_data(M, W).
Args:
n_states (int): number of states or clusters
n_cells (int): number of cells
n_genes (int): number of genes
Returns:
M - genes x clusters
W - clusters x cells
"""
W = np.random.dirichlet([1]*n_states, size=(n_cells,))
W = W.T
M = np.random.random((n_genes, n_states))*100
return M, W | [
"def",
"generate_poisson_states",
"(",
"n_states",
",",
"n_cells",
",",
"n_genes",
")",
":",
"W",
"=",
"np",
".",
"random",
".",
"dirichlet",
"(",
"[",
"1",
"]",
"*",
"n_states",
",",
"size",
"=",
"(",
"n_cells",
",",
")",
")",
"W",
"=",
"W",
".",
... | Generates means and weights for the Poisson Convex Mixture Model.
Weights are distributed Dirichlet(1,1,...), means are rand(0, 100).
Returned values can be passed to generate_state_data(M, W).
Args:
n_states (int): number of states or clusters
n_cells (int): number of cells
n_genes (int): number of genes
Returns:
M - genes x clusters
W - clusters x cells | [
"Generates",
"means",
"and",
"weights",
"for",
"the",
"Poisson",
"Convex",
"Mixture",
"Model",
".",
"Weights",
"are",
"distributed",
"Dirichlet",
"(",
"1",
"1",
"...",
")",
"means",
"are",
"rand",
"(",
"0",
"100",
")",
".",
"Returned",
"values",
"can",
"... | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L134-L152 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_poisson_lineage | def generate_poisson_lineage(n_states, n_cells_per_cluster, n_genes, means=300):
"""
Generates a lineage for each state- assumes that each state has a common
ancestor.
Returns:
M - genes x clusters
W - clusters x cells
"""
# means...
M = np.random.random((n_genes, n_states))*means
center = M.mean(1)
W = np.zeros((n_states, n_cells_per_cluster*n_states))
# TODO
# start at a center where all the clusters have equal probability, and for
# each cluster, interpolate linearly towards the cluster.
index = 0
means = np.array([1.0/n_states]*n_states)
for c in range(n_states):
for i in range(n_cells_per_cluster):
w = np.copy(means)
new_value = w[c] + i*(1.0 - 1.0/n_states)/n_cells_per_cluster
w[:] = (1.0 - new_value)/(n_states - 1.0)
w[c] = new_value
W[:, index] = w
index += 1
return M, W | python | def generate_poisson_lineage(n_states, n_cells_per_cluster, n_genes, means=300):
"""
Generates a lineage for each state- assumes that each state has a common
ancestor.
Returns:
M - genes x clusters
W - clusters x cells
"""
# means...
M = np.random.random((n_genes, n_states))*means
center = M.mean(1)
W = np.zeros((n_states, n_cells_per_cluster*n_states))
# TODO
# start at a center where all the clusters have equal probability, and for
# each cluster, interpolate linearly towards the cluster.
index = 0
means = np.array([1.0/n_states]*n_states)
for c in range(n_states):
for i in range(n_cells_per_cluster):
w = np.copy(means)
new_value = w[c] + i*(1.0 - 1.0/n_states)/n_cells_per_cluster
w[:] = (1.0 - new_value)/(n_states - 1.0)
w[c] = new_value
W[:, index] = w
index += 1
return M, W | [
"def",
"generate_poisson_lineage",
"(",
"n_states",
",",
"n_cells_per_cluster",
",",
"n_genes",
",",
"means",
"=",
"300",
")",
":",
"# means...",
"M",
"=",
"np",
".",
"random",
".",
"random",
"(",
"(",
"n_genes",
",",
"n_states",
")",
")",
"*",
"means",
... | Generates a lineage for each state- assumes that each state has a common
ancestor.
Returns:
M - genes x clusters
W - clusters x cells | [
"Generates",
"a",
"lineage",
"for",
"each",
"state",
"-",
"assumes",
"that",
"each",
"state",
"has",
"a",
"common",
"ancestor",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L154-L180 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_nb_data | def generate_nb_data(P, R, n_cells, assignments=None):
"""
Generates negative binomial data
Args:
P (array): genes x clusters
R (array): genes x clusters
n_cells (int): number of cells
assignments (list): cluster assignment of each cell. Default:
random uniform
Returns:
data array with shape genes x cells
labels - array of cluster labels
"""
genes, clusters = P.shape
output = np.zeros((genes, n_cells))
if assignments is None:
cluster_probs = np.ones(clusters)/clusters
labels = []
for i in range(n_cells):
if assignments is None:
c = np.random.choice(range(clusters), p=cluster_probs)
else:
c = assignments[i]
labels.append(c)
# because numpy's negative binomial, r is the number of successes
output[:,i] = np.random.negative_binomial(R[:,c], 1.0-P[:,c])
return output, np.array(labels) | python | def generate_nb_data(P, R, n_cells, assignments=None):
"""
Generates negative binomial data
Args:
P (array): genes x clusters
R (array): genes x clusters
n_cells (int): number of cells
assignments (list): cluster assignment of each cell. Default:
random uniform
Returns:
data array with shape genes x cells
labels - array of cluster labels
"""
genes, clusters = P.shape
output = np.zeros((genes, n_cells))
if assignments is None:
cluster_probs = np.ones(clusters)/clusters
labels = []
for i in range(n_cells):
if assignments is None:
c = np.random.choice(range(clusters), p=cluster_probs)
else:
c = assignments[i]
labels.append(c)
# because numpy's negative binomial, r is the number of successes
output[:,i] = np.random.negative_binomial(R[:,c], 1.0-P[:,c])
return output, np.array(labels) | [
"def",
"generate_nb_data",
"(",
"P",
",",
"R",
",",
"n_cells",
",",
"assignments",
"=",
"None",
")",
":",
"genes",
",",
"clusters",
"=",
"P",
".",
"shape",
"output",
"=",
"np",
".",
"zeros",
"(",
"(",
"genes",
",",
"n_cells",
")",
")",
"if",
"assig... | Generates negative binomial data
Args:
P (array): genes x clusters
R (array): genes x clusters
n_cells (int): number of cells
assignments (list): cluster assignment of each cell. Default:
random uniform
Returns:
data array with shape genes x cells
labels - array of cluster labels | [
"Generates",
"negative",
"binomial",
"data"
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L182-L210 |
yjzhang/uncurl_python | uncurl/vis.py | visualize_poisson_w | def visualize_poisson_w(w, labels, filename, method='pca', figsize=(18,10), title='', **scatter_options):
"""
Saves a scatter plot of a visualization of W, the result from Poisson SE.
"""
if method == 'pca':
pca = PCA(2)
r_dim_red = pca.fit_transform(w.T).T
elif method == 'tsne':
pass
else:
print("Method is not available. use 'pca' (default) or 'tsne'.")
return
visualize_dim_red(r_dim_red, labels, filename, figsize, title, **scatter_options) | python | def visualize_poisson_w(w, labels, filename, method='pca', figsize=(18,10), title='', **scatter_options):
"""
Saves a scatter plot of a visualization of W, the result from Poisson SE.
"""
if method == 'pca':
pca = PCA(2)
r_dim_red = pca.fit_transform(w.T).T
elif method == 'tsne':
pass
else:
print("Method is not available. use 'pca' (default) or 'tsne'.")
return
visualize_dim_red(r_dim_red, labels, filename, figsize, title, **scatter_options) | [
"def",
"visualize_poisson_w",
"(",
"w",
",",
"labels",
",",
"filename",
",",
"method",
"=",
"'pca'",
",",
"figsize",
"=",
"(",
"18",
",",
"10",
")",
",",
"title",
"=",
"''",
",",
"*",
"*",
"scatter_options",
")",
":",
"if",
"method",
"==",
"'pca'",
... | Saves a scatter plot of a visualization of W, the result from Poisson SE. | [
"Saves",
"a",
"scatter",
"plot",
"of",
"a",
"visualization",
"of",
"W",
"the",
"result",
"from",
"Poisson",
"SE",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/vis.py#L6-L18 |
yjzhang/uncurl_python | uncurl/vis.py | visualize_dim_red | def visualize_dim_red(r, labels, filename=None, figsize=(18,10), title='', legend=True, label_map=None, label_scale=False, label_color_map=None, **scatter_options):
"""
Saves a scatter plot of a (2,n) matrix r, where each column is a cell.
Args:
r (array): (2,n) matrix
labels (array): (n,) array of ints/strings or floats. Can be None.
filename (string): string to save the output graph. If None, then this just displays the plot.
figsize (tuple): Default: (18, 10)
title (string): graph title
legend (bool): Default: True
label_map (dict): map of labels to label names. Default: None
label_scale (bool): True if labels is should be treated as floats. Default: False
label_color_map (array): (n,) array or list of colors for each label.
"""
fig = plt.figure(figsize=figsize)
plt.cla()
if not label_scale:
for i in set(labels):
label = i
if label_map is not None:
label = label_map[i]
if label_color_map is not None:
c = label_color_map[i]
plt.scatter(r[0, labels==i], r[1, labels==i], label=label, c=c, **scatter_options)
else:
plt.scatter(r[0, labels==i], r[1, labels==i], label=label, **scatter_options)
else:
if labels is None:
plt.scatter(r[0,:], r[1,:], **scatter_options)
else:
plt.scatter(r[0,:], r[1,:], c=labels/labels.max(), **scatter_options)
plt.title(title)
if legend:
plt.legend()
if filename is not None:
plt.savefig(filename, dpi=100)
plt.close()
return fig | python | def visualize_dim_red(r, labels, filename=None, figsize=(18,10), title='', legend=True, label_map=None, label_scale=False, label_color_map=None, **scatter_options):
"""
Saves a scatter plot of a (2,n) matrix r, where each column is a cell.
Args:
r (array): (2,n) matrix
labels (array): (n,) array of ints/strings or floats. Can be None.
filename (string): string to save the output graph. If None, then this just displays the plot.
figsize (tuple): Default: (18, 10)
title (string): graph title
legend (bool): Default: True
label_map (dict): map of labels to label names. Default: None
label_scale (bool): True if labels is should be treated as floats. Default: False
label_color_map (array): (n,) array or list of colors for each label.
"""
fig = plt.figure(figsize=figsize)
plt.cla()
if not label_scale:
for i in set(labels):
label = i
if label_map is not None:
label = label_map[i]
if label_color_map is not None:
c = label_color_map[i]
plt.scatter(r[0, labels==i], r[1, labels==i], label=label, c=c, **scatter_options)
else:
plt.scatter(r[0, labels==i], r[1, labels==i], label=label, **scatter_options)
else:
if labels is None:
plt.scatter(r[0,:], r[1,:], **scatter_options)
else:
plt.scatter(r[0,:], r[1,:], c=labels/labels.max(), **scatter_options)
plt.title(title)
if legend:
plt.legend()
if filename is not None:
plt.savefig(filename, dpi=100)
plt.close()
return fig | [
"def",
"visualize_dim_red",
"(",
"r",
",",
"labels",
",",
"filename",
"=",
"None",
",",
"figsize",
"=",
"(",
"18",
",",
"10",
")",
",",
"title",
"=",
"''",
",",
"legend",
"=",
"True",
",",
"label_map",
"=",
"None",
",",
"label_scale",
"=",
"False",
... | Saves a scatter plot of a (2,n) matrix r, where each column is a cell.
Args:
r (array): (2,n) matrix
labels (array): (n,) array of ints/strings or floats. Can be None.
filename (string): string to save the output graph. If None, then this just displays the plot.
figsize (tuple): Default: (18, 10)
title (string): graph title
legend (bool): Default: True
label_map (dict): map of labels to label names. Default: None
label_scale (bool): True if labels is should be treated as floats. Default: False
label_color_map (array): (n,) array or list of colors for each label. | [
"Saves",
"a",
"scatter",
"plot",
"of",
"a",
"(",
"2",
"n",
")",
"matrix",
"r",
"where",
"each",
"column",
"is",
"a",
"cell",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/vis.py#L20-L58 |
yjzhang/uncurl_python | uncurl/experiment_runner.py | run_experiment | def run_experiment(methods, data, n_classes, true_labels, n_runs=10, use_purity=True, use_nmi=False, use_ari=False, use_nne=False, consensus=False):
"""
runs a pre-processing + clustering experiment...
exactly one of use_purity, use_nmi, or use_ari can be true
Args:
methods: list of 2-tuples. The first element is either a single Preprocess object or a list of Preprocess objects, to be applied in sequence to the data. The second element is either a single Cluster object, a list of Cluster objects, or a list of lists, where each list is a sequence of Preprocess objects with the final element being a Cluster object.
data: genes x cells array
true_labels: 1d array of length cells
consensus: if true, runs a consensus on cluster results for each method at the very end.
use_purity, use_nmi, use_ari, use_nne: which error metric to use (at most one can be True)
Returns:
purities (list of lists)
names (list of lists)
other (dict): keys: timing, preprocessing, clusterings
"""
results = []
names = []
clusterings = {}
other_results = {}
other_results['timing'] = {}
other_results['preprocessing'] = {}
if use_purity:
purity_method = purity
elif use_nmi:
purity_method = nmi
elif use_ari:
purity_method = ari
elif use_nne:
purity_method = nne
for i in range(n_runs):
print('run {0}'.format(i))
purities = []
r = 0
method_index = 0
for preproc, cluster in methods:
t0 = time.time()
if isinstance(preproc, Preprocess):
preprocessed, ll = preproc.run(data)
output_names = preproc.output_names
else:
# if the input is a list, only use the first preproc result
p1 = data
output_names = ['']
for p in preproc:
p1, ll = p.run(p1)
p1 = p1[0]
if output_names[0] != '':
output_names[0] = output_names[0] + '_' + p.output_names[0]
else:
output_names[0] = p.output_names[0]
preprocessed = [p1]
t1 = time.time() - t0
for name, pre in zip(output_names, preprocessed):
starting_index = method_index
if isinstance(cluster, Cluster):
#try:
t0 = time.time()
labels = cluster.run(pre)
t2 = t1 + time.time() - t0
if use_nne:
purities.append(purity_method(pre, true_labels))
else:
purities.append(purity_method(labels, true_labels))
if i==0:
names.append(name + '_' + cluster.name)
clusterings[names[-1]] = []
other_results['timing'][names[-1]] = []
print(names[r])
clusterings[names[r]].append(labels)
print('time: ' + str(t2))
other_results['timing'][names[r]].append(t2)
print(purities[-1])
r += 1
method_index += 1
#except:
# print('failed to do clustering')
elif type(cluster) == list:
for c in cluster:
if isinstance(c, list):
t2 = t1
name2 = name
sub_data = pre.copy()
for subproc in c[:-1]:
t0 = time.time()
subproc_out, ll = subproc.run(sub_data)
sub_data = subproc_out[0]
name2 = name2 + '_' + subproc.output_names[0]
t2 += time.time() - t0
t0 = time.time()
labels = c[-1].run(sub_data)
t2 += time.time() - t0
if use_nne:
purities.append(purity_method(sub_data, true_labels))
else:
purities.append(purity_method(labels, true_labels))
if i==0:
names.append(name2 + '_' + c[-1].name)
clusterings[names[-1]] = []
other_results['timing'][names[-1]] = []
print(names[r])
clusterings[names[r]].append(labels)
other_results['timing'][names[r]].append(t2)
print('time: ' + str(t2))
print(purities[-1])
r += 1
method_index += 1
else:
try:
t0 = time.time()
labels = c.run(pre)
t2 = t1 + time.time() - t0
if i==0:
names.append(name + '_' + c.name)
clusterings[names[-1]] = []
other_results['timing'][names[-1]] = []
if use_nne:
purities.append(purity_method(pre, true_labels))
else:
purities.append(purity_method(labels, true_labels))
print(names[r])
clusterings[names[r]].append(labels)
other_results['timing'][names[r]].append(t2)
print('time: ' + str(t2))
print(purities[-1])
r += 1
method_index += 1
except:
print('failed to do clustering')
# find the highest purity for the pre-processing method
# save the preprocessing result with the highest NMI
num_clustering_results = method_index - starting_index
clustering_results = purities[-num_clustering_results:]
if i > 0 and len(clustering_results) > 0:
old_clustering_results = results[-1][starting_index:method_index]
if max(old_clustering_results) < max(clustering_results):
other_results['preprocessing'][name] = pre
else:
other_results['preprocessing'][name] = pre
print('\t'.join(names))
print('purities: ' + '\t'.join(map(str, purities)))
results.append(purities)
consensus_purities = []
if consensus:
other_results['consensus'] = {}
k = len(np.unique(true_labels))
for name, clusts in clusterings.items():
print(name)
clusts = np.vstack(clusts)
consensus_clust = CE.cluster_ensembles(clusts, verbose=False, N_clusters_max=k)
other_results['consensus'][name] = consensus_clust
if use_purity:
consensus_purity = purity(consensus_clust.flatten(), true_labels)
print('consensus purity: ' + str(consensus_purity))
consensus_purities.append(consensus_purity)
if use_nmi:
consensus_nmi = nmi(true_labels, consensus_clust)
print('consensus NMI: ' + str(consensus_nmi))
consensus_purities.append(consensus_nmi)
if use_ari:
consensus_ari = ari(true_labels, consensus_clust)
print('consensus ARI: ' + str(consensus_ari))
consensus_purities.append(consensus_ari)
print('consensus results: ' + '\t'.join(map(str, consensus_purities)))
other_results['clusterings'] = clusterings
return results, names, other_results | python | def run_experiment(methods, data, n_classes, true_labels, n_runs=10, use_purity=True, use_nmi=False, use_ari=False, use_nne=False, consensus=False):
"""
runs a pre-processing + clustering experiment...
exactly one of use_purity, use_nmi, or use_ari can be true
Args:
methods: list of 2-tuples. The first element is either a single Preprocess object or a list of Preprocess objects, to be applied in sequence to the data. The second element is either a single Cluster object, a list of Cluster objects, or a list of lists, where each list is a sequence of Preprocess objects with the final element being a Cluster object.
data: genes x cells array
true_labels: 1d array of length cells
consensus: if true, runs a consensus on cluster results for each method at the very end.
use_purity, use_nmi, use_ari, use_nne: which error metric to use (at most one can be True)
Returns:
purities (list of lists)
names (list of lists)
other (dict): keys: timing, preprocessing, clusterings
"""
results = []
names = []
clusterings = {}
other_results = {}
other_results['timing'] = {}
other_results['preprocessing'] = {}
if use_purity:
purity_method = purity
elif use_nmi:
purity_method = nmi
elif use_ari:
purity_method = ari
elif use_nne:
purity_method = nne
for i in range(n_runs):
print('run {0}'.format(i))
purities = []
r = 0
method_index = 0
for preproc, cluster in methods:
t0 = time.time()
if isinstance(preproc, Preprocess):
preprocessed, ll = preproc.run(data)
output_names = preproc.output_names
else:
# if the input is a list, only use the first preproc result
p1 = data
output_names = ['']
for p in preproc:
p1, ll = p.run(p1)
p1 = p1[0]
if output_names[0] != '':
output_names[0] = output_names[0] + '_' + p.output_names[0]
else:
output_names[0] = p.output_names[0]
preprocessed = [p1]
t1 = time.time() - t0
for name, pre in zip(output_names, preprocessed):
starting_index = method_index
if isinstance(cluster, Cluster):
#try:
t0 = time.time()
labels = cluster.run(pre)
t2 = t1 + time.time() - t0
if use_nne:
purities.append(purity_method(pre, true_labels))
else:
purities.append(purity_method(labels, true_labels))
if i==0:
names.append(name + '_' + cluster.name)
clusterings[names[-1]] = []
other_results['timing'][names[-1]] = []
print(names[r])
clusterings[names[r]].append(labels)
print('time: ' + str(t2))
other_results['timing'][names[r]].append(t2)
print(purities[-1])
r += 1
method_index += 1
#except:
# print('failed to do clustering')
elif type(cluster) == list:
for c in cluster:
if isinstance(c, list):
t2 = t1
name2 = name
sub_data = pre.copy()
for subproc in c[:-1]:
t0 = time.time()
subproc_out, ll = subproc.run(sub_data)
sub_data = subproc_out[0]
name2 = name2 + '_' + subproc.output_names[0]
t2 += time.time() - t0
t0 = time.time()
labels = c[-1].run(sub_data)
t2 += time.time() - t0
if use_nne:
purities.append(purity_method(sub_data, true_labels))
else:
purities.append(purity_method(labels, true_labels))
if i==0:
names.append(name2 + '_' + c[-1].name)
clusterings[names[-1]] = []
other_results['timing'][names[-1]] = []
print(names[r])
clusterings[names[r]].append(labels)
other_results['timing'][names[r]].append(t2)
print('time: ' + str(t2))
print(purities[-1])
r += 1
method_index += 1
else:
try:
t0 = time.time()
labels = c.run(pre)
t2 = t1 + time.time() - t0
if i==0:
names.append(name + '_' + c.name)
clusterings[names[-1]] = []
other_results['timing'][names[-1]] = []
if use_nne:
purities.append(purity_method(pre, true_labels))
else:
purities.append(purity_method(labels, true_labels))
print(names[r])
clusterings[names[r]].append(labels)
other_results['timing'][names[r]].append(t2)
print('time: ' + str(t2))
print(purities[-1])
r += 1
method_index += 1
except:
print('failed to do clustering')
# find the highest purity for the pre-processing method
# save the preprocessing result with the highest NMI
num_clustering_results = method_index - starting_index
clustering_results = purities[-num_clustering_results:]
if i > 0 and len(clustering_results) > 0:
old_clustering_results = results[-1][starting_index:method_index]
if max(old_clustering_results) < max(clustering_results):
other_results['preprocessing'][name] = pre
else:
other_results['preprocessing'][name] = pre
print('\t'.join(names))
print('purities: ' + '\t'.join(map(str, purities)))
results.append(purities)
consensus_purities = []
if consensus:
other_results['consensus'] = {}
k = len(np.unique(true_labels))
for name, clusts in clusterings.items():
print(name)
clusts = np.vstack(clusts)
consensus_clust = CE.cluster_ensembles(clusts, verbose=False, N_clusters_max=k)
other_results['consensus'][name] = consensus_clust
if use_purity:
consensus_purity = purity(consensus_clust.flatten(), true_labels)
print('consensus purity: ' + str(consensus_purity))
consensus_purities.append(consensus_purity)
if use_nmi:
consensus_nmi = nmi(true_labels, consensus_clust)
print('consensus NMI: ' + str(consensus_nmi))
consensus_purities.append(consensus_nmi)
if use_ari:
consensus_ari = ari(true_labels, consensus_clust)
print('consensus ARI: ' + str(consensus_ari))
consensus_purities.append(consensus_ari)
print('consensus results: ' + '\t'.join(map(str, consensus_purities)))
other_results['clusterings'] = clusterings
return results, names, other_results | [
"def",
"run_experiment",
"(",
"methods",
",",
"data",
",",
"n_classes",
",",
"true_labels",
",",
"n_runs",
"=",
"10",
",",
"use_purity",
"=",
"True",
",",
"use_nmi",
"=",
"False",
",",
"use_ari",
"=",
"False",
",",
"use_nne",
"=",
"False",
",",
"consensu... | runs a pre-processing + clustering experiment...
exactly one of use_purity, use_nmi, or use_ari can be true
Args:
methods: list of 2-tuples. The first element is either a single Preprocess object or a list of Preprocess objects, to be applied in sequence to the data. The second element is either a single Cluster object, a list of Cluster objects, or a list of lists, where each list is a sequence of Preprocess objects with the final element being a Cluster object.
data: genes x cells array
true_labels: 1d array of length cells
consensus: if true, runs a consensus on cluster results for each method at the very end.
use_purity, use_nmi, use_ari, use_nne: which error metric to use (at most one can be True)
Returns:
purities (list of lists)
names (list of lists)
other (dict): keys: timing, preprocessing, clusterings | [
"runs",
"a",
"pre",
"-",
"processing",
"+",
"clustering",
"experiment",
"..."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/experiment_runner.py#L889-L1056 |
yjzhang/uncurl_python | uncurl/experiment_runner.py | generate_visualizations | def generate_visualizations(methods, data, true_labels, base_dir = 'visualizations',
figsize=(18,10), **scatter_options):
"""
Generates visualization scatters for all the methods.
Args:
methods: follows same format as run_experiments. List of tuples.
data: genes x cells
true_labels: array of integers
base_dir: base directory to save all the plots
figsize: tuple of ints representing size of figure
scatter_options: options for plt.scatter
"""
plt.figure(figsize=figsize)
for method in methods:
preproc= method[0]
if isinstance(preproc, Preprocess):
preprocessed, ll = preproc.run(data)
output_names = preproc.output_names
else:
# if the input is a list, only use the first preproc result
p1 = data
output_names = ['']
for p in preproc:
p1, ll = p.run(p1)
p1 = p1[0]
output_names[0] = output_names[0] + p.output_names[0]
preprocessed = [p1]
for r, name in zip(preprocessed, output_names):
# TODO: cluster labels
print(name)
# if it's 2d, just display it... else, do tsne to reduce to 2d
if r.shape[0]==2:
r_dim_red = r
else:
# sometimes the data is too big to do tsne... (for sklearn)
if sparse.issparse(r) and r.shape[0] > 100:
name = 'tsvd_' + name
tsvd = TruncatedSVD(50)
r_dim_red = tsvd.fit_transform(r.T)
try:
tsne = TSNE(2)
r_dim_red = tsne.fit_transform(r_dim_red).T
name = 'tsne_' + name
except:
tsvd2 = TruncatedSVD(2)
r_dim_red = tsvd2.fit_transform(r_dim_red).T
else:
name = 'tsne_' + name
tsne = TSNE(2)
r_dim_red = tsne.fit_transform(r.T).T
if isinstance(method[1], list):
for clustering_method in method[1]:
try:
cluster_labels = clustering_method.run(r)
except:
print('clustering failed')
continue
output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)
visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)
else:
clustering_method = method[1]
try:
cluster_labels = clustering_method.run(r)
except:
print('clustering failed')
continue
output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)
visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)
output_path = base_dir + '/{0}_true_labels.png'.format(name)
visualize_dim_red(r_dim_red, true_labels, output_path, **scatter_options) | python | def generate_visualizations(methods, data, true_labels, base_dir = 'visualizations',
figsize=(18,10), **scatter_options):
"""
Generates visualization scatters for all the methods.
Args:
methods: follows same format as run_experiments. List of tuples.
data: genes x cells
true_labels: array of integers
base_dir: base directory to save all the plots
figsize: tuple of ints representing size of figure
scatter_options: options for plt.scatter
"""
plt.figure(figsize=figsize)
for method in methods:
preproc= method[0]
if isinstance(preproc, Preprocess):
preprocessed, ll = preproc.run(data)
output_names = preproc.output_names
else:
# if the input is a list, only use the first preproc result
p1 = data
output_names = ['']
for p in preproc:
p1, ll = p.run(p1)
p1 = p1[0]
output_names[0] = output_names[0] + p.output_names[0]
preprocessed = [p1]
for r, name in zip(preprocessed, output_names):
# TODO: cluster labels
print(name)
# if it's 2d, just display it... else, do tsne to reduce to 2d
if r.shape[0]==2:
r_dim_red = r
else:
# sometimes the data is too big to do tsne... (for sklearn)
if sparse.issparse(r) and r.shape[0] > 100:
name = 'tsvd_' + name
tsvd = TruncatedSVD(50)
r_dim_red = tsvd.fit_transform(r.T)
try:
tsne = TSNE(2)
r_dim_red = tsne.fit_transform(r_dim_red).T
name = 'tsne_' + name
except:
tsvd2 = TruncatedSVD(2)
r_dim_red = tsvd2.fit_transform(r_dim_red).T
else:
name = 'tsne_' + name
tsne = TSNE(2)
r_dim_red = tsne.fit_transform(r.T).T
if isinstance(method[1], list):
for clustering_method in method[1]:
try:
cluster_labels = clustering_method.run(r)
except:
print('clustering failed')
continue
output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)
visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)
else:
clustering_method = method[1]
try:
cluster_labels = clustering_method.run(r)
except:
print('clustering failed')
continue
output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)
visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)
output_path = base_dir + '/{0}_true_labels.png'.format(name)
visualize_dim_red(r_dim_red, true_labels, output_path, **scatter_options) | [
"def",
"generate_visualizations",
"(",
"methods",
",",
"data",
",",
"true_labels",
",",
"base_dir",
"=",
"'visualizations'",
",",
"figsize",
"=",
"(",
"18",
",",
"10",
")",
",",
"*",
"*",
"scatter_options",
")",
":",
"plt",
".",
"figure",
"(",
"figsize",
... | Generates visualization scatters for all the methods.
Args:
methods: follows same format as run_experiments. List of tuples.
data: genes x cells
true_labels: array of integers
base_dir: base directory to save all the plots
figsize: tuple of ints representing size of figure
scatter_options: options for plt.scatter | [
"Generates",
"visualization",
"scatters",
"for",
"all",
"the",
"methods",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/experiment_runner.py#L1058-L1128 |
yjzhang/uncurl_python | uncurl/experiment_runner.py | PoissonSE.run | def run(self, data):
"""
Returns:
list of W, M*W
ll
"""
if self.normalize_data:
data = cell_normalize(data)
M, W, ll = poisson_estimate_state(data, **self.params)
outputs = []
if self.return_w:
outputs.append(W)
if self.return_m:
outputs.append(M)
if self.return_mw:
outputs.append(M.dot(W))
if self.return_mds:
X = dim_reduce(M, W, 2)
outputs.append(X.T.dot(W))
return outputs, ll | python | def run(self, data):
"""
Returns:
list of W, M*W
ll
"""
if self.normalize_data:
data = cell_normalize(data)
M, W, ll = poisson_estimate_state(data, **self.params)
outputs = []
if self.return_w:
outputs.append(W)
if self.return_m:
outputs.append(M)
if self.return_mw:
outputs.append(M.dot(W))
if self.return_mds:
X = dim_reduce(M, W, 2)
outputs.append(X.T.dot(W))
return outputs, ll | [
"def",
"run",
"(",
"self",
",",
"data",
")",
":",
"if",
"self",
".",
"normalize_data",
":",
"data",
"=",
"cell_normalize",
"(",
"data",
")",
"M",
",",
"W",
",",
"ll",
"=",
"poisson_estimate_state",
"(",
"data",
",",
"*",
"*",
"self",
".",
"params",
... | Returns:
list of W, M*W
ll | [
"Returns",
":",
"list",
"of",
"W",
"M",
"*",
"W",
"ll"
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/experiment_runner.py#L231-L250 |
markperdue/pyvesync | src/pyvesync/helpers.py | Helpers.calculate_hex | def calculate_hex(hex_string):
"""Credit for conversion to itsnotlupus/vesync_wsproxy"""
hex_conv = hex_string.split(':')
converted_hex = (int(hex_conv[0], 16) + int(hex_conv[1], 16))/8192
return converted_hex | python | def calculate_hex(hex_string):
"""Credit for conversion to itsnotlupus/vesync_wsproxy"""
hex_conv = hex_string.split(':')
converted_hex = (int(hex_conv[0], 16) + int(hex_conv[1], 16))/8192
return converted_hex | [
"def",
"calculate_hex",
"(",
"hex_string",
")",
":",
"hex_conv",
"=",
"hex_string",
".",
"split",
"(",
"':'",
")",
"converted_hex",
"=",
"(",
"int",
"(",
"hex_conv",
"[",
"0",
"]",
",",
"16",
")",
"+",
"int",
"(",
"hex_conv",
"[",
"1",
"]",
",",
"1... | Credit for conversion to itsnotlupus/vesync_wsproxy | [
"Credit",
"for",
"conversion",
"to",
"itsnotlupus",
"/",
"vesync_wsproxy"
] | train | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/helpers.py#L122-L127 |
markperdue/pyvesync | src/pyvesync/helpers.py | Helpers.resolve_updates | def resolve_updates(orig_list, updated_list):
"""Merges changes from one list of devices against another"""
if updated_list is not None and updated_list:
if orig_list is None:
orig_list = updated_list
else:
# Add new devices not in list but found in the update
for new_device in updated_list:
was_found = False
for device in orig_list:
if new_device.cid == device.cid:
was_found = True
break
if not was_found:
orig_list.append(new_device)
# Remove old devices in the list not found in the update
for device in orig_list:
should_remove = True
for new_device in updated_list:
if device.cid == new_device.cid:
should_remove = False
break
if should_remove:
orig_list.remove(device)
# Call update on each device in the list
[device.update() for device in orig_list]
return orig_list | python | def resolve_updates(orig_list, updated_list):
"""Merges changes from one list of devices against another"""
if updated_list is not None and updated_list:
if orig_list is None:
orig_list = updated_list
else:
# Add new devices not in list but found in the update
for new_device in updated_list:
was_found = False
for device in orig_list:
if new_device.cid == device.cid:
was_found = True
break
if not was_found:
orig_list.append(new_device)
# Remove old devices in the list not found in the update
for device in orig_list:
should_remove = True
for new_device in updated_list:
if device.cid == new_device.cid:
should_remove = False
break
if should_remove:
orig_list.remove(device)
# Call update on each device in the list
[device.update() for device in orig_list]
return orig_list | [
"def",
"resolve_updates",
"(",
"orig_list",
",",
"updated_list",
")",
":",
"if",
"updated_list",
"is",
"not",
"None",
"and",
"updated_list",
":",
"if",
"orig_list",
"is",
"None",
":",
"orig_list",
"=",
"updated_list",
"else",
":",
"# Add new devices not in list bu... | Merges changes from one list of devices against another | [
"Merges",
"changes",
"from",
"one",
"list",
"of",
"devices",
"against",
"another"
] | train | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/helpers.py#L222-L256 |
fbergmann/libSEDML | examples/python/create_sedml.py | main | def main (args):
"""Usage: create_sedml output-filename
"""
if (len(args) != 2):
print(main.__doc__)
sys.exit(1);
# create the document
doc = libsedml.SedDocument();
doc.setLevel(1);
doc.setVersion(1);
# create a first model referencing an sbml file
model = doc.createModel();
model.setId("model1");
model.setSource("file.xml");
model.setLanguage("urn:sedml:sbml");
# create a second model modifying a variable of that other sbml file
model = doc.createModel();
model.setId("model2");
model.setSource("model1");
model.setLanguage("urn:sedml:sbml");
# change a paramerter 'k' to 0.1
change = model.createChangeAttribute();
change.setTarget("/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='k']/@value");
change.setNewValue("0.1");
# remove species 's1'
remove = model.createRemoveXML();
remove.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='S1']");
# now for something tricky we want to update the initialConcentration of 'S2' to be
# half what it was in the original model
compute = model.createComputeChange();
compute.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id="S2"]/@initialConcentration");
variable = compute.createVariable();
variable.setId("S2");
variable.setModelReference("model1");
variable.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='S2']");
compute.setMath(libsedml.parseFormula("S2 / 2"));
# create simulation
tc = doc.createUniformTimeCourse();
tc.setId("sim1");
tc.setInitialTime(0.0);
tc.setOutputStartTime(0.0);
tc.setOutputEndTime(10.0);
tc.setNumberOfPoints(1000);
# need to set the correct KISAO Term
alg = tc.createAlgorithm();
alg.setKisaoID("KISAO:0000019");
# create a task that uses the simulation and the model above
task = doc.createTask();
task.setId("task1");
task.setModelReference("model1");
task.setSimulationReference("sim1");
# add a DataGenerator to hold the output for time
dg = doc.createDataGenerator();
dg.setId("time");
dg.setName("time");
var = dg.createVariable();
var.setId("v0");
var.setName("time");
var.setTaskReference("task1");
var.setSymbol("urn:sedml:symbol:time");
dg.setMath(libsedml.parseFormula("v0"));
# and one for S1
dg = doc.createDataGenerator();
dg.setId("S1");
dg.setName("S1");
var = dg.createVariable();
var.setId("v1");
var.setName("S1");
var.setTaskReference("task1");
var.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='S1']");
dg.setMath(libsedml.parseFormula("v1"));
# add a report
report = doc.createReport();
report.setId("r1");
report.setName("report 1");
set = report.createDataSet();
set.setId("ds1");
set.setLabel("time");
set.setDataReference("time");
set = report.createDataSet();
set.setId("ds2");
set.setLabel("S1");
set.setDataReference("S1");
# add a 2d plot
plot = doc.createPlot2D();
plot.setId("p1");
plot.setName("S1 Timecourse");
curve = plot.createCurve();
curve.setId("c1");
curve.setName("S1");
curve.setLogX(False);
curve.setLogY(False);
curve.setXDataReference("time");
curve.setYDataReference("S1");
# add a 3D Plot
plot2 = doc.createPlot3D();
plot2.setId("p2");
plot2.setName("dunno");
surf = plot2.createSurface();
surf.setId("surf1");
surf.setName("S1");
surf.setLogX(False);
surf.setLogY(False);
surf.setLogZ(False);
surf.setXDataReference("time");
surf.setYDataReference("S1");
surf.setZDataReference("S1");
# write the document
libsedml.writeSedML(doc, args[1]); | python | def main (args):
"""Usage: create_sedml output-filename
"""
if (len(args) != 2):
print(main.__doc__)
sys.exit(1);
# create the document
doc = libsedml.SedDocument();
doc.setLevel(1);
doc.setVersion(1);
# create a first model referencing an sbml file
model = doc.createModel();
model.setId("model1");
model.setSource("file.xml");
model.setLanguage("urn:sedml:sbml");
# create a second model modifying a variable of that other sbml file
model = doc.createModel();
model.setId("model2");
model.setSource("model1");
model.setLanguage("urn:sedml:sbml");
# change a paramerter 'k' to 0.1
change = model.createChangeAttribute();
change.setTarget("/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='k']/@value");
change.setNewValue("0.1");
# remove species 's1'
remove = model.createRemoveXML();
remove.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='S1']");
# now for something tricky we want to update the initialConcentration of 'S2' to be
# half what it was in the original model
compute = model.createComputeChange();
compute.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id="S2"]/@initialConcentration");
variable = compute.createVariable();
variable.setId("S2");
variable.setModelReference("model1");
variable.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='S2']");
compute.setMath(libsedml.parseFormula("S2 / 2"));
# create simulation
tc = doc.createUniformTimeCourse();
tc.setId("sim1");
tc.setInitialTime(0.0);
tc.setOutputStartTime(0.0);
tc.setOutputEndTime(10.0);
tc.setNumberOfPoints(1000);
# need to set the correct KISAO Term
alg = tc.createAlgorithm();
alg.setKisaoID("KISAO:0000019");
# create a task that uses the simulation and the model above
task = doc.createTask();
task.setId("task1");
task.setModelReference("model1");
task.setSimulationReference("sim1");
# add a DataGenerator to hold the output for time
dg = doc.createDataGenerator();
dg.setId("time");
dg.setName("time");
var = dg.createVariable();
var.setId("v0");
var.setName("time");
var.setTaskReference("task1");
var.setSymbol("urn:sedml:symbol:time");
dg.setMath(libsedml.parseFormula("v0"));
# and one for S1
dg = doc.createDataGenerator();
dg.setId("S1");
dg.setName("S1");
var = dg.createVariable();
var.setId("v1");
var.setName("S1");
var.setTaskReference("task1");
var.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='S1']");
dg.setMath(libsedml.parseFormula("v1"));
# add a report
report = doc.createReport();
report.setId("r1");
report.setName("report 1");
set = report.createDataSet();
set.setId("ds1");
set.setLabel("time");
set.setDataReference("time");
set = report.createDataSet();
set.setId("ds2");
set.setLabel("S1");
set.setDataReference("S1");
# add a 2d plot
plot = doc.createPlot2D();
plot.setId("p1");
plot.setName("S1 Timecourse");
curve = plot.createCurve();
curve.setId("c1");
curve.setName("S1");
curve.setLogX(False);
curve.setLogY(False);
curve.setXDataReference("time");
curve.setYDataReference("S1");
# add a 3D Plot
plot2 = doc.createPlot3D();
plot2.setId("p2");
plot2.setName("dunno");
surf = plot2.createSurface();
surf.setId("surf1");
surf.setName("S1");
surf.setLogX(False);
surf.setLogY(False);
surf.setLogZ(False);
surf.setXDataReference("time");
surf.setYDataReference("S1");
surf.setZDataReference("S1");
# write the document
libsedml.writeSedML(doc, args[1]); | [
"def",
"main",
"(",
"args",
")",
":",
"if",
"(",
"len",
"(",
"args",
")",
"!=",
"2",
")",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# create the document",
"doc",
"=",
"libsedml",
".",
"SedDocument",
"(",
... | Usage: create_sedml output-filename | [
"Usage",
":",
"create_sedml",
"output",
"-",
"filename"
] | train | https://github.com/fbergmann/libSEDML/blob/2611274d993cb92c663f8f0296896a6e441f75fd/examples/python/create_sedml.py#L41-L163 |
moonso/loqusdb | loqusdb/utils/profiling.py | get_profiles | def get_profiles(adapter, vcf_file):
"""Given a vcf, get a profile string for each sample in the vcf
based on the profile variants in the database
Args:
adapter(MongoAdapter): Adapter to mongodb
vcf_file(str): Path to vcf file
Returns:
profiles (dict(str)): The profiles (given as strings) for each sample
in vcf.
"""
vcf = get_file_handle(vcf_file)
individuals = vcf.samples
profiles = {individual: [] for individual in individuals}
for profile_variant in adapter.profile_variants():
ref = profile_variant['ref']
alt = profile_variant['alt']
pos = profile_variant['pos']
end = pos + 1
chrom = profile_variant['chrom']
region = f"{chrom}:{pos}-{end}"
#Find variants in region
found_variant = False
for variant in vcf(region):
variant_id = get_variant_id(variant)
#If variant id i.e. chrom_pos_ref_alt matches
if variant_id == profile_variant['_id']:
found_variant = True
#find genotype for each individual in vcf
for i, individual in enumerate(individuals):
genotype = GENOTYPE_MAP[variant.gt_types[i]]
if genotype == 'hom_alt':
gt_str = f"{alt}{alt}"
elif genotype == 'het':
gt_str = f"{ref}{alt}"
else:
gt_str = f"{ref}{ref}"
#Append genotype to profile string of individual
profiles[individual].append(gt_str)
#Break loop if variant is found in region
break
#If no call was found for variant, give all samples a hom ref genotype
if not found_variant:
for individual in individuals: profiles[individual].append(f"{ref}{ref}")
return profiles | python | def get_profiles(adapter, vcf_file):
"""Given a vcf, get a profile string for each sample in the vcf
based on the profile variants in the database
Args:
adapter(MongoAdapter): Adapter to mongodb
vcf_file(str): Path to vcf file
Returns:
profiles (dict(str)): The profiles (given as strings) for each sample
in vcf.
"""
vcf = get_file_handle(vcf_file)
individuals = vcf.samples
profiles = {individual: [] for individual in individuals}
for profile_variant in adapter.profile_variants():
ref = profile_variant['ref']
alt = profile_variant['alt']
pos = profile_variant['pos']
end = pos + 1
chrom = profile_variant['chrom']
region = f"{chrom}:{pos}-{end}"
#Find variants in region
found_variant = False
for variant in vcf(region):
variant_id = get_variant_id(variant)
#If variant id i.e. chrom_pos_ref_alt matches
if variant_id == profile_variant['_id']:
found_variant = True
#find genotype for each individual in vcf
for i, individual in enumerate(individuals):
genotype = GENOTYPE_MAP[variant.gt_types[i]]
if genotype == 'hom_alt':
gt_str = f"{alt}{alt}"
elif genotype == 'het':
gt_str = f"{ref}{alt}"
else:
gt_str = f"{ref}{ref}"
#Append genotype to profile string of individual
profiles[individual].append(gt_str)
#Break loop if variant is found in region
break
#If no call was found for variant, give all samples a hom ref genotype
if not found_variant:
for individual in individuals: profiles[individual].append(f"{ref}{ref}")
return profiles | [
"def",
"get_profiles",
"(",
"adapter",
",",
"vcf_file",
")",
":",
"vcf",
"=",
"get_file_handle",
"(",
"vcf_file",
")",
"individuals",
"=",
"vcf",
".",
"samples",
"profiles",
"=",
"{",
"individual",
":",
"[",
"]",
"for",
"individual",
"in",
"individuals",
"... | Given a vcf, get a profile string for each sample in the vcf
based on the profile variants in the database
Args:
adapter(MongoAdapter): Adapter to mongodb
vcf_file(str): Path to vcf file
Returns:
profiles (dict(str)): The profiles (given as strings) for each sample
in vcf. | [
"Given",
"a",
"vcf",
"get",
"a",
"profile",
"string",
"for",
"each",
"sample",
"in",
"the",
"vcf",
"based",
"on",
"the",
"profile",
"variants",
"in",
"the",
"database"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/profiling.py#L15-L76 |
moonso/loqusdb | loqusdb/utils/profiling.py | profile_match | def profile_match(adapter, profiles, hard_threshold=0.95, soft_threshold=0.9):
"""
given a dict of profiles, searches through all the samples in the DB
for a match. If a matching sample is found an exception is raised,
and the variants will not be loaded into the database.
Args:
adapter (MongoAdapter): Adapter to mongodb
profiles (dict(str)): The profiles (given as strings) for each sample in vcf.
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
matches(dict(list)): list of similar samples for each sample in vcf.
"""
matches = {sample: [] for sample in profiles.keys()}
for case in adapter.cases():
for individual in case['individuals']:
for sample in profiles.keys():
if individual.get('profile'):
similarity = compare_profiles(
profiles[sample], individual['profile']
)
if similarity >= hard_threshold:
msg = (
f"individual {sample} has a {similarity} similarity "
f"with individual {individual['ind_id']} in case "
f"{case['case_id']}"
)
LOG.critical(msg)
#Raise some exception
raise ProfileError
if similarity >= soft_threshold:
match = f"{case['case_id']}.{individual['ind_id']}"
matches[sample].append(match)
return matches | python | def profile_match(adapter, profiles, hard_threshold=0.95, soft_threshold=0.9):
"""
given a dict of profiles, searches through all the samples in the DB
for a match. If a matching sample is found an exception is raised,
and the variants will not be loaded into the database.
Args:
adapter (MongoAdapter): Adapter to mongodb
profiles (dict(str)): The profiles (given as strings) for each sample in vcf.
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
matches(dict(list)): list of similar samples for each sample in vcf.
"""
matches = {sample: [] for sample in profiles.keys()}
for case in adapter.cases():
for individual in case['individuals']:
for sample in profiles.keys():
if individual.get('profile'):
similarity = compare_profiles(
profiles[sample], individual['profile']
)
if similarity >= hard_threshold:
msg = (
f"individual {sample} has a {similarity} similarity "
f"with individual {individual['ind_id']} in case "
f"{case['case_id']}"
)
LOG.critical(msg)
#Raise some exception
raise ProfileError
if similarity >= soft_threshold:
match = f"{case['case_id']}.{individual['ind_id']}"
matches[sample].append(match)
return matches | [
"def",
"profile_match",
"(",
"adapter",
",",
"profiles",
",",
"hard_threshold",
"=",
"0.95",
",",
"soft_threshold",
"=",
"0.9",
")",
":",
"matches",
"=",
"{",
"sample",
":",
"[",
"]",
"for",
"sample",
"in",
"profiles",
".",
"keys",
"(",
")",
"}",
"for"... | given a dict of profiles, searches through all the samples in the DB
for a match. If a matching sample is found an exception is raised,
and the variants will not be loaded into the database.
Args:
adapter (MongoAdapter): Adapter to mongodb
profiles (dict(str)): The profiles (given as strings) for each sample in vcf.
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
matches(dict(list)): list of similar samples for each sample in vcf. | [
"given",
"a",
"dict",
"of",
"profiles",
"searches",
"through",
"all",
"the",
"samples",
"in",
"the",
"DB",
"for",
"a",
"match",
".",
"If",
"a",
"matching",
"sample",
"is",
"found",
"an",
"exception",
"is",
"raised",
"and",
"the",
"variants",
"will",
"not... | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/profiling.py#L78-L124 |
moonso/loqusdb | loqusdb/utils/profiling.py | compare_profiles | def compare_profiles(profile1, profile2):
"""
Given two profiles, determine the ratio of similarity, i.e.
the hamming distance between the strings.
Args:
profile1/2 (str): profile string
Returns:
similarity_ratio (float): the ratio of similiarity (0-1)
"""
length = len(profile1)
profile1 = np.array(list(profile1))
profile2 = np.array(list(profile2))
similarity_array = profile1 == profile2
matches = np.sum(similarity_array)
similarity_ratio = matches/length
return similarity_ratio | python | def compare_profiles(profile1, profile2):
"""
Given two profiles, determine the ratio of similarity, i.e.
the hamming distance between the strings.
Args:
profile1/2 (str): profile string
Returns:
similarity_ratio (float): the ratio of similiarity (0-1)
"""
length = len(profile1)
profile1 = np.array(list(profile1))
profile2 = np.array(list(profile2))
similarity_array = profile1 == profile2
matches = np.sum(similarity_array)
similarity_ratio = matches/length
return similarity_ratio | [
"def",
"compare_profiles",
"(",
"profile1",
",",
"profile2",
")",
":",
"length",
"=",
"len",
"(",
"profile1",
")",
"profile1",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"profile1",
")",
")",
"profile2",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"pr... | Given two profiles, determine the ratio of similarity, i.e.
the hamming distance between the strings.
Args:
profile1/2 (str): profile string
Returns:
similarity_ratio (float): the ratio of similiarity (0-1) | [
"Given",
"two",
"profiles",
"determine",
"the",
"ratio",
"of",
"similarity",
"i",
".",
"e",
".",
"the",
"hamming",
"distance",
"between",
"the",
"strings",
"."
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/profiling.py#L128-L151 |
moonso/loqusdb | loqusdb/utils/profiling.py | update_profiles | def update_profiles(adapter):
"""
For all cases having vcf_path, update the profile string for the samples
Args:
adapter (MongoAdapter): Adapter to mongodb
"""
for case in adapter.cases():
#If the case has a vcf_path, get the profiles and update the
#case with new profiled individuals.
if case.get('profile_path'):
profiles = get_profiles(adapter, case['profile_path'])
profiled_individuals = deepcopy(case['individuals'])
for individual in profiled_individuals:
ind_id = individual['ind_id']
try:
profile = profiles[ind_id]
individual['profile'] = profile
except KeyError:
LOG.warning(f"sample IDs in vcf does not match for case {case['case_id']}")
updated_case = deepcopy(case)
updated_case['individuals'] = profiled_individuals
adapter.add_case(updated_case, update=True) | python | def update_profiles(adapter):
"""
For all cases having vcf_path, update the profile string for the samples
Args:
adapter (MongoAdapter): Adapter to mongodb
"""
for case in adapter.cases():
#If the case has a vcf_path, get the profiles and update the
#case with new profiled individuals.
if case.get('profile_path'):
profiles = get_profiles(adapter, case['profile_path'])
profiled_individuals = deepcopy(case['individuals'])
for individual in profiled_individuals:
ind_id = individual['ind_id']
try:
profile = profiles[ind_id]
individual['profile'] = profile
except KeyError:
LOG.warning(f"sample IDs in vcf does not match for case {case['case_id']}")
updated_case = deepcopy(case)
updated_case['individuals'] = profiled_individuals
adapter.add_case(updated_case, update=True) | [
"def",
"update_profiles",
"(",
"adapter",
")",
":",
"for",
"case",
"in",
"adapter",
".",
"cases",
"(",
")",
":",
"#If the case has a vcf_path, get the profiles and update the",
"#case with new profiled individuals.",
"if",
"case",
".",
"get",
"(",
"'profile_path'",
")",... | For all cases having vcf_path, update the profile string for the samples
Args:
adapter (MongoAdapter): Adapter to mongodb | [
"For",
"all",
"cases",
"having",
"vcf_path",
"update",
"the",
"profile",
"string",
"for",
"the",
"samples"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/profiling.py#L154-L186 |
moonso/loqusdb | loqusdb/utils/profiling.py | profile_stats | def profile_stats(adapter, threshold = 0.9):
"""
Compares the pairwise hamming distances for all the sample profiles in
the database. Returns a table of the number of distances within given
ranges.
Args:
adapter (MongoAdapter): Adapter to mongodb
threshold (float): If any distance is found above this threshold
a warning will be given, stating the two matching samples.
Returns:
distance_dict (dict): dictionary with ranges as keys, and the number
of distances that are within these ranges as values.
"""
profiles = []
samples = []
#Instatiate the distance dictionary with a count 0 for all the ranges
distance_dict = {key: 0 for key in HAMMING_RANGES.keys()}
for case in adapter.cases():
for individual in case['individuals']:
if individual.get('profile'):
#Make sample name <case_id>.<sample_id>
sample_id = f"{case['case_id']}.{individual['ind_id']}"
ind_profile = individual['profile']
#Numpy array to hold all the distances for this samples profile
distance_array = np.array([], dtype=np.float)
for sample, profile in zip(samples, profiles):
#Get distance and append to distance array
distance = compare_profiles(ind_profile, profile)
distance_array = np.append(distance_array, distance)
#Issue warning if above threshold
if distance >= threshold:
LOG.warning(f"{sample_id} is {distance} similar to {sample}")
#Check number of distances in each range and add to distance_dict
for key,range in HAMMING_RANGES.items():
#Calculate the number of hamming distances found within the
#range for current individual
distance_dict[key] += np.sum(
(distance_array >= range[0]) & (distance_array < range[1])
)
#Append profile and sample_id for this sample for the next
#iteration
profiles.append(ind_profile)
samples.append(sample_id)
return distance_dict | python | def profile_stats(adapter, threshold = 0.9):
"""
Compares the pairwise hamming distances for all the sample profiles in
the database. Returns a table of the number of distances within given
ranges.
Args:
adapter (MongoAdapter): Adapter to mongodb
threshold (float): If any distance is found above this threshold
a warning will be given, stating the two matching samples.
Returns:
distance_dict (dict): dictionary with ranges as keys, and the number
of distances that are within these ranges as values.
"""
profiles = []
samples = []
#Instatiate the distance dictionary with a count 0 for all the ranges
distance_dict = {key: 0 for key in HAMMING_RANGES.keys()}
for case in adapter.cases():
for individual in case['individuals']:
if individual.get('profile'):
#Make sample name <case_id>.<sample_id>
sample_id = f"{case['case_id']}.{individual['ind_id']}"
ind_profile = individual['profile']
#Numpy array to hold all the distances for this samples profile
distance_array = np.array([], dtype=np.float)
for sample, profile in zip(samples, profiles):
#Get distance and append to distance array
distance = compare_profiles(ind_profile, profile)
distance_array = np.append(distance_array, distance)
#Issue warning if above threshold
if distance >= threshold:
LOG.warning(f"{sample_id} is {distance} similar to {sample}")
#Check number of distances in each range and add to distance_dict
for key,range in HAMMING_RANGES.items():
#Calculate the number of hamming distances found within the
#range for current individual
distance_dict[key] += np.sum(
(distance_array >= range[0]) & (distance_array < range[1])
)
#Append profile and sample_id for this sample for the next
#iteration
profiles.append(ind_profile)
samples.append(sample_id)
return distance_dict | [
"def",
"profile_stats",
"(",
"adapter",
",",
"threshold",
"=",
"0.9",
")",
":",
"profiles",
"=",
"[",
"]",
"samples",
"=",
"[",
"]",
"#Instatiate the distance dictionary with a count 0 for all the ranges",
"distance_dict",
"=",
"{",
"key",
":",
"0",
"for",
"key",
... | Compares the pairwise hamming distances for all the sample profiles in
the database. Returns a table of the number of distances within given
ranges.
Args:
adapter (MongoAdapter): Adapter to mongodb
threshold (float): If any distance is found above this threshold
a warning will be given, stating the two matching samples.
Returns:
distance_dict (dict): dictionary with ranges as keys, and the number
of distances that are within these ranges as values. | [
"Compares",
"the",
"pairwise",
"hamming",
"distances",
"for",
"all",
"the",
"sample",
"profiles",
"in",
"the",
"database",
".",
"Returns",
"a",
"table",
"of",
"the",
"number",
"of",
"distances",
"within",
"given",
"ranges",
"."
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/profiling.py#L189-L248 |
yjzhang/uncurl_python | uncurl/evaluation.py | purity | def purity(labels, true_labels):
"""
Calculates the purity score for the given labels.
Args:
labels (array): 1D array of integers
true_labels (array): 1D array of integers - true labels
Returns:
purity score - a float bewteen 0 and 1. Closer to 1 is better.
"""
purity = 0.0
for i in set(labels):
indices = (labels==i)
true_clusters = true_labels[indices]
if len(true_clusters)==0:
continue
counts = Counter(true_clusters)
lab, count = counts.most_common()[0]
purity += count
return float(purity)/len(labels) | python | def purity(labels, true_labels):
"""
Calculates the purity score for the given labels.
Args:
labels (array): 1D array of integers
true_labels (array): 1D array of integers - true labels
Returns:
purity score - a float bewteen 0 and 1. Closer to 1 is better.
"""
purity = 0.0
for i in set(labels):
indices = (labels==i)
true_clusters = true_labels[indices]
if len(true_clusters)==0:
continue
counts = Counter(true_clusters)
lab, count = counts.most_common()[0]
purity += count
return float(purity)/len(labels) | [
"def",
"purity",
"(",
"labels",
",",
"true_labels",
")",
":",
"purity",
"=",
"0.0",
"for",
"i",
"in",
"set",
"(",
"labels",
")",
":",
"indices",
"=",
"(",
"labels",
"==",
"i",
")",
"true_clusters",
"=",
"true_labels",
"[",
"indices",
"]",
"if",
"len"... | Calculates the purity score for the given labels.
Args:
labels (array): 1D array of integers
true_labels (array): 1D array of integers - true labels
Returns:
purity score - a float bewteen 0 and 1. Closer to 1 is better. | [
"Calculates",
"the",
"purity",
"score",
"for",
"the",
"given",
"labels",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/evaluation.py#L6-L26 |
yjzhang/uncurl_python | uncurl/evaluation.py | nne | def nne(dim_red, true_labels):
"""
Calculates the nearest neighbor accuracy (basically leave-one-out cross
validation with a 1NN classifier).
Args:
dim_red (array): dimensions (k, cells)
true_labels (array): 1d array of integers
Returns:
Nearest neighbor accuracy - fraction of points for which the 1NN
1NN classifier returns the correct value.
"""
# use sklearn's BallTree
bt = BallTree(dim_red.T)
correct = 0
for i, l in enumerate(true_labels):
dist, ind = bt.query([dim_red[:,i]], k=2)
closest_cell = ind[0, 1]
if true_labels[closest_cell] == l:
correct += 1
return float(correct)/len(true_labels) | python | def nne(dim_red, true_labels):
"""
Calculates the nearest neighbor accuracy (basically leave-one-out cross
validation with a 1NN classifier).
Args:
dim_red (array): dimensions (k, cells)
true_labels (array): 1d array of integers
Returns:
Nearest neighbor accuracy - fraction of points for which the 1NN
1NN classifier returns the correct value.
"""
# use sklearn's BallTree
bt = BallTree(dim_red.T)
correct = 0
for i, l in enumerate(true_labels):
dist, ind = bt.query([dim_red[:,i]], k=2)
closest_cell = ind[0, 1]
if true_labels[closest_cell] == l:
correct += 1
return float(correct)/len(true_labels) | [
"def",
"nne",
"(",
"dim_red",
",",
"true_labels",
")",
":",
"# use sklearn's BallTree",
"bt",
"=",
"BallTree",
"(",
"dim_red",
".",
"T",
")",
"correct",
"=",
"0",
"for",
"i",
",",
"l",
"in",
"enumerate",
"(",
"true_labels",
")",
":",
"dist",
",",
"ind"... | Calculates the nearest neighbor accuracy (basically leave-one-out cross
validation with a 1NN classifier).
Args:
dim_red (array): dimensions (k, cells)
true_labels (array): 1d array of integers
Returns:
Nearest neighbor accuracy - fraction of points for which the 1NN
1NN classifier returns the correct value. | [
"Calculates",
"the",
"nearest",
"neighbor",
"accuracy",
"(",
"basically",
"leave",
"-",
"one",
"-",
"out",
"cross",
"validation",
"with",
"a",
"1NN",
"classifier",
")",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/evaluation.py#L28-L49 |
yjzhang/uncurl_python | uncurl/evaluation.py | mdl | def mdl(ll, k, data):
"""
Returns the minimum description length score of the model given its
log-likelihood and k, the number of cell types.
a lower cost is better...
"""
"""
N - no. of genes
n - no. of cells
k - no. of cell types
R - sum(Dataset) i.e. total no. of reads
function TotCost = TotBits(N,m,p,R,C)
# C is the cost from the cost function
TotCost = C + (N*m + m*p)*(log(R/(N*p)));
"""
N, m = data.shape
cost = ll + (N*m + m*k)*(np.log(data.sum()/(N*k)))
return cost | python | def mdl(ll, k, data):
"""
Returns the minimum description length score of the model given its
log-likelihood and k, the number of cell types.
a lower cost is better...
"""
"""
N - no. of genes
n - no. of cells
k - no. of cell types
R - sum(Dataset) i.e. total no. of reads
function TotCost = TotBits(N,m,p,R,C)
# C is the cost from the cost function
TotCost = C + (N*m + m*p)*(log(R/(N*p)));
"""
N, m = data.shape
cost = ll + (N*m + m*k)*(np.log(data.sum()/(N*k)))
return cost | [
"def",
"mdl",
"(",
"ll",
",",
"k",
",",
"data",
")",
":",
"\"\"\"\n N - no. of genes\n n - no. of cells \n k - no. of cell types\n R - sum(Dataset) i.e. total no. of reads\n\n function TotCost = TotBits(N,m,p,R,C)\n # C is the cost from the cost function\n TotCost = ... | Returns the minimum description length score of the model given its
log-likelihood and k, the number of cell types.
a lower cost is better... | [
"Returns",
"the",
"minimum",
"description",
"length",
"score",
"of",
"the",
"model",
"given",
"its",
"log",
"-",
"likelihood",
"and",
"k",
"the",
"number",
"of",
"cell",
"types",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/evaluation.py#L51-L71 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | find_nb_genes | def find_nb_genes(data):
"""
Finds the indices of all genes in the dataset that have
a mean < 0.9 variance. Returns an array of booleans.
"""
data_means = data.mean(1)
data_vars = data.var(1)
nb_indices = data_means < 0.9*data_vars
return nb_indices | python | def find_nb_genes(data):
"""
Finds the indices of all genes in the dataset that have
a mean < 0.9 variance. Returns an array of booleans.
"""
data_means = data.mean(1)
data_vars = data.var(1)
nb_indices = data_means < 0.9*data_vars
return nb_indices | [
"def",
"find_nb_genes",
"(",
"data",
")",
":",
"data_means",
"=",
"data",
".",
"mean",
"(",
"1",
")",
"data_vars",
"=",
"data",
".",
"var",
"(",
"1",
")",
"nb_indices",
"=",
"data_means",
"<",
"0.9",
"*",
"data_vars",
"return",
"nb_indices"
] | Finds the indices of all genes in the dataset that have
a mean < 0.9 variance. Returns an array of booleans. | [
"Finds",
"the",
"indices",
"of",
"all",
"genes",
"in",
"the",
"dataset",
"that",
"have",
"a",
"mean",
"<",
"0",
".",
"9",
"variance",
".",
"Returns",
"an",
"array",
"of",
"booleans",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L12-L20 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | log_ncr | def log_ncr(a, b):
"""
Returns log(nCr(a,b)), given that b<a. Does not assume that a and b
are integers (uses log-gamma).
"""
val = gammaln(a+1) - gammaln(a-b+1) - gammaln(b+1)
return val | python | def log_ncr(a, b):
"""
Returns log(nCr(a,b)), given that b<a. Does not assume that a and b
are integers (uses log-gamma).
"""
val = gammaln(a+1) - gammaln(a-b+1) - gammaln(b+1)
return val | [
"def",
"log_ncr",
"(",
"a",
",",
"b",
")",
":",
"val",
"=",
"gammaln",
"(",
"a",
"+",
"1",
")",
"-",
"gammaln",
"(",
"a",
"-",
"b",
"+",
"1",
")",
"-",
"gammaln",
"(",
"b",
"+",
"1",
")",
"return",
"val"
] | Returns log(nCr(a,b)), given that b<a. Does not assume that a and b
are integers (uses log-gamma). | [
"Returns",
"log",
"(",
"nCr",
"(",
"a",
"b",
"))",
"given",
"that",
"b<a",
".",
"Does",
"not",
"assume",
"that",
"a",
"and",
"b",
"are",
"integers",
"(",
"uses",
"log",
"-",
"gamma",
")",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L22-L28 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | nb_ll | def nb_ll(data, P, R):
"""
Returns the negative binomial log-likelihood of the data.
Args:
data (array): genes x cells
P (array): NB success probability param - genes x clusters
R (array): NB stopping param - genes x clusters
Returns:
cells x clusters array of log-likelihoods
"""
# TODO: include factorial...
#data = data + eps
genes, cells = data.shape
clusters = P.shape[1]
lls = np.zeros((cells, clusters))
for c in range(clusters):
P_c = P[:,c].reshape((genes, 1))
R_c = R[:,c].reshape((genes, 1))
# don't need constant factors...
ll = gammaln(R_c + data) - gammaln(R_c) #- gammaln(data + 1)
ll += data*np.log(P_c) + xlog1py(R_c, -P_c)
#new_ll = np.sum(nbinom.logpmf(data, R_c, P_c), 0)
lls[:,c] = ll.sum(0)
return lls | python | def nb_ll(data, P, R):
"""
Returns the negative binomial log-likelihood of the data.
Args:
data (array): genes x cells
P (array): NB success probability param - genes x clusters
R (array): NB stopping param - genes x clusters
Returns:
cells x clusters array of log-likelihoods
"""
# TODO: include factorial...
#data = data + eps
genes, cells = data.shape
clusters = P.shape[1]
lls = np.zeros((cells, clusters))
for c in range(clusters):
P_c = P[:,c].reshape((genes, 1))
R_c = R[:,c].reshape((genes, 1))
# don't need constant factors...
ll = gammaln(R_c + data) - gammaln(R_c) #- gammaln(data + 1)
ll += data*np.log(P_c) + xlog1py(R_c, -P_c)
#new_ll = np.sum(nbinom.logpmf(data, R_c, P_c), 0)
lls[:,c] = ll.sum(0)
return lls | [
"def",
"nb_ll",
"(",
"data",
",",
"P",
",",
"R",
")",
":",
"# TODO: include factorial...",
"#data = data + eps",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",
"clusters",
"=",
"P",
".",
"shape",
"[",
"1",
"]",
"lls",
"=",
"np",
".",
"zeros",
"(",
... | Returns the negative binomial log-likelihood of the data.
Args:
data (array): genes x cells
P (array): NB success probability param - genes x clusters
R (array): NB stopping param - genes x clusters
Returns:
cells x clusters array of log-likelihoods | [
"Returns",
"the",
"negative",
"binomial",
"log",
"-",
"likelihood",
"of",
"the",
"data",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L36-L61 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | zinb_ll | def zinb_ll(data, P, R, Z):
"""
Returns the zero-inflated negative binomial log-likelihood of the data.
"""
lls = nb_ll(data, P, R)
clusters = P.shape[1]
for c in range(clusters):
pass
return lls | python | def zinb_ll(data, P, R, Z):
"""
Returns the zero-inflated negative binomial log-likelihood of the data.
"""
lls = nb_ll(data, P, R)
clusters = P.shape[1]
for c in range(clusters):
pass
return lls | [
"def",
"zinb_ll",
"(",
"data",
",",
"P",
",",
"R",
",",
"Z",
")",
":",
"lls",
"=",
"nb_ll",
"(",
"data",
",",
"P",
",",
"R",
")",
"clusters",
"=",
"P",
".",
"shape",
"[",
"1",
"]",
"for",
"c",
"in",
"range",
"(",
"clusters",
")",
":",
"pass... | Returns the zero-inflated negative binomial log-likelihood of the data. | [
"Returns",
"the",
"zero",
"-",
"inflated",
"negative",
"binomial",
"log",
"-",
"likelihood",
"of",
"the",
"data",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L63-L71 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | nb_ll_row | def nb_ll_row(params, data_row):
"""
returns the negative LL of a single row.
Args:
params (array) - [p, r]
data_row (array) - 1d array of data
Returns:
LL of row
"""
p = params[0]
r = params[1]
n = len(data_row)
ll = np.sum(gammaln(data_row + r)) - np.sum(gammaln(data_row + 1))
ll -= n*gammaln(r)
ll += np.sum(data_row)*np.log(p)
ll += n*r*np.log(1-p)
return -ll | python | def nb_ll_row(params, data_row):
"""
returns the negative LL of a single row.
Args:
params (array) - [p, r]
data_row (array) - 1d array of data
Returns:
LL of row
"""
p = params[0]
r = params[1]
n = len(data_row)
ll = np.sum(gammaln(data_row + r)) - np.sum(gammaln(data_row + 1))
ll -= n*gammaln(r)
ll += np.sum(data_row)*np.log(p)
ll += n*r*np.log(1-p)
return -ll | [
"def",
"nb_ll_row",
"(",
"params",
",",
"data_row",
")",
":",
"p",
"=",
"params",
"[",
"0",
"]",
"r",
"=",
"params",
"[",
"1",
"]",
"n",
"=",
"len",
"(",
"data_row",
")",
"ll",
"=",
"np",
".",
"sum",
"(",
"gammaln",
"(",
"data_row",
"+",
"r",
... | returns the negative LL of a single row.
Args:
params (array) - [p, r]
data_row (array) - 1d array of data
Returns:
LL of row | [
"returns",
"the",
"negative",
"LL",
"of",
"a",
"single",
"row",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L73-L91 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | nb_r_deriv | def nb_r_deriv(r, data_row):
"""
Derivative of log-likelihood wrt r (formula from wikipedia)
Args:
r (float): the R paramemter in the NB distribution
data_row (array): 1d array of length cells
"""
n = len(data_row)
d = sum(digamma(data_row + r)) - n*digamma(r) + n*np.log(r/(r+np.mean(data_row)))
return d | python | def nb_r_deriv(r, data_row):
"""
Derivative of log-likelihood wrt r (formula from wikipedia)
Args:
r (float): the R paramemter in the NB distribution
data_row (array): 1d array of length cells
"""
n = len(data_row)
d = sum(digamma(data_row + r)) - n*digamma(r) + n*np.log(r/(r+np.mean(data_row)))
return d | [
"def",
"nb_r_deriv",
"(",
"r",
",",
"data_row",
")",
":",
"n",
"=",
"len",
"(",
"data_row",
")",
"d",
"=",
"sum",
"(",
"digamma",
"(",
"data_row",
"+",
"r",
")",
")",
"-",
"n",
"*",
"digamma",
"(",
"r",
")",
"+",
"n",
"*",
"np",
".",
"log",
... | Derivative of log-likelihood wrt r (formula from wikipedia)
Args:
r (float): the R paramemter in the NB distribution
data_row (array): 1d array of length cells | [
"Derivative",
"of",
"log",
"-",
"likelihood",
"wrt",
"r",
"(",
"formula",
"from",
"wikipedia",
")"
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L93-L103 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | nb_fit | def nb_fit(data, P_init=None, R_init=None, epsilon=1e-8, max_iters=100):
"""
Fits the NB distribution to data using method of moments.
Args:
data (array): genes x cells
P_init (array, optional): NB success prob param - genes x 1
R_init (array, optional): NB stopping param - genes x 1
Returns:
P, R - fit to data
"""
means = data.mean(1)
variances = data.var(1)
if (means > variances).any():
raise ValueError("For NB fit, means must be less than variances")
genes, cells = data.shape
# method of moments
P = 1.0 - means/variances
R = means*(1-P)/P
for i in range(genes):
result = minimize(nb_ll_row, [P[i], R[i]], args=(data[i,:],),
bounds = [(0, 1), (eps, None)])
params = result.x
P[i] = params[0]
R[i] = params[1]
#R[i] = fsolve(nb_r_deriv, R[i], args = (data[i,:],))
#P[i] = data[i,:].mean()/(data[i,:].mean() + R[i])
return P,R | python | def nb_fit(data, P_init=None, R_init=None, epsilon=1e-8, max_iters=100):
"""
Fits the NB distribution to data using method of moments.
Args:
data (array): genes x cells
P_init (array, optional): NB success prob param - genes x 1
R_init (array, optional): NB stopping param - genes x 1
Returns:
P, R - fit to data
"""
means = data.mean(1)
variances = data.var(1)
if (means > variances).any():
raise ValueError("For NB fit, means must be less than variances")
genes, cells = data.shape
# method of moments
P = 1.0 - means/variances
R = means*(1-P)/P
for i in range(genes):
result = minimize(nb_ll_row, [P[i], R[i]], args=(data[i,:],),
bounds = [(0, 1), (eps, None)])
params = result.x
P[i] = params[0]
R[i] = params[1]
#R[i] = fsolve(nb_r_deriv, R[i], args = (data[i,:],))
#P[i] = data[i,:].mean()/(data[i,:].mean() + R[i])
return P,R | [
"def",
"nb_fit",
"(",
"data",
",",
"P_init",
"=",
"None",
",",
"R_init",
"=",
"None",
",",
"epsilon",
"=",
"1e-8",
",",
"max_iters",
"=",
"100",
")",
":",
"means",
"=",
"data",
".",
"mean",
"(",
"1",
")",
"variances",
"=",
"data",
".",
"var",
"("... | Fits the NB distribution to data using method of moments.
Args:
data (array): genes x cells
P_init (array, optional): NB success prob param - genes x 1
R_init (array, optional): NB stopping param - genes x 1
Returns:
P, R - fit to data | [
"Fits",
"the",
"NB",
"distribution",
"to",
"data",
"using",
"method",
"of",
"moments",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L105-L133 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | nb_cluster | def nb_cluster(data, k, P_init=None, R_init=None, assignments=None, means=None, max_iters=10):
"""
Performs negative binomial clustering on the given data. If some genes have mean > variance, then these genes are fitted to a Poisson distribution.
Args:
data (array): genes x cells
k (int): number of clusters
P_init (array): NB success prob param - genes x k. Default: random
R_init (array): NB stopping param - genes x k. Default: random
assignments (array): cells x 1 array of integers 0...k-1. Default: kmeans-pp (poisson)
means (array): initial cluster means (for use with kmeans-pp to create initial assignments). Default: None
max_iters (int): default: 100
Returns:
assignments (array): 1d array of length cells, containing integers 0...k-1
P (array): genes x k - value is 0 for genes with mean > var
R (array): genes x k - value is inf for genes with mean > var
"""
genes, cells = data.shape
if P_init is None:
P_init = np.random.random((genes, k))
if R_init is None:
R_init = np.random.randint(1, data.max(), (genes, k))
R_init = R_init.astype(float)
if assignments is None:
_, assignments = kmeans_pp(data, k, means)
means = np.zeros((genes, k))
#assignments = np.array([np.random.randint(0,k) for i in range(cells)])
old_assignments = np.copy(assignments)
# If mean > variance, then fall back to Poisson, since NB
# distribution can't handle that case.
for i in range(max_iters):
# estimate params from assigned cells
nb_gene_indices = fit_cluster(data, assignments, k, P_init, R_init, means)
# re-calculate assignments
lls = nb_ll(data[nb_gene_indices, :], P_init[nb_gene_indices,:], R_init[nb_gene_indices,:])
lls += pois_ll.poisson_ll(data[~nb_gene_indices,:], means[~nb_gene_indices,:])
# set NB params to failure values
P_init[~nb_gene_indices,:] = 0
R_init[~nb_gene_indices,:] = np.inf
for c in range(cells):
assignments[c] = np.argmax(lls[c,:])
if np.equal(assignments,old_assignments).all():
break
old_assignments = np.copy(assignments)
def nb_cluster(data, k, P_init=None, R_init=None, assignments=None, means=None, max_iters=10):
    """Cluster cells with per-gene negative binomial (NB) models.

    Genes whose mean exceeds their variance within a cluster cannot be fit
    by an NB distribution; those genes fall back to a Poisson model.

    Args:
        data (array): genes x cells
        k (int): number of clusters
        P_init (array): NB success prob param - genes x k. Default: random
        R_init (array): NB stopping param - genes x k. Default: random
        assignments (array): cells x 1 array of integers 0...k-1.
            Default: kmeans-pp (poisson)
        means (array): initial cluster means (for use with kmeans-pp to
            create initial assignments). Default: None
        max_iters (int): maximum number of EM-style iterations. Default: 10
            (fix: the docstring previously claimed 100, but the actual
            default in the signature is 10)

    Returns:
        assignments (array): 1d array of length cells, containing integers 0...k-1
        P (array): genes x k - value is 0 for genes with mean > var
        R (array): genes x k - value is inf for genes with mean > var
    """
    genes, cells = data.shape
    if P_init is None:
        P_init = np.random.random((genes, k))
    if R_init is None:
        R_init = np.random.randint(1, data.max(), (genes, k))
        R_init = R_init.astype(float)
    if assignments is None:
        _, assignments = kmeans_pp(data, k, means)
        means = np.zeros((genes, k))
    old_assignments = np.copy(assignments)
    for i in range(max_iters):
        # Re-estimate NB (or Poisson fallback) parameters from the current
        # cluster assignments.
        nb_gene_indices = fit_cluster(data, assignments, k, P_init, R_init, means)
        # Per-cell, per-cluster log-likelihood: NB genes plus Poisson genes
        # (the genes where mean > variance).
        lls = nb_ll(data[nb_gene_indices, :], P_init[nb_gene_indices, :], R_init[nb_gene_indices, :])
        lls += pois_ll.poisson_ll(data[~nb_gene_indices, :], means[~nb_gene_indices, :])
        # Sentinel values marking genes for which the NB fit was not used.
        P_init[~nb_gene_indices, :] = 0
        R_init[~nb_gene_indices, :] = np.inf
        for c in range(cells):
            assignments[c] = np.argmax(lls[c, :])
        # Converged: no cell changed cluster this iteration.
        if np.equal(assignments, old_assignments).all():
            break
        old_assignments = np.copy(assignments)
    return assignments, P_init, R_init
"def",
"nb_cluster",
"(",
"data",
",",
"k",
",",
"P_init",
"=",
"None",
",",
"R_init",
"=",
"None",
",",
"assignments",
"=",
"None",
",",
"means",
"=",
"None",
",",
"max_iters",
"=",
"10",
")",
":",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",... | Performs negative binomial clustering on the given data. If some genes have mean > variance, then these genes are fitted to a Poisson distribution.
Args:
data (array): genes x cells
k (int): number of clusters
P_init (array): NB success prob param - genes x k. Default: random
R_init (array): NB stopping param - genes x k. Default: random
assignments (array): cells x 1 array of integers 0...k-1. Default: kmeans-pp (poisson)
means (array): initial cluster means (for use with kmeans-pp to create initial assignments). Default: None
max_iters (int): default: 100
Returns:
assignments (array): 1d array of length cells, containing integers 0...k-1
P (array): genes x k - value is 0 for genes with mean > var
R (array): genes x k - value is inf for genes with mean > var | [
"Performs",
"negative",
"binomial",
"clustering",
"on",
"the",
"given",
"data",
".",
"If",
"some",
"genes",
"have",
"mean",
">",
"variance",
"then",
"these",
"genes",
"are",
"fitted",
"to",
"a",
"Poisson",
"distribution",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L141-L186 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | fit_cluster | def fit_cluster(data, assignments, k, P_init, R_init, means):
"""
Fits NB/poisson params to a cluster.
"""
for c in range(k):
if data[:,assignments==c].shape[1] == 0:
_, assignments = kmeans_pp(data, k)
genes, cells = data.shape
nb_gene_indices = np.array([True for i in range(genes)])
for c in range(k):
c_data = data[:,assignments==c]
nb_gene_indices = nb_gene_indices & find_nb_genes(c_data)
for c in range(k):
c_data = data[:,assignments==c]
nb_genes = c_data[nb_gene_indices,:]
poisson_genes = c_data[~nb_gene_indices, :]
P_init[nb_gene_indices, c], R_init[nb_gene_indices, c] = nb_fit(nb_genes)
means[~nb_gene_indices, c] = poisson_genes.mean(1)
def fit_cluster(data, assignments, k, P_init, R_init, means):
    """
    Fits NB/poisson params to a cluster.
    """
    # If any cluster ended up empty, re-seed the assignments with kmeans-pp.
    for cluster_id in range(k):
        if data[:, assignments == cluster_id].shape[1] == 0:
            _, assignments = kmeans_pp(data, k)
    genes, _ = data.shape
    # A gene is NB-fittable only if it qualifies in every cluster.
    nb_gene_indices = np.ones(genes, dtype=bool)
    for cluster_id in range(k):
        nb_gene_indices &= find_nb_genes(data[:, assignments == cluster_id])
    for cluster_id in range(k):
        cluster_data = data[:, assignments == cluster_id]
        fit_result = nb_fit(cluster_data[nb_gene_indices, :])
        P_init[nb_gene_indices, cluster_id] = fit_result[0]
        R_init[nb_gene_indices, cluster_id] = fit_result[1]
        # Remaining genes fall back to Poisson: record their cluster means.
        means[~nb_gene_indices, cluster_id] = cluster_data[~nb_gene_indices, :].mean(1)
    return nb_gene_indices
"def",
"fit_cluster",
"(",
"data",
",",
"assignments",
",",
"k",
",",
"P_init",
",",
"R_init",
",",
"means",
")",
":",
"for",
"c",
"in",
"range",
"(",
"k",
")",
":",
"if",
"data",
"[",
":",
",",
"assignments",
"==",
"c",
"]",
".",
"shape",
"[",
... | Fits NB/poisson params to a cluster. | [
"Fits",
"NB",
"/",
"poisson",
"params",
"to",
"a",
"cluster",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L188-L206 |
yjzhang/uncurl_python | uncurl/zip_utils.py | zip_ll | def zip_ll(data, means, M):
"""
Calculates the zero-inflated Poisson log-likelihood.
Args:
data (array): genes x cells
means (array): genes x k
M (array): genes x k - this is the zero-inflation parameter.
Returns:
cells x k array of log-likelihood for each cell/cluster pair.
"""
genes, cells = data.shape
clusters = means.shape[1]
ll = np.zeros((cells, clusters))
d0 = (data==0)
d1 = (data>0)
for i in range(clusters):
means_i = np.tile(means[:,i], (cells, 1))
means_i = means_i.transpose()
L_i = np.tile(M[:,i], (cells, 1))
L_i = L_i.transpose()
ll_0 = np.log(L_i + (1 - L_i)*np.exp(-means_i))
ll_0 = np.where((L_i==0) & (means_i==0), -means_i, ll_0)
# not including constant factors
ll_1 = np.log(1 - L_i) + xlogy(data, means_i) - means_i
ll_0 = np.where(d0, ll_0, 0.0)
ll_1 = np.where(d1, ll_1, 0.0)
ll[:,i] = np.sum(ll_0 + ll_1, 0)
return ll | python | def zip_ll(data, means, M):
"""
Calculates the zero-inflated Poisson log-likelihood.
Args:
data (array): genes x cells
means (array): genes x k
M (array): genes x k - this is the zero-inflation parameter.
Returns:
cells x k array of log-likelihood for each cell/cluster pair.
"""
genes, cells = data.shape
clusters = means.shape[1]
ll = np.zeros((cells, clusters))
d0 = (data==0)
d1 = (data>0)
for i in range(clusters):
means_i = np.tile(means[:,i], (cells, 1))
means_i = means_i.transpose()
L_i = np.tile(M[:,i], (cells, 1))
L_i = L_i.transpose()
ll_0 = np.log(L_i + (1 - L_i)*np.exp(-means_i))
ll_0 = np.where((L_i==0) & (means_i==0), -means_i, ll_0)
# not including constant factors
ll_1 = np.log(1 - L_i) + xlogy(data, means_i) - means_i
ll_0 = np.where(d0, ll_0, 0.0)
ll_1 = np.where(d1, ll_1, 0.0)
ll[:,i] = np.sum(ll_0 + ll_1, 0)
return ll | [
"def",
"zip_ll",
"(",
"data",
",",
"means",
",",
"M",
")",
":",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",
"clusters",
"=",
"means",
".",
"shape",
"[",
"1",
"]",
"ll",
"=",
"np",
".",
"zeros",
"(",
"(",
"cells",
",",
"clusters",
")",
")"... | Calculates the zero-inflated Poisson log-likelihood.
Args:
data (array): genes x cells
means (array): genes x k
M (array): genes x k - this is the zero-inflation parameter.
Returns:
cells x k array of log-likelihood for each cell/cluster pair. | [
"Calculates",
"the",
"zero",
"-",
"inflated",
"Poisson",
"log",
"-",
"likelihood",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/zip_utils.py#L9-L38 |
yjzhang/uncurl_python | uncurl/zip_utils.py | zip_ll_row | def zip_ll_row(params, data_row):
"""
Returns the negative log-likelihood of a row given ZIP data.
Args:
params (list): [lambda zero-inf]
data_row (array): 1d array
Returns:
negative log-likelihood
"""
l = params[0]
pi = params[1]
d0 = (data_row==0)
likelihood = d0*pi + (1-pi)*poisson.pmf(data_row, l)
def zip_ll_row(params, data_row):
    """
    Returns the negative log-likelihood of a row given ZIP data.

    Args:
        params (list): [lambda zero-inf]
        data_row (array): 1d array

    Returns:
        negative log-likelihood
    """
    rate = params[0]
    zero_prob = params[1]
    zero_mask = (data_row == 0)
    # Mixture likelihood: zero point mass plus the Poisson component.
    per_point = zero_mask * zero_prob + (1 - zero_prob) * poisson.pmf(data_row, rate)
    # eps (module-level constant) keeps the log finite when the
    # likelihood underflows to zero.
    return -np.log(per_point + eps).sum()
"def",
"zip_ll_row",
"(",
"params",
",",
"data_row",
")",
":",
"l",
"=",
"params",
"[",
"0",
"]",
"pi",
"=",
"params",
"[",
"1",
"]",
"d0",
"=",
"(",
"data_row",
"==",
"0",
")",
"likelihood",
"=",
"d0",
"*",
"pi",
"+",
"(",
"1",
"-",
"pi",
")... | Returns the negative log-likelihood of a row given ZIP data.
Args:
params (list): [lambda zero-inf]
data_row (array): 1d array
Returns:
negative log-likelihood | [
"Returns",
"the",
"negative",
"log",
"-",
"likelihood",
"of",
"a",
"row",
"given",
"ZIP",
"data",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/zip_utils.py#L40-L55 |
moonso/loqusdb | loqusdb/utils/migrate.py | migrate_database | def migrate_database(adapter):
"""Migrate an old loqusdb instance to 1.0
Args:
adapter
Returns:
nr_updated(int): Number of variants that where updated
"""
all_variants = adapter.get_variants()
nr_variants = all_variants.count()
nr_updated = 0
with progressbar(all_variants, label="Updating variants", length=nr_variants) as bar:
for variant in bar:
# Do not update if the variants have the correct format
if 'chrom' in variant:
continue
nr_updated += 1
splitted_id = variant['_id'].split('_')
chrom = splitted_id[0]
start = int(splitted_id[1])
ref = splitted_id[2]
alt = splitted_id[3]
# Calculate end
end = start + (max(len(ref), len(alt)) - 1)
adapter.db.variant.find_one_and_update(
{'_id': variant['_id']},
{
'$set': {
'chrom': chrom,
'start': start,
'end': end
}
}
)
return nr_updated | python | def migrate_database(adapter):
"""Migrate an old loqusdb instance to 1.0
Args:
adapter
Returns:
nr_updated(int): Number of variants that where updated
"""
all_variants = adapter.get_variants()
nr_variants = all_variants.count()
nr_updated = 0
with progressbar(all_variants, label="Updating variants", length=nr_variants) as bar:
for variant in bar:
# Do not update if the variants have the correct format
if 'chrom' in variant:
continue
nr_updated += 1
splitted_id = variant['_id'].split('_')
chrom = splitted_id[0]
start = int(splitted_id[1])
ref = splitted_id[2]
alt = splitted_id[3]
# Calculate end
end = start + (max(len(ref), len(alt)) - 1)
adapter.db.variant.find_one_and_update(
{'_id': variant['_id']},
{
'$set': {
'chrom': chrom,
'start': start,
'end': end
}
}
)
return nr_updated | [
"def",
"migrate_database",
"(",
"adapter",
")",
":",
"all_variants",
"=",
"adapter",
".",
"get_variants",
"(",
")",
"nr_variants",
"=",
"all_variants",
".",
"count",
"(",
")",
"nr_updated",
"=",
"0",
"with",
"progressbar",
"(",
"all_variants",
",",
"label",
... | Migrate an old loqusdb instance to 1.0
Args:
adapter
Returns:
nr_updated(int): Number of variants that where updated | [
"Migrate",
"an",
"old",
"loqusdb",
"instance",
"to",
"1",
".",
"0",
"Args",
":",
"adapter",
"Returns",
":",
"nr_updated",
"(",
"int",
")",
":",
"Number",
"of",
"variants",
"that",
"where",
"updated"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/migrate.py#L7-L47 |
fbergmann/libSEDML | examples/python/create_sedml2.py | main | def main (args):
"""Usage: create_sedml2 output-filename
"""
if (len(args) != 2):
print(main.__doc__)
sys.exit(1);
# create the document
doc = libsedml.SedDocument();
doc.setLevel(1);
doc.setVersion(3);
# create a data description
ddesc = doc.createDataDescription()
ddesc.setId('data1')
ddesc.setName('Oscli Timecourse data')
ddesc.setSource('foo.numl')
# create data source
dsource = ddesc.createDataSource()
dsource.setId('dataS1')
# create slice
slice = dsource.createSlice()
slice.setReference('SpeciesIds')
slice.setValue('S1')
# specify mapping
timeDesc = libsedml.CompositeDescription()
timeDesc.setIndexType('double')
timeDesc.setId('time')
timeDesc.setName('time')
speciesDesc = timeDesc.createCompositeDescription()
speciesDesc.setIndexType('string')
speciesDesc.setId('SpeciesIds')
speciesDesc.setName('SpeciesIds')
concentrationDesc = speciesDesc.createAtomicDescription()
concentrationDesc.setValueType("double")
concentrationDesc.setName("Concentrations")
dimDesc = ddesc.createDimensionDescription()
dimDesc.append(timeDesc)
# write the document
def main(args):
    """Usage: create_sedml2 output-filename
    """
    if len(args) != 2:
        print(main.__doc__)
        sys.exit(1)

    # Build a Level 1 Version 3 SED-ML document.
    doc = libsedml.SedDocument()
    doc.setLevel(1)
    doc.setVersion(3)

    # Data description pointing at an external NuML file.
    ddesc = doc.createDataDescription()
    ddesc.setId('data1')
    ddesc.setName('Oscli Timecourse data')
    ddesc.setSource('foo.numl')

    # Data source that slices out species S1.
    dsource = ddesc.createDataSource()
    dsource.setId('dataS1')
    data_slice = dsource.createSlice()
    data_slice.setReference('SpeciesIds')
    data_slice.setValue('S1')

    # Dimension description: time -> species id -> concentration value.
    time_desc = libsedml.CompositeDescription()
    time_desc.setIndexType('double')
    time_desc.setId('time')
    time_desc.setName('time')

    species_desc = time_desc.createCompositeDescription()
    species_desc.setIndexType('string')
    species_desc.setId('SpeciesIds')
    species_desc.setName('SpeciesIds')

    conc_desc = species_desc.createAtomicDescription()
    conc_desc.setValueType("double")
    conc_desc.setName("Concentrations")

    dim_desc = ddesc.createDimensionDescription()
    dim_desc.append(time_desc)

    # Serialize the document to the requested file.
    libsedml.writeSedML(doc, args[1])
"def",
"main",
"(",
"args",
")",
":",
"if",
"(",
"len",
"(",
"args",
")",
"!=",
"2",
")",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# create the document",
"doc",
"=",
"libsedml",
".",
"SedDocument",
"(",
... | Usage: create_sedml2 output-filename | [
"Usage",
":",
"create_sedml2",
"output",
"-",
"filename"
] | train | https://github.com/fbergmann/libSEDML/blob/2611274d993cb92c663f8f0296896a6e441f75fd/examples/python/create_sedml2.py#L41-L88 |
yjzhang/uncurl_python | uncurl/gap_score.py | preproc_data | def preproc_data(data, gene_subset=False, **kwargs):
"""
basic data preprocessing before running gap score
Assumes that data is a matrix of shape (genes, cells).
Returns a matrix of shape (cells, 8), using the first 8 SVD
components. Why 8? It's an arbitrary selection...
"""
import uncurl
from uncurl.preprocessing import log1p, cell_normalize
from sklearn.decomposition import TruncatedSVD
data_subset = data
if gene_subset:
gene_subset = uncurl.max_variance_genes(data)
data_subset = data[gene_subset, :]
tsvd = TruncatedSVD(min(8, data_subset.shape[0] - 1))
data_tsvd = tsvd.fit_transform(log1p(cell_normalize(data_subset)).T)
def preproc_data(data, gene_subset=False, **kwargs):
    """
    basic data preprocessing before running gap score

    Assumes that data is a matrix of shape (genes, cells).

    Returns a matrix of shape (cells, 8), using the first 8 SVD
    components. Why 8? It's an arbitrary selection...
    """
    import uncurl
    from uncurl.preprocessing import log1p, cell_normalize
    from sklearn.decomposition import TruncatedSVD

    subset = data
    if gene_subset:
        # Restrict to the max-variance genes before projecting.
        selected_genes = uncurl.max_variance_genes(data)
        subset = data[selected_genes, :]
    # Can't ask for more components than genes - 1.
    svd = TruncatedSVD(min(8, subset.shape[0] - 1))
    # Normalize per cell, log-transform, then project cells into SVD space.
    return svd.fit_transform(log1p(cell_normalize(subset)).T)
"def",
"preproc_data",
"(",
"data",
",",
"gene_subset",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"uncurl",
"from",
"uncurl",
".",
"preprocessing",
"import",
"log1p",
",",
"cell_normalize",
"from",
"sklearn",
".",
"decomposition",
"import",
... | basic data preprocessing before running gap score
Assumes that data is a matrix of shape (genes, cells).
Returns a matrix of shape (cells, 8), using the first 8 SVD
components. Why 8? It's an arbitrary selection... | [
"basic",
"data",
"preprocessing",
"before",
"running",
"gap",
"score"
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/gap_score.py#L7-L25 |
yjzhang/uncurl_python | uncurl/gap_score.py | calculate_bounding_box | def calculate_bounding_box(data):
"""
Returns a 2 x m array indicating the min and max along each
dimension.
"""
mins = data.min(0)
maxes = data.max(0)
def calculate_bounding_box(data):
    """
    Compute the axis-aligned bounding box of a point set.

    Args:
        data (array): points x dims array.

    Returns:
        tuple of two length-dims arrays (mins, maxes): the minimum and
        maximum value along each dimension. (Fix: the previous docstring
        claimed a single "2 x m array", but a pair of 1-D arrays is
        returned.)
    """
    mins = data.min(0)
    maxes = data.max(0)
    return mins, maxes
"def",
"calculate_bounding_box",
"(",
"data",
")",
":",
"mins",
"=",
"data",
".",
"min",
"(",
"0",
")",
"maxes",
"=",
"data",
".",
"max",
"(",
"0",
")",
"return",
"mins",
",",
"maxes"
] | Returns a 2 x m array indicating the min and max along each
dimension. | [
"Returns",
"a",
"2",
"x",
"m",
"array",
"indicating",
"the",
"min",
"and",
"max",
"along",
"each",
"dimension",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/gap_score.py#L27-L34 |
yjzhang/uncurl_python | uncurl/gap_score.py | calculate_gap | def calculate_gap(data, clustering, km, B=50, **kwargs):
"""
See: https://datasciencelab.wordpress.com/2013/12/27/finding-the-k-in-k-means-clustering/
https://web.stanford.edu/~hastie/Papers/gap.pdf
Returns two results: the gap score, and s_k.
"""
k = len(set(clustering))
Wk = km.inertia_
mins, maxes = calculate_bounding_box(data)
Wk_est = []
for i in range(B):
data_sample = (maxes-mins)*np.random.random(data.shape) + mins
km_b = KMeans(k)
km_b.fit_predict(data_sample)
Wk_est.append(km_b.inertia_)
Wk_est = np.log(np.array(Wk_est))
Wk_mean = np.mean(Wk_est)
Wk_std = np.std(Wk_est)
gap = Wk_mean - np.log(Wk)
sk = np.sqrt(1 + 1.0/B)*Wk_std
def calculate_gap(data, clustering, km, B=50, **kwargs):
    """
    Gap-statistic score for a clustering.

    See: https://datasciencelab.wordpress.com/2013/12/27/finding-the-k-in-k-means-clustering/
    https://web.stanford.edu/~hastie/Papers/gap.pdf

    Returns two results: the gap score, and s_k.
    """
    k = len(set(clustering))
    Wk = km.inertia_
    mins, maxes = calculate_bounding_box(data)
    # Reference dispersions: k-means inertia on B uniform samples drawn
    # from the data's bounding box.
    log_ref_inertias = []
    for _ in range(B):
        reference = (maxes - mins) * np.random.random(data.shape) + mins
        km_ref = KMeans(k)
        km_ref.fit_predict(reference)
        log_ref_inertias.append(km_ref.inertia_)
    log_ref_inertias = np.log(np.array(log_ref_inertias))
    gap = np.mean(log_ref_inertias) - np.log(Wk)
    # Standard error of the reference dispersions, with the sampling
    # correction factor sqrt(1 + 1/B).
    sk = np.sqrt(1 + 1.0 / B) * np.std(log_ref_inertias)
    return gap, sk
"def",
"calculate_gap",
"(",
"data",
",",
"clustering",
",",
"km",
",",
"B",
"=",
"50",
",",
"*",
"*",
"kwargs",
")",
":",
"k",
"=",
"len",
"(",
"set",
"(",
"clustering",
")",
")",
"Wk",
"=",
"km",
".",
"inertia_",
"mins",
",",
"maxes",
"=",
"c... | See: https://datasciencelab.wordpress.com/2013/12/27/finding-the-k-in-k-means-clustering/
https://web.stanford.edu/~hastie/Papers/gap.pdf
Returns two results: the gap score, and s_k. | [
"See",
":",
"https",
":",
"//",
"datasciencelab",
".",
"wordpress",
".",
"com",
"/",
"2013",
"/",
"12",
"/",
"27",
"/",
"finding",
"-",
"the",
"-",
"k",
"-",
"in",
"-",
"k",
"-",
"means",
"-",
"clustering",
"/"
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/gap_score.py#L36-L58 |
yjzhang/uncurl_python | uncurl/gap_score.py | run_gap_k_selection | def run_gap_k_selection(data, k_min=1, k_max=50, B=5,
skip=5, **kwargs):
"""
Runs gap score for all k from k_min to k_max.
"""
if k_min == k_max:
return k_min
gap_vals = []
sk_vals = []
k_range = list(range(k_min, k_max, skip))
min_k = 0
min_i = 0
for i, k in enumerate(k_range):
km = KMeans(k)
clusters = km.fit_predict(data)
gap, sk = calculate_gap(data, clusters, km, B=B)
if len(gap_vals) > 1:
if gap_vals[-1] >= gap - (skip+1)*sk:
min_i = i
min_k = k_range[i-1]
break
#return k_range[-1], gap_vals, sk_vals
gap_vals.append(gap)
sk_vals.append(sk)
if min_k == 0:
min_k = k_max
if skip == 1:
return min_k, gap_vals, sk_vals
gap_vals = []
sk_vals = []
for k in range(min_k - skip, min_k + skip):
km = KMeans(k)
clusters = km.fit_predict(data)
gap, sk = calculate_gap(data, clusters, km, B=B)
if len(gap_vals) > 1:
if gap_vals[-1] >= gap - sk:
min_k = k-1
return min_k, gap_vals, sk_vals
gap_vals.append(gap)
sk_vals.append(sk)
def run_gap_k_selection(data, k_min=1, k_max=50, B=5,
        skip=5, **kwargs):
    """
    Select a cluster count via the gap statistic.

    Runs a coarse scan over k in range(k_min, k_max, skip), stops at the
    first k where the gap criterion holds, then refines with a step-1
    scan around that k.

    Args:
        data (array): matrix to cluster (samples x features).
        k_min (int): smallest k to try. Default: 1
        k_max (int): largest k to try. Default: 50
        B (int): reference samples per gap evaluation. Default: 5
        skip (int): coarse-scan step size. Default: 5

    Returns:
        (k, gap_vals, sk_vals): the selected k plus the gap scores and
        standard errors from the (last) scan. Note: when k_min == k_max
        only the scalar k_min is returned (historical behavior, kept for
        backward compatibility).
    """
    if k_min == k_max:
        return k_min
    gap_vals = []
    sk_vals = []
    k_range = list(range(k_min, k_max, skip))
    min_k = 0
    for i, k in enumerate(k_range):
        km = KMeans(k)
        clusters = km.fit_predict(data)
        gap, sk = calculate_gap(data, clusters, km, B=B)
        # Coarse gap criterion, scaled by the step size: stop once the
        # previous gap is within (skip+1)*sk of the current one.
        if len(gap_vals) > 1 and gap_vals[-1] >= gap - (skip + 1) * sk:
            min_k = k_range[i - 1]
            break
        gap_vals.append(gap)
        sk_vals.append(sk)
    if min_k == 0:
        min_k = k_max
    if skip == 1:
        return min_k, gap_vals, sk_vals
    # Fine scan with step 1 around the coarse estimate. Clamp the lower
    # bound to 1 so KMeans never receives k < 1 (fix: the original could
    # crash here whenever min_k <= skip).
    gap_vals = []
    sk_vals = []
    for k in range(max(1, min_k - skip), min_k + skip):
        km = KMeans(k)
        clusters = km.fit_predict(data)
        gap, sk = calculate_gap(data, clusters, km, B=B)
        if len(gap_vals) > 1 and gap_vals[-1] >= gap - sk:
            return k - 1, gap_vals, sk_vals
        gap_vals.append(gap)
        sk_vals.append(sk)
    return k, gap_vals, sk_vals
"def",
"run_gap_k_selection",
"(",
"data",
",",
"k_min",
"=",
"1",
",",
"k_max",
"=",
"50",
",",
"B",
"=",
"5",
",",
"skip",
"=",
"5",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"k_min",
"==",
"k_max",
":",
"return",
"k_min",
"gap_vals",
"=",
"[",
... | Runs gap score for all k from k_min to k_max. | [
"Runs",
"gap",
"score",
"for",
"all",
"k",
"from",
"k_min",
"to",
"k_max",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/gap_score.py#L61-L101 |
bachya/py17track | py17track/client.py | Client._request | async def _request(
self,
method: str,
url: str,
*,
headers: dict = None,
params: dict = None,
json: dict = None) -> dict:
"""Make a request against the RainMachine device."""
if not headers:
headers = {}
try:
async with self._websession.request(method, url, headers=headers,
params=params,
json=json) as resp:
resp.raise_for_status()
data = await resp.json(content_type=None)
return data
except ClientError as err:
raise RequestError(
async def _request(
        self,
        method: str,
        url: str,
        *,
        headers: dict = None,
        params: dict = None,
        json: dict = None) -> dict:
    """Make an HTTP request against the 17track API.

    (Fix: the previous docstring said "RainMachine device" — a copy-paste
    from another library; this client talks to 17track.)

    Args:
        method: HTTP verb ('get', 'post', ...).
        url: full URL to request.
        headers: optional extra request headers.
        params: optional query-string parameters.
        json: optional JSON request body.

    Returns:
        The decoded JSON response body.

    Raises:
        RequestError: if the request fails at the transport level.
    """
    if not headers:
        headers = {}

    try:
        async with self._websession.request(method, url, headers=headers,
                                            params=params,
                                            json=json) as resp:
            resp.raise_for_status()
            # Some endpoints answer with a non-JSON content type, so
            # disable aiohttp's content-type check when decoding.
            data = await resp.json(content_type=None)
            return data
    except ClientError as err:
        # Chain the underlying client error for easier debugging.
        raise RequestError(
            'Error requesting data from {}: {}'.format(url, err)) from err
"async",
"def",
"_request",
"(",
"self",
",",
"method",
":",
"str",
",",
"url",
":",
"str",
",",
"*",
",",
"headers",
":",
"dict",
"=",
"None",
",",
"params",
":",
"dict",
"=",
"None",
",",
"json",
":",
"dict",
"=",
"None",
")",
"->",
"dict",
"... | Make a request against the RainMachine device. | [
"Make",
"a",
"request",
"against",
"the",
"RainMachine",
"device",
"."
] | train | https://github.com/bachya/py17track/blob/e6e64f2a79571433df7ee702cb4ebc4127b7ad6d/py17track/client.py#L22-L43 |
markperdue/pyvesync | src/pyvesync/vesync.py | VeSync.get_devices | def get_devices(self) -> list:
"""Return list of VeSync devices"""
if not self.enabled:
return None
self.in_process = True
response, _ = helpers.call_api(
'/cloud/v1/deviceManaged/devices',
'post',
headers=helpers.req_headers(self),
json=helpers.req_body(self, 'devicelist')
)
if response and helpers.check_response(response, 'get_devices'):
if 'result' in response and 'list' in response['result']:
device_list = response['result']['list']
outlets, switches, fans = self.process_devices(device_list)
else:
logger.error('Device list in response not found')
else:
logger.error('Error retrieving device list')
self.in_process = False
def get_devices(self) -> list:
    """Return VeSync devices as a tuple (outlets, switches, fans).

    Returns None when the manager is not logged in (not enabled).
    """
    if not self.enabled:
        return None

    self.in_process = True
    # Initialize up front so a failed API call cannot raise
    # UnboundLocalError at the return statement below (bug in the
    # original: these names were only bound on the success path).
    outlets, switches, fans = [], [], []

    response, _ = helpers.call_api(
        '/cloud/v1/deviceManaged/devices',
        'post',
        headers=helpers.req_headers(self),
        json=helpers.req_body(self, 'devicelist')
    )

    if response and helpers.check_response(response, 'get_devices'):
        if 'result' in response and 'list' in response['result']:
            device_list = response['result']['list']
            outlets, switches, fans = self.process_devices(device_list)
        else:
            logger.error('Device list in response not found')
    else:
        logger.error('Error retrieving device list')

    self.in_process = False
    return (outlets, switches, fans)
"def",
"get_devices",
"(",
"self",
")",
"->",
"list",
":",
"if",
"not",
"self",
".",
"enabled",
":",
"return",
"None",
"self",
".",
"in_process",
"=",
"True",
"response",
",",
"_",
"=",
"helpers",
".",
"call_api",
"(",
"'/cloud/v1/deviceManaged/devices'",
... | Return list of VeSync devices | [
"Return",
"list",
"of",
"VeSync",
"devices"
] | train | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesync.py#L106-L132 |
markperdue/pyvesync | src/pyvesync/vesync.py | VeSync.login | def login(self) -> bool:
"""Return True if log in request succeeds"""
user_check = isinstance(self.username, str) and len(self.username) > 0
pass_check = isinstance(self.password, str) and len(self.password) > 0
if user_check and pass_check:
response, _ = helpers.call_api(
'/cloud/v1/user/login',
'post',
json=helpers.req_body(self, 'login')
)
if response and helpers.check_response(response, 'login'):
self.token = response['result']['token']
self.account_id = response['result']['accountID']
self.enabled = True
return True
else:
logger.error('Error logging in with username and password')
return False
else:
if user_check is False:
logger.error('Username invalid')
if pass_check is False:
logger.error('Password invalid')
def login(self) -> bool:
    """Return True if log in request succeeds"""
    valid_user = isinstance(self.username, str) and len(self.username) > 0
    valid_pass = isinstance(self.password, str) and len(self.password) > 0

    if not (valid_user and valid_pass):
        # Report every invalid credential before bailing out.
        if not valid_user:
            logger.error('Username invalid')
        if not valid_pass:
            logger.error('Password invalid')
        return False

    response, _ = helpers.call_api(
        '/cloud/v1/user/login',
        'post',
        json=helpers.req_body(self, 'login')
    )

    if not (response and helpers.check_response(response, 'login')):
        logger.error('Error logging in with username and password')
        return False

    # Cache the session credentials returned by the API.
    self.token = response['result']['token']
    self.account_id = response['result']['accountID']
    self.enabled = True
    return True
"def",
"login",
"(",
"self",
")",
"->",
"bool",
":",
"user_check",
"=",
"isinstance",
"(",
"self",
".",
"username",
",",
"str",
")",
"and",
"len",
"(",
"self",
".",
"username",
")",
">",
"0",
"pass_check",
"=",
"isinstance",
"(",
"self",
".",
"passwo... | Return True if log in request succeeds | [
"Return",
"True",
"if",
"log",
"in",
"request",
"succeeds"
] | train | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesync.py#L134-L162 |
markperdue/pyvesync | src/pyvesync/vesync.py | VeSync.update | def update(self):
"""Fetch updated information about devices"""
if self.device_time_check():
if not self.in_process:
outlets, switches, fans = self.get_devices()
self.outlets = helpers.resolve_updates(self.outlets, outlets)
self.switches = helpers.resolve_updates(
self.switches, switches)
self.fans = helpers.resolve_updates(self.fans, fans)
def update(self):
    """Fetch updated information about devices"""
    # Only refresh when the update interval has elapsed and no other
    # refresh is currently running.
    if self.device_time_check():
        if not self.in_process:
            new_outlets, new_switches, new_fans = self.get_devices()
            # Merge the freshly fetched devices into the cached lists.
            self.outlets = helpers.resolve_updates(self.outlets, new_outlets)
            self.switches = helpers.resolve_updates(self.switches, new_switches)
            self.fans = helpers.resolve_updates(self.fans, new_fans)
            self.last_update_ts = time.time()
"def",
"update",
"(",
"self",
")",
":",
"if",
"self",
".",
"device_time_check",
"(",
")",
":",
"if",
"not",
"self",
".",
"in_process",
":",
"outlets",
",",
"switches",
",",
"fans",
"=",
"self",
".",
"get_devices",
"(",
")",
"self",
".",
"outlets",
"=... | Fetch updated information about devices | [
"Fetch",
"updated",
"information",
"about",
"devices"
] | train | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesync.py#L171-L184 |
markperdue/pyvesync | src/pyvesync/vesync.py | VeSync.update_energy | def update_energy(self, bypass_check=False):
"""Fetch updated energy information about devices"""
for outlet in self.outlets:
def update_energy(self, bypass_check=False):
    """Fetch updated energy information for each outlet.

    Args:
        bypass_check: forwarded to every outlet's update_energy call.
    """
    # Only outlets track energy usage; delegate the refresh to each one.
    for device in self.outlets:
        device.update_energy(bypass_check)
"def",
"update_energy",
"(",
"self",
",",
"bypass_check",
"=",
"False",
")",
":",
"for",
"outlet",
"in",
"self",
".",
"outlets",
":",
"outlet",
".",
"update_energy",
"(",
"bypass_check",
")"
] | Fetch updated energy information about devices | [
"Fetch",
"updated",
"energy",
"information",
"about",
"devices"
] | train | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesync.py#L186-L189 |
yjzhang/uncurl_python | uncurl/fit_dist_data.py | DistFitDataset | def DistFitDataset(Dat):
"""
Given a data matrix, this returns the per-gene fit error for the
Poisson, Normal, and Log-Normal distributions.
Args:
Dat (array): numpy array with shape (genes, cells)
Returns:
d (dict): 'poiss', 'norm', 'lognorm' give the fit error for each distribution.
"""
#Assumes data to be in the form of a numpy matrix
(r,c) = Dat.shape
Poiss = np.zeros(r)
Norm = np.zeros(r)
LogNorm = np.zeros(r)
for i in range(r):
temp = GetDistFitError(Dat[i])
Poiss[i] = temp['poiss']
Norm[i] = temp['norm']
LogNorm[i] = temp['lognorm']
d = {}
d['poiss'] = Poiss
d['norm'] = Norm
d['lognorm'] = LogNorm
def DistFitDataset(Dat):
    """
    Given a data matrix, this returns the per-gene fit error for the
    Poisson, Normal, and Log-Normal distributions.

    Args:
        Dat (array): numpy array with shape (genes, cells)

    Returns:
        d (dict): 'poiss', 'norm', 'lognorm' give the fit error for each distribution.
    """
    n_genes = Dat.shape[0]
    poiss_err = np.zeros(n_genes)
    norm_err = np.zeros(n_genes)
    lognorm_err = np.zeros(n_genes)
    # Fit each gene's expression vector against all three candidate
    # distributions and record the per-distribution error.
    for gene_idx in range(n_genes):
        errors = GetDistFitError(Dat[gene_idx])
        poiss_err[gene_idx] = errors['poiss']
        norm_err[gene_idx] = errors['norm']
        lognorm_err[gene_idx] = errors['lognorm']
    return {'poiss': poiss_err, 'norm': norm_err, 'lognorm': lognorm_err}
"def",
"DistFitDataset",
"(",
"Dat",
")",
":",
"#Assumes data to be in the form of a numpy matrix ",
"(",
"r",
",",
"c",
")",
"=",
"Dat",
".",
"shape",
"Poiss",
"=",
"np",
".",
"zeros",
"(",
"r",
")",
"Norm",
"=",
"np",
".",
"zeros",
"(",
"r",
")",
"Lo... | Given a data matrix, this returns the per-gene fit error for the
Poisson, Normal, and Log-Normal distributions.
Args:
Dat (array): numpy array with shape (genes, cells)
Returns:
d (dict): 'poiss', 'norm', 'lognorm' give the fit error for each distribution. | [
"Given",
"a",
"data",
"matrix",
"this",
"returns",
"the",
"per",
"-",
"gene",
"fit",
"error",
"for",
"the",
"Poisson",
"Normal",
"and",
"Log",
"-",
"Normal",
"distributions",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/fit_dist_data.py#L55-L80 |
moonso/loqusdb | loqusdb/utils/delete.py | delete | def delete(adapter, case_obj, update=False, existing_case=False):
"""Delete a case and all of it's variants from the database.
Args:
adapter: Connection to database
case_obj(models.Case)
update(bool): If we are in the middle of an update
existing_case(models.Case): If something failed during an update we need to revert
to the original case
"""
# This will overwrite the updated case with the previous one
if update:
adapter.add_case(existing_case)
else:
adapter.delete_case(case_obj)
for file_type in ['vcf_path','vcf_sv_path']:
if not case_obj.get(file_type):
continue
variant_file = case_obj[file_type]
# Get a cyvcf2.VCF object
vcf_obj = get_vcf(variant_file)
delete_variants(
adapter=adapter,
vcf_obj=vcf_obj,
case_obj=case_obj,
) | python | def delete(adapter, case_obj, update=False, existing_case=False):
"""Delete a case and all of it's variants from the database.
Args:
adapter: Connection to database
case_obj(models.Case)
update(bool): If we are in the middle of an update
existing_case(models.Case): If something failed during an update we need to revert
to the original case
"""
# This will overwrite the updated case with the previous one
if update:
adapter.add_case(existing_case)
else:
adapter.delete_case(case_obj)
for file_type in ['vcf_path','vcf_sv_path']:
if not case_obj.get(file_type):
continue
variant_file = case_obj[file_type]
# Get a cyvcf2.VCF object
vcf_obj = get_vcf(variant_file)
delete_variants(
adapter=adapter,
vcf_obj=vcf_obj,
case_obj=case_obj,
) | [
"def",
"delete",
"(",
"adapter",
",",
"case_obj",
",",
"update",
"=",
"False",
",",
"existing_case",
"=",
"False",
")",
":",
"# This will overwrite the updated case with the previous one",
"if",
"update",
":",
"adapter",
".",
"add_case",
"(",
"existing_case",
")",
... | Delete a case and all of it's variants from the database.
Args:
adapter: Connection to database
case_obj(models.Case)
update(bool): If we are in the middle of an update
existing_case(models.Case): If something failed during an update we need to revert
to the original case | [
"Delete",
"a",
"case",
"and",
"all",
"of",
"it",
"s",
"variants",
"from",
"the",
"database",
".",
"Args",
":",
"adapter",
":",
"Connection",
"to",
"database",
"case_obj",
"(",
"models",
".",
"Case",
")",
"update",
"(",
"bool",
")",
":",
"If",
"we",
"... | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/delete.py#L12-L40 |
moonso/loqusdb | loqusdb/utils/delete.py | delete_variants | def delete_variants(adapter, vcf_obj, case_obj, case_id=None):
"""Delete variants for a case in the database
Args:
adapter(loqusdb.plugins.Adapter)
vcf_obj(iterable(dict))
ind_positions(dict)
case_id(str)
Returns:
nr_deleted (int): Number of deleted variants
"""
case_id = case_id or case_obj['case_id']
nr_deleted = 0
start_deleting = datetime.now()
chrom_time = datetime.now()
current_chrom = None
new_chrom = None
for variant in vcf_obj:
formated_variant = build_variant(
variant=variant,
case_obj=case_obj,
case_id=case_id,
)
if not formated_variant:
continue
new_chrom = formated_variant.get('chrom')
adapter.delete_variant(formated_variant)
nr_deleted += 1
if not current_chrom:
LOG.info("Start deleting chromosome {}".format(new_chrom))
current_chrom = new_chrom
chrom_time = datetime.now()
continue
if new_chrom != current_chrom:
LOG.info("Chromosome {0} done".format(current_chrom))
LOG.info("Time to delete chromosome {0}: {1}".format(
current_chrom, datetime.now()-chrom_time))
LOG.info("Start deleting chromosome {0}".format(new_chrom))
current_chrom = new_chrom
return nr_deleted | python | def delete_variants(adapter, vcf_obj, case_obj, case_id=None):
"""Delete variants for a case in the database
Args:
adapter(loqusdb.plugins.Adapter)
vcf_obj(iterable(dict))
ind_positions(dict)
case_id(str)
Returns:
nr_deleted (int): Number of deleted variants
"""
case_id = case_id or case_obj['case_id']
nr_deleted = 0
start_deleting = datetime.now()
chrom_time = datetime.now()
current_chrom = None
new_chrom = None
for variant in vcf_obj:
formated_variant = build_variant(
variant=variant,
case_obj=case_obj,
case_id=case_id,
)
if not formated_variant:
continue
new_chrom = formated_variant.get('chrom')
adapter.delete_variant(formated_variant)
nr_deleted += 1
if not current_chrom:
LOG.info("Start deleting chromosome {}".format(new_chrom))
current_chrom = new_chrom
chrom_time = datetime.now()
continue
if new_chrom != current_chrom:
LOG.info("Chromosome {0} done".format(current_chrom))
LOG.info("Time to delete chromosome {0}: {1}".format(
current_chrom, datetime.now()-chrom_time))
LOG.info("Start deleting chromosome {0}".format(new_chrom))
current_chrom = new_chrom
return nr_deleted | [
"def",
"delete_variants",
"(",
"adapter",
",",
"vcf_obj",
",",
"case_obj",
",",
"case_id",
"=",
"None",
")",
":",
"case_id",
"=",
"case_id",
"or",
"case_obj",
"[",
"'case_id'",
"]",
"nr_deleted",
"=",
"0",
"start_deleting",
"=",
"datetime",
".",
"now",
"("... | Delete variants for a case in the database
Args:
adapter(loqusdb.plugins.Adapter)
vcf_obj(iterable(dict))
ind_positions(dict)
case_id(str)
Returns:
nr_deleted (int): Number of deleted variants | [
"Delete",
"variants",
"for",
"a",
"case",
"in",
"the",
"database",
"Args",
":",
"adapter",
"(",
"loqusdb",
".",
"plugins",
".",
"Adapter",
")",
"vcf_obj",
"(",
"iterable",
"(",
"dict",
"))",
"ind_positions",
"(",
"dict",
")",
"case_id",
"(",
"str",
")",
... | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/delete.py#L42-L89 |
moonso/loqusdb | loqusdb/commands/annotate.py | annotate | def annotate(ctx, variant_file, sv):
"""Annotate the variants in a VCF
"""
adapter = ctx.obj['adapter']
variant_path = os.path.abspath(variant_file)
expected_type = 'snv'
if sv:
expected_type = 'sv'
if 'sv':
nr_cases = adapter.nr_cases(sv_cases=True)
else:
nr_cases = adapter.nr_cases(snv_cases=True)
LOG.info("Found {0} {1} cases in database".format(nr_cases, expected_type))
vcf_obj = get_file_handle(variant_path)
add_headers(vcf_obj, nr_cases=nr_cases, sv=sv)
# Print the headers
for header_line in vcf_obj.raw_header.split('\n'):
if len(header_line) == 0:
continue
click.echo(header_line)
start_inserting = datetime.now()
if sv:
annotated_variants = annotate_svs(adapter, vcf_obj)
else:
annotated_variants = annotate_snvs(adapter, vcf_obj)
# try:
for variant in annotated_variants:
click.echo(str(variant).rstrip()) | python | def annotate(ctx, variant_file, sv):
"""Annotate the variants in a VCF
"""
adapter = ctx.obj['adapter']
variant_path = os.path.abspath(variant_file)
expected_type = 'snv'
if sv:
expected_type = 'sv'
if 'sv':
nr_cases = adapter.nr_cases(sv_cases=True)
else:
nr_cases = adapter.nr_cases(snv_cases=True)
LOG.info("Found {0} {1} cases in database".format(nr_cases, expected_type))
vcf_obj = get_file_handle(variant_path)
add_headers(vcf_obj, nr_cases=nr_cases, sv=sv)
# Print the headers
for header_line in vcf_obj.raw_header.split('\n'):
if len(header_line) == 0:
continue
click.echo(header_line)
start_inserting = datetime.now()
if sv:
annotated_variants = annotate_svs(adapter, vcf_obj)
else:
annotated_variants = annotate_snvs(adapter, vcf_obj)
# try:
for variant in annotated_variants:
click.echo(str(variant).rstrip()) | [
"def",
"annotate",
"(",
"ctx",
",",
"variant_file",
",",
"sv",
")",
":",
"adapter",
"=",
"ctx",
".",
"obj",
"[",
"'adapter'",
"]",
"variant_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"variant_file",
")",
"expected_type",
"=",
"'snv'",
"if",
"s... | Annotate the variants in a VCF | [
"Annotate",
"the",
"variants",
"in",
"a",
"VCF"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/annotate.py#L25-L59 |
bachya/py17track | py17track/track.py | Track.find | async def find(self, *tracking_numbers: str) -> list:
"""Get tracking info for one or more tracking numbers."""
data = {'data': [{'num': num} for num in tracking_numbers]}
tracking_resp = await self._request('post', API_URL_TRACK, json=data)
print(tracking_resp)
if not tracking_resp.get('dat'):
raise InvalidTrackingNumberError('Invalid data')
packages = []
for info in tracking_resp['dat']:
package_info = info.get('track', {})
if not package_info:
continue
kwargs = {
'destination_country': package_info.get('c'),
'info_text': package_info.get('z0', {}).get('z'),
'location': package_info.get('z0', {}).get('c'),
'origin_country': package_info.get('b'),
'package_type': package_info.get('d', 0),
'status': package_info.get('e', 0),
'tracking_info_language': package_info.get('ln1', 'Unknown')
}
packages.append(Package(info['no'], **kwargs))
return packages | python | async def find(self, *tracking_numbers: str) -> list:
"""Get tracking info for one or more tracking numbers."""
data = {'data': [{'num': num} for num in tracking_numbers]}
tracking_resp = await self._request('post', API_URL_TRACK, json=data)
print(tracking_resp)
if not tracking_resp.get('dat'):
raise InvalidTrackingNumberError('Invalid data')
packages = []
for info in tracking_resp['dat']:
package_info = info.get('track', {})
if not package_info:
continue
kwargs = {
'destination_country': package_info.get('c'),
'info_text': package_info.get('z0', {}).get('z'),
'location': package_info.get('z0', {}).get('c'),
'origin_country': package_info.get('b'),
'package_type': package_info.get('d', 0),
'status': package_info.get('e', 0),
'tracking_info_language': package_info.get('ln1', 'Unknown')
}
packages.append(Package(info['no'], **kwargs))
return packages | [
"async",
"def",
"find",
"(",
"self",
",",
"*",
"tracking_numbers",
":",
"str",
")",
"->",
"list",
":",
"data",
"=",
"{",
"'data'",
":",
"[",
"{",
"'num'",
":",
"num",
"}",
"for",
"num",
"in",
"tracking_numbers",
"]",
"}",
"tracking_resp",
"=",
"await... | Get tracking info for one or more tracking numbers. | [
"Get",
"tracking",
"info",
"for",
"one",
"or",
"more",
"tracking",
"numbers",
"."
] | train | https://github.com/bachya/py17track/blob/e6e64f2a79571433df7ee702cb4ebc4127b7ad6d/py17track/track.py#L17-L44 |
moonso/loqusdb | loqusdb/commands/cli.py | cli | def cli(ctx, database, username, password, authdb, port, host, uri, verbose, config, test):
"""loqusdb: manage a local variant count database."""
loglevel = "INFO"
if verbose:
loglevel = "DEBUG"
coloredlogs.install(level=loglevel)
LOG.info("Running loqusdb version %s", __version__)
configs = {}
if config:
try:
configs = yaml.safe_load(config)
except yaml.YAMLError as err:
LOG.warning(err)
ctx.abort()
uri = configs.get('uri') or uri
if test:
uri = "mongomock://"
try:
client = get_client(
host=configs.get('host') or host,
port=configs.get('port') or port,
username=configs.get('username') or username,
password=configs.get('password') or password,
authdb=authdb or database or 'loqusdb',
uri=uri,
)
except DB_Error as err:
LOG.warning(err)
ctx.abort()
database = configs.get('db_name') or database
if not database:
database = 'loqusdb'
if uri:
uri_info = uri_parser.parse_uri(uri)
database = uri_info.get('database')
adapter = MongoAdapter(client, db_name=database)
ctx.obj = {}
ctx.obj['db'] = database
ctx.obj['user'] = username
ctx.obj['password'] = password
ctx.obj['port'] = port
ctx.obj['host'] = host
ctx.obj['adapter'] = adapter
ctx.obj['version'] = __version__ | python | def cli(ctx, database, username, password, authdb, port, host, uri, verbose, config, test):
"""loqusdb: manage a local variant count database."""
loglevel = "INFO"
if verbose:
loglevel = "DEBUG"
coloredlogs.install(level=loglevel)
LOG.info("Running loqusdb version %s", __version__)
configs = {}
if config:
try:
configs = yaml.safe_load(config)
except yaml.YAMLError as err:
LOG.warning(err)
ctx.abort()
uri = configs.get('uri') or uri
if test:
uri = "mongomock://"
try:
client = get_client(
host=configs.get('host') or host,
port=configs.get('port') or port,
username=configs.get('username') or username,
password=configs.get('password') or password,
authdb=authdb or database or 'loqusdb',
uri=uri,
)
except DB_Error as err:
LOG.warning(err)
ctx.abort()
database = configs.get('db_name') or database
if not database:
database = 'loqusdb'
if uri:
uri_info = uri_parser.parse_uri(uri)
database = uri_info.get('database')
adapter = MongoAdapter(client, db_name=database)
ctx.obj = {}
ctx.obj['db'] = database
ctx.obj['user'] = username
ctx.obj['password'] = password
ctx.obj['port'] = port
ctx.obj['host'] = host
ctx.obj['adapter'] = adapter
ctx.obj['version'] = __version__ | [
"def",
"cli",
"(",
"ctx",
",",
"database",
",",
"username",
",",
"password",
",",
"authdb",
",",
"port",
",",
"host",
",",
"uri",
",",
"verbose",
",",
"config",
",",
"test",
")",
":",
"loglevel",
"=",
"\"INFO\"",
"if",
"verbose",
":",
"loglevel",
"="... | loqusdb: manage a local variant count database. | [
"loqusdb",
":",
"manage",
"a",
"local",
"variant",
"count",
"database",
"."
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/cli.py#L62-L111 |
yjzhang/uncurl_python | uncurl/qual2quant.py | binarize | def binarize(qualitative):
"""
binarizes an expression dataset.
"""
thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1))/2.0
binarized = qualitative > thresholds.reshape((len(thresholds), 1)).repeat(8,1)
return binarized.astype(int) | python | def binarize(qualitative):
"""
binarizes an expression dataset.
"""
thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1))/2.0
binarized = qualitative > thresholds.reshape((len(thresholds), 1)).repeat(8,1)
return binarized.astype(int) | [
"def",
"binarize",
"(",
"qualitative",
")",
":",
"thresholds",
"=",
"qualitative",
".",
"min",
"(",
"1",
")",
"+",
"(",
"qualitative",
".",
"max",
"(",
"1",
")",
"-",
"qualitative",
".",
"min",
"(",
"1",
")",
")",
"/",
"2.0",
"binarized",
"=",
"qua... | binarizes an expression dataset. | [
"binarizes",
"an",
"expression",
"dataset",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/qual2quant.py#L43-L49 |
yjzhang/uncurl_python | uncurl/qual2quant.py | qualNorm_filter_genes | def qualNorm_filter_genes(data, qualitative, pval_threshold=0.05, smoothing=1e-5, eps=1e-5):
"""
Does qualNorm but returns a filtered gene set, based on a p-value threshold.
"""
genes, cells = data.shape
clusters = qualitative.shape[1]
output = np.zeros((genes, clusters))
missing_indices = []
genes_included = []
qual_indices = []
thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1))/2.0
pvals = np.zeros(genes)
for i in range(genes):
if qualitative[i,:].max() == -1 and qualitative[i,:].min() == -1:
missing_indices.append(i)
continue
qual_indices.append(i)
threshold = thresholds[i]
data_i = data[i,:]
if sparse.issparse(data):
data_i = data_i.toarray().flatten()
assignments, means = poisson_cluster(data_i.reshape((1, cells)), 2)
means = means.flatten()
high_i = 1
low_i = 0
if means[0]>means[1]:
high_i = 0
low_i = 1
# do a p-value test
p_val = poisson_test(data_i[assignments==low_i], data_i[assignments==high_i], smoothing=smoothing)
pvals[i] = p_val
if p_val <= pval_threshold:
genes_included.append(i)
else:
continue
high_mean = np.median(data_i[assignments==high_i])
low_mean = np.median(data_i[assignments==low_i]) + eps
for k in range(clusters):
if qualitative[i,k]>threshold:
output[i,k] = high_mean
else:
output[i,k] = low_mean
output = output[genes_included,:]
pvals = pvals[genes_included]
return output, pvals, genes_included | python | def qualNorm_filter_genes(data, qualitative, pval_threshold=0.05, smoothing=1e-5, eps=1e-5):
"""
Does qualNorm but returns a filtered gene set, based on a p-value threshold.
"""
genes, cells = data.shape
clusters = qualitative.shape[1]
output = np.zeros((genes, clusters))
missing_indices = []
genes_included = []
qual_indices = []
thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1))/2.0
pvals = np.zeros(genes)
for i in range(genes):
if qualitative[i,:].max() == -1 and qualitative[i,:].min() == -1:
missing_indices.append(i)
continue
qual_indices.append(i)
threshold = thresholds[i]
data_i = data[i,:]
if sparse.issparse(data):
data_i = data_i.toarray().flatten()
assignments, means = poisson_cluster(data_i.reshape((1, cells)), 2)
means = means.flatten()
high_i = 1
low_i = 0
if means[0]>means[1]:
high_i = 0
low_i = 1
# do a p-value test
p_val = poisson_test(data_i[assignments==low_i], data_i[assignments==high_i], smoothing=smoothing)
pvals[i] = p_val
if p_val <= pval_threshold:
genes_included.append(i)
else:
continue
high_mean = np.median(data_i[assignments==high_i])
low_mean = np.median(data_i[assignments==low_i]) + eps
for k in range(clusters):
if qualitative[i,k]>threshold:
output[i,k] = high_mean
else:
output[i,k] = low_mean
output = output[genes_included,:]
pvals = pvals[genes_included]
return output, pvals, genes_included | [
"def",
"qualNorm_filter_genes",
"(",
"data",
",",
"qualitative",
",",
"pval_threshold",
"=",
"0.05",
",",
"smoothing",
"=",
"1e-5",
",",
"eps",
"=",
"1e-5",
")",
":",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",
"clusters",
"=",
"qualitative",
".",
... | Does qualNorm but returns a filtered gene set, based on a p-value threshold. | [
"Does",
"qualNorm",
"but",
"returns",
"a",
"filtered",
"gene",
"set",
"based",
"on",
"a",
"p",
"-",
"value",
"threshold",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/qual2quant.py#L51-L95 |
yjzhang/uncurl_python | uncurl/qual2quant.py | qualNorm | def qualNorm(data, qualitative):
"""
Generates starting points using binarized data. If qualitative data is missing for a given gene, all of its entries should be -1 in the qualitative matrix.
Args:
data (array): 2d array of genes x cells
qualitative (array): 2d array of numerical data - genes x clusters
Returns:
Array of starting positions for state estimation or
clustering, with shape genes x clusters
"""
genes, cells = data.shape
clusters = qualitative.shape[1]
output = np.zeros((genes, clusters))
missing_indices = []
qual_indices = []
thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1))/2.0
for i in range(genes):
if qualitative[i,:].max() == -1 and qualitative[i,:].min() == -1:
missing_indices.append(i)
continue
qual_indices.append(i)
threshold = thresholds[i]
data_i = data[i,:]
if sparse.issparse(data):
data_i = data_i.toarray().flatten()
assignments, means = poisson_cluster(data_i.reshape((1, cells)), 2)
means = means.flatten()
high_i = 1
low_i = 0
if means[0]>means[1]:
high_i = 0
low_i = 1
high_mean = np.median(data_i[assignments==high_i])
low_mean = np.median(data_i[assignments==low_i])
for k in range(clusters):
if qualitative[i,k]>threshold:
output[i,k] = high_mean
else:
output[i,k] = low_mean
if missing_indices:
assignments, means = poisson_cluster(data[qual_indices, :], clusters, output[qual_indices, :], max_iters=1)
for ind in missing_indices:
for k in range(clusters):
if len(assignments==k)==0:
output[ind, k] = data[ind,:].mean()
else:
output[ind, k] = data[ind, assignments==k].mean()
return output | python | def qualNorm(data, qualitative):
"""
Generates starting points using binarized data. If qualitative data is missing for a given gene, all of its entries should be -1 in the qualitative matrix.
Args:
data (array): 2d array of genes x cells
qualitative (array): 2d array of numerical data - genes x clusters
Returns:
Array of starting positions for state estimation or
clustering, with shape genes x clusters
"""
genes, cells = data.shape
clusters = qualitative.shape[1]
output = np.zeros((genes, clusters))
missing_indices = []
qual_indices = []
thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1))/2.0
for i in range(genes):
if qualitative[i,:].max() == -1 and qualitative[i,:].min() == -1:
missing_indices.append(i)
continue
qual_indices.append(i)
threshold = thresholds[i]
data_i = data[i,:]
if sparse.issparse(data):
data_i = data_i.toarray().flatten()
assignments, means = poisson_cluster(data_i.reshape((1, cells)), 2)
means = means.flatten()
high_i = 1
low_i = 0
if means[0]>means[1]:
high_i = 0
low_i = 1
high_mean = np.median(data_i[assignments==high_i])
low_mean = np.median(data_i[assignments==low_i])
for k in range(clusters):
if qualitative[i,k]>threshold:
output[i,k] = high_mean
else:
output[i,k] = low_mean
if missing_indices:
assignments, means = poisson_cluster(data[qual_indices, :], clusters, output[qual_indices, :], max_iters=1)
for ind in missing_indices:
for k in range(clusters):
if len(assignments==k)==0:
output[ind, k] = data[ind,:].mean()
else:
output[ind, k] = data[ind, assignments==k].mean()
return output | [
"def",
"qualNorm",
"(",
"data",
",",
"qualitative",
")",
":",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",
"clusters",
"=",
"qualitative",
".",
"shape",
"[",
"1",
"]",
"output",
"=",
"np",
".",
"zeros",
"(",
"(",
"genes",
",",
"clusters",
")",
... | Generates starting points using binarized data. If qualitative data is missing for a given gene, all of its entries should be -1 in the qualitative matrix.
Args:
data (array): 2d array of genes x cells
qualitative (array): 2d array of numerical data - genes x clusters
Returns:
Array of starting positions for state estimation or
clustering, with shape genes x clusters | [
"Generates",
"starting",
"points",
"using",
"binarized",
"data",
".",
"If",
"qualitative",
"data",
"is",
"missing",
"for",
"a",
"given",
"gene",
"all",
"of",
"its",
"entries",
"should",
"be",
"-",
"1",
"in",
"the",
"qualitative",
"matrix",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/qual2quant.py#L97-L146 |
yjzhang/uncurl_python | uncurl/qual2quant.py | qualNormGaussian | def qualNormGaussian(data, qualitative):
"""
Generates starting points using binarized data. If qualitative data is missing for a given gene, all of its entries should be -1 in the qualitative matrix.
Args:
data (array): 2d array of genes x cells
qualitative (array): 2d array of numerical data - genes x clusters
Returns:
Array of starting positions for state estimation or
clustering, with shape genes x clusters
"""
genes, cells = data.shape
clusters = qualitative.shape[1]
output = np.zeros((genes, clusters))
missing_indices = []
qual_indices = []
for i in range(genes):
if qualitative[i,:].max() == -1 and qualitative[i,:].min() == -1:
missing_indices.append(i)
continue
qual_indices.append(i)
threshold = (qualitative[i,:].max() - qualitative[i,:].min())/2.0
kmeans = KMeans(n_clusters = 2).fit(data[i,:].reshape((1, cells)))
assignments = kmeans.labels_
means = kmeans.cluster_centers_
high_mean = means.max()
low_mean = means.min()
for k in range(clusters):
if qualitative[i,k]>threshold:
output[i,k] = high_mean
else:
output[i,k] = low_mean
if missing_indices:
#generating centers for missing indices
M_init = output[qual_indices, :]
kmeans = KMeans(n_clusters = 2, init = M_init, max_iter = 1).fit(data[qual_indices, :])
assignments = kmeans.labels_
#assignments, means = poisson_cluster(data[qual_indices, :], clusters, output[qual_indices, :], max_iters=1)
for ind in missing_indices:
for k in range(clusters):
output[ind, k] = np.mean(data[ind, assignments==k])
# TODO: assign to closest
return output | python | def qualNormGaussian(data, qualitative):
"""
Generates starting points using binarized data. If qualitative data is missing for a given gene, all of its entries should be -1 in the qualitative matrix.
Args:
data (array): 2d array of genes x cells
qualitative (array): 2d array of numerical data - genes x clusters
Returns:
Array of starting positions for state estimation or
clustering, with shape genes x clusters
"""
genes, cells = data.shape
clusters = qualitative.shape[1]
output = np.zeros((genes, clusters))
missing_indices = []
qual_indices = []
for i in range(genes):
if qualitative[i,:].max() == -1 and qualitative[i,:].min() == -1:
missing_indices.append(i)
continue
qual_indices.append(i)
threshold = (qualitative[i,:].max() - qualitative[i,:].min())/2.0
kmeans = KMeans(n_clusters = 2).fit(data[i,:].reshape((1, cells)))
assignments = kmeans.labels_
means = kmeans.cluster_centers_
high_mean = means.max()
low_mean = means.min()
for k in range(clusters):
if qualitative[i,k]>threshold:
output[i,k] = high_mean
else:
output[i,k] = low_mean
if missing_indices:
#generating centers for missing indices
M_init = output[qual_indices, :]
kmeans = KMeans(n_clusters = 2, init = M_init, max_iter = 1).fit(data[qual_indices, :])
assignments = kmeans.labels_
#assignments, means = poisson_cluster(data[qual_indices, :], clusters, output[qual_indices, :], max_iters=1)
for ind in missing_indices:
for k in range(clusters):
output[ind, k] = np.mean(data[ind, assignments==k])
# TODO: assign to closest
return output | [
"def",
"qualNormGaussian",
"(",
"data",
",",
"qualitative",
")",
":",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",
"clusters",
"=",
"qualitative",
".",
"shape",
"[",
"1",
"]",
"output",
"=",
"np",
".",
"zeros",
"(",
"(",
"genes",
",",
"clusters",
... | Generates starting points using binarized data. If qualitative data is missing for a given gene, all of its entries should be -1 in the qualitative matrix.
Args:
data (array): 2d array of genes x cells
qualitative (array): 2d array of numerical data - genes x clusters
Returns:
Array of starting positions for state estimation or
clustering, with shape genes x clusters | [
"Generates",
"starting",
"points",
"using",
"binarized",
"data",
".",
"If",
"qualitative",
"data",
"is",
"missing",
"for",
"a",
"given",
"gene",
"all",
"of",
"its",
"entries",
"should",
"be",
"-",
"1",
"in",
"the",
"qualitative",
"matrix",
"."
] | train | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/qual2quant.py#L149-L192 |
OCHA-DAP/hdx-python-country | setup.py | script_dir | def script_dir(pyobject, follow_symlinks=True):
"""Get current script's directory
Args:
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory
"""
if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze
path = abspath(sys.executable)
else:
path = inspect.getabsfile(pyobject)
if follow_symlinks:
path = realpath(path)
return dirname(path) | python | def script_dir(pyobject, follow_symlinks=True):
"""Get current script's directory
Args:
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory
"""
if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze
path = abspath(sys.executable)
else:
path = inspect.getabsfile(pyobject)
if follow_symlinks:
path = realpath(path)
return dirname(path) | [
"def",
"script_dir",
"(",
"pyobject",
",",
"follow_symlinks",
"=",
"True",
")",
":",
"if",
"getattr",
"(",
"sys",
",",
"'frozen'",
",",
"False",
")",
":",
"# py2exe, PyInstaller, cx_Freeze",
"path",
"=",
"abspath",
"(",
"sys",
".",
"executable",
")",
"else",... | Get current script's directory
Args:
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory | [
"Get",
"current",
"script",
"s",
"directory"
] | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/setup.py#L11-L27 |
OCHA-DAP/hdx-python-country | setup.py | script_dir_plus_file | def script_dir_plus_file(filename, pyobject, follow_symlinks=True):
"""Get current script's directory and then append a filename
Args:
filename (str): Filename to append to directory path
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory and with filename appended
"""
return join(script_dir(pyobject, follow_symlinks), filename) | python | def script_dir_plus_file(filename, pyobject, follow_symlinks=True):
"""Get current script's directory and then append a filename
Args:
filename (str): Filename to append to directory path
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory and with filename appended
"""
return join(script_dir(pyobject, follow_symlinks), filename) | [
"def",
"script_dir_plus_file",
"(",
"filename",
",",
"pyobject",
",",
"follow_symlinks",
"=",
"True",
")",
":",
"return",
"join",
"(",
"script_dir",
"(",
"pyobject",
",",
"follow_symlinks",
")",
",",
"filename",
")"
] | Get current script's directory and then append a filename
Args:
filename (str): Filename to append to directory path
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory and with filename appended | [
"Get",
"current",
"script",
"s",
"directory",
"and",
"then",
"append",
"a",
"filename"
] | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/setup.py#L30-L41 |
moonso/loqusdb | loqusdb/commands/identity.py | identity | def identity(ctx, variant_id):
"""Check how well SVs are working in the database
"""
if not variant_id:
LOG.warning("Please provide a variant id")
ctx.abort()
adapter = ctx.obj['adapter']
version = ctx.obj['version']
LOG.info("Search variants {0}".format(adapter))
result = adapter.get_clusters(variant_id)
if result.count() == 0:
LOG.info("No hits for variant %s", variant_id)
return
for res in result:
click.echo(res) | python | def identity(ctx, variant_id):
"""Check how well SVs are working in the database
"""
if not variant_id:
LOG.warning("Please provide a variant id")
ctx.abort()
adapter = ctx.obj['adapter']
version = ctx.obj['version']
LOG.info("Search variants {0}".format(adapter))
result = adapter.get_clusters(variant_id)
if result.count() == 0:
LOG.info("No hits for variant %s", variant_id)
return
for res in result:
click.echo(res) | [
"def",
"identity",
"(",
"ctx",
",",
"variant_id",
")",
":",
"if",
"not",
"variant_id",
":",
"LOG",
".",
"warning",
"(",
"\"Please provide a variant id\"",
")",
"ctx",
".",
"abort",
"(",
")",
"adapter",
"=",
"ctx",
".",
"obj",
"[",
"'adapter'",
"]",
"vers... | Check how well SVs are working in the database | [
"Check",
"how",
"well",
"SVs",
"are",
"working",
"in",
"the",
"database"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/identity.py#L13-L32 |
ggravlingen/pygleif | pygleif/gleif.py | GLEIFEntity.registration_authority_entity_id | def registration_authority_entity_id(self):
"""
Some entities return the register entity id,
but other do not. Unsure if this is a bug or
inconsistently registered data.
"""
if ATTR_ENTITY_REGISTRATION_AUTHORITY in self.raw:
try:
return self.raw[
ATTR_ENTITY_REGISTRATION_AUTHORITY][
ATTR_ENTITY_REGISTRATION_AUTHORITY_ENTITY_ID][
ATTR_DOLLAR_SIGN]
except KeyError:
pass | python | def registration_authority_entity_id(self):
"""
Some entities return the register entity id,
but other do not. Unsure if this is a bug or
inconsistently registered data.
"""
if ATTR_ENTITY_REGISTRATION_AUTHORITY in self.raw:
try:
return self.raw[
ATTR_ENTITY_REGISTRATION_AUTHORITY][
ATTR_ENTITY_REGISTRATION_AUTHORITY_ENTITY_ID][
ATTR_DOLLAR_SIGN]
except KeyError:
pass | [
"def",
"registration_authority_entity_id",
"(",
"self",
")",
":",
"if",
"ATTR_ENTITY_REGISTRATION_AUTHORITY",
"in",
"self",
".",
"raw",
":",
"try",
":",
"return",
"self",
".",
"raw",
"[",
"ATTR_ENTITY_REGISTRATION_AUTHORITY",
"]",
"[",
"ATTR_ENTITY_REGISTRATION_AUTHORIT... | Some entities return the register entity id,
but other do not. Unsure if this is a bug or
inconsistently registered data. | [
"Some",
"entities",
"return",
"the",
"register",
"entity",
"id",
"but",
"other",
"do",
"not",
".",
"Unsure",
"if",
"this",
"is",
"a",
"bug",
"or",
"inconsistently",
"registered",
"data",
"."
] | train | https://github.com/ggravlingen/pygleif/blob/f0f62f1a2878fce45fedcc2260264153808429f9/pygleif/gleif.py#L127-L141 |
ggravlingen/pygleif | pygleif/gleif.py | GLEIFEntity.legal_form | def legal_form(self):
"""In some cases, the legal form is stored in the JSON-data.
In other cases, an ELF-code, consisting of mix of exactly
four letters and numbers are stored. This ELF-code
can be looked up in a registry where a code maps to
a organizational type. ELF-codes are not unique,
it can reoccur under different names in different
countries"""
if ATTR_ENTITY_LEGAL_FORM in self.raw:
try:
return LEGAL_FORMS[self.legal_jurisdiction][
self.raw[ATTR_ENTITY_LEGAL_FORM][
ATTR_ENTITY_LEGAL_FORM_CODE][ATTR_DOLLAR_SIGN]
]
except KeyError:
legal_form = self.raw[
ATTR_ENTITY_LEGAL_FORM][
ATTR_ENTITY_LEGAL_FORM_CODE][ATTR_DOLLAR_SIGN]
if len(legal_form) == 4:
# If this is returned, the ELF should
# be added to the constants.
return 'ELF code: ' + legal_form
else:
return legal_form | python | def legal_form(self):
"""In some cases, the legal form is stored in the JSON-data.
In other cases, an ELF-code, consisting of mix of exactly
four letters and numbers are stored. This ELF-code
can be looked up in a registry where a code maps to
a organizational type. ELF-codes are not unique,
it can reoccur under different names in different
countries"""
if ATTR_ENTITY_LEGAL_FORM in self.raw:
try:
return LEGAL_FORMS[self.legal_jurisdiction][
self.raw[ATTR_ENTITY_LEGAL_FORM][
ATTR_ENTITY_LEGAL_FORM_CODE][ATTR_DOLLAR_SIGN]
]
except KeyError:
legal_form = self.raw[
ATTR_ENTITY_LEGAL_FORM][
ATTR_ENTITY_LEGAL_FORM_CODE][ATTR_DOLLAR_SIGN]
if len(legal_form) == 4:
# If this is returned, the ELF should
# be added to the constants.
return 'ELF code: ' + legal_form
else:
return legal_form | [
"def",
"legal_form",
"(",
"self",
")",
":",
"if",
"ATTR_ENTITY_LEGAL_FORM",
"in",
"self",
".",
"raw",
":",
"try",
":",
"return",
"LEGAL_FORMS",
"[",
"self",
".",
"legal_jurisdiction",
"]",
"[",
"self",
".",
"raw",
"[",
"ATTR_ENTITY_LEGAL_FORM",
"]",
"[",
"... | In some cases, the legal form is stored in the JSON-data.
In other cases, an ELF-code, consisting of mix of exactly
four letters and numbers are stored. This ELF-code
can be looked up in a registry where a code maps to
a organizational type. ELF-codes are not unique,
it can reoccur under different names in different
countries | [
"In",
"some",
"cases",
"the",
"legal",
"form",
"is",
"stored",
"in",
"the",
"JSON",
"-",
"data",
".",
"In",
"other",
"cases",
"an",
"ELF",
"-",
"code",
"consisting",
"of",
"mix",
"of",
"exactly",
"four",
"letters",
"and",
"numbers",
"are",
"stored",
".... | train | https://github.com/ggravlingen/pygleif/blob/f0f62f1a2878fce45fedcc2260264153808429f9/pygleif/gleif.py#L157-L182 |
ggravlingen/pygleif | pygleif/gleif.py | DirectChild.valid_child_records | def valid_child_records(self):
child_lei = list()
"""Loop through data to find a valid record. Return list of LEI."""
for d in self.raw['data']:
# We're not very greedy here, but it seems some records have
# lapsed even through the issuer is active
if d['attributes']['relationship']['status'] in ['ACTIVE']:
child_lei.append(
d['attributes']['relationship']['startNode']['id'])
return child_lei | python | def valid_child_records(self):
child_lei = list()
"""Loop through data to find a valid record. Return list of LEI."""
for d in self.raw['data']:
# We're not very greedy here, but it seems some records have
# lapsed even through the issuer is active
if d['attributes']['relationship']['status'] in ['ACTIVE']:
child_lei.append(
d['attributes']['relationship']['startNode']['id'])
return child_lei | [
"def",
"valid_child_records",
"(",
"self",
")",
":",
"child_lei",
"=",
"list",
"(",
")",
"for",
"d",
"in",
"self",
".",
"raw",
"[",
"'data'",
"]",
":",
"# We're not very greedy here, but it seems some records have",
"# lapsed even through the issuer is active",
"if",
... | Loop through data to find a valid record. Return list of LEI. | [
"Loop",
"through",
"data",
"to",
"find",
"a",
"valid",
"record",
".",
"Return",
"list",
"of",
"LEI",
"."
] | train | https://github.com/ggravlingen/pygleif/blob/f0f62f1a2878fce45fedcc2260264153808429f9/pygleif/gleif.py#L305-L317 |
moonso/loqusdb | loqusdb/utils/annotate.py | annotate_variant | def annotate_variant(variant, var_obj=None):
"""Annotate a cyvcf variant with observations
Args:
variant(cyvcf2.variant)
var_obj(dict)
Returns:
variant(cyvcf2.variant): Annotated variant
"""
if var_obj:
variant.INFO['Obs'] = var_obj['observations']
if var_obj.get('homozygote'):
variant.INFO['Hom'] = var_obj['homozygote']
if var_obj.get('hemizygote'):
variant.INFO['Hem'] = var_obj['hemizygote']
return variant | python | def annotate_variant(variant, var_obj=None):
"""Annotate a cyvcf variant with observations
Args:
variant(cyvcf2.variant)
var_obj(dict)
Returns:
variant(cyvcf2.variant): Annotated variant
"""
if var_obj:
variant.INFO['Obs'] = var_obj['observations']
if var_obj.get('homozygote'):
variant.INFO['Hom'] = var_obj['homozygote']
if var_obj.get('hemizygote'):
variant.INFO['Hem'] = var_obj['hemizygote']
return variant | [
"def",
"annotate_variant",
"(",
"variant",
",",
"var_obj",
"=",
"None",
")",
":",
"if",
"var_obj",
":",
"variant",
".",
"INFO",
"[",
"'Obs'",
"]",
"=",
"var_obj",
"[",
"'observations'",
"]",
"if",
"var_obj",
".",
"get",
"(",
"'homozygote'",
")",
":",
"... | Annotate a cyvcf variant with observations
Args:
variant(cyvcf2.variant)
var_obj(dict)
Returns:
variant(cyvcf2.variant): Annotated variant | [
"Annotate",
"a",
"cyvcf",
"variant",
"with",
"observations",
"Args",
":",
"variant",
"(",
"cyvcf2",
".",
"variant",
")",
"var_obj",
"(",
"dict",
")",
"Returns",
":",
"variant",
"(",
"cyvcf2",
".",
"variant",
")",
":",
"Annotated",
"variant"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/annotate.py#L12-L30 |
moonso/loqusdb | loqusdb/utils/annotate.py | annotate_snv | def annotate_snv(adpter, variant):
"""Annotate an SNV/INDEL variant
Args:
adapter(loqusdb.plugin.adapter)
variant(cyvcf2.Variant)
"""
variant_id = get_variant_id(variant)
variant_obj = adapter.get_variant(variant={'_id':variant_id})
annotated_variant = annotated_variant(variant, variant_obj)
return annotated_variant | python | def annotate_snv(adpter, variant):
"""Annotate an SNV/INDEL variant
Args:
adapter(loqusdb.plugin.adapter)
variant(cyvcf2.Variant)
"""
variant_id = get_variant_id(variant)
variant_obj = adapter.get_variant(variant={'_id':variant_id})
annotated_variant = annotated_variant(variant, variant_obj)
return annotated_variant | [
"def",
"annotate_snv",
"(",
"adpter",
",",
"variant",
")",
":",
"variant_id",
"=",
"get_variant_id",
"(",
"variant",
")",
"variant_obj",
"=",
"adapter",
".",
"get_variant",
"(",
"variant",
"=",
"{",
"'_id'",
":",
"variant_id",
"}",
")",
"annotated_variant",
... | Annotate an SNV/INDEL variant
Args:
adapter(loqusdb.plugin.adapter)
variant(cyvcf2.Variant) | [
"Annotate",
"an",
"SNV",
"/",
"INDEL",
"variant",
"Args",
":",
"adapter",
"(",
"loqusdb",
".",
"plugin",
".",
"adapter",
")",
"variant",
"(",
"cyvcf2",
".",
"Variant",
")"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/annotate.py#L32-L43 |
moonso/loqusdb | loqusdb/utils/annotate.py | annotate_svs | def annotate_svs(adapter, vcf_obj):
"""Annotate all SV variants in a VCF
Args:
adapter(loqusdb.plugin.adapter)
vcf_obj(cyvcf2.VCF)
Yields:
variant(cyvcf2.Variant)
"""
for nr_variants, variant in enumerate(vcf_obj, 1):
variant_info = get_coords(variant)
match = adapter.get_structural_variant(variant_info)
if match:
annotate_variant(variant, match)
yield variant | python | def annotate_svs(adapter, vcf_obj):
"""Annotate all SV variants in a VCF
Args:
adapter(loqusdb.plugin.adapter)
vcf_obj(cyvcf2.VCF)
Yields:
variant(cyvcf2.Variant)
"""
for nr_variants, variant in enumerate(vcf_obj, 1):
variant_info = get_coords(variant)
match = adapter.get_structural_variant(variant_info)
if match:
annotate_variant(variant, match)
yield variant | [
"def",
"annotate_svs",
"(",
"adapter",
",",
"vcf_obj",
")",
":",
"for",
"nr_variants",
",",
"variant",
"in",
"enumerate",
"(",
"vcf_obj",
",",
"1",
")",
":",
"variant_info",
"=",
"get_coords",
"(",
"variant",
")",
"match",
"=",
"adapter",
".",
"get_structu... | Annotate all SV variants in a VCF
Args:
adapter(loqusdb.plugin.adapter)
vcf_obj(cyvcf2.VCF)
Yields:
variant(cyvcf2.Variant) | [
"Annotate",
"all",
"SV",
"variants",
"in",
"a",
"VCF",
"Args",
":",
"adapter",
"(",
"loqusdb",
".",
"plugin",
".",
"adapter",
")",
"vcf_obj",
"(",
"cyvcf2",
".",
"VCF",
")",
"Yields",
":",
"variant",
"(",
"cyvcf2",
".",
"Variant",
")"
] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/annotate.py#L45-L60 |
moonso/loqusdb | loqusdb/utils/annotate.py | annotate_snvs | def annotate_snvs(adapter, vcf_obj):
"""Annotate all variants in a VCF
Args:
adapter(loqusdb.plugin.adapter)
vcf_obj(cyvcf2.VCF)
Yields:
variant(cyvcf2.Variant): Annotated variant
"""
variants = {}
for nr_variants, variant in enumerate(vcf_obj, 1):
# Add the variant to current batch
variants[get_variant_id(variant)] = variant
# If batch len == 1000 we annotate the batch
if (nr_variants % 1000) == 0:
for var_obj in adapter.search_variants(list(variants.keys())):
var_id = var_obj['_id']
if var_id in variants:
annotate_variant(variants[var_id], var_obj)
for variant_id in variants:
yield variants[variant_id]
variants = {}
for var_obj in adapter.search_variants(list(variants.keys())):
var_id = var_obj['_id']
if var_id in variants:
annotate_variant(variants[var_id], var_obj)
for variant_id in variants:
yield variants[variant_id] | python | def annotate_snvs(adapter, vcf_obj):
"""Annotate all variants in a VCF
Args:
adapter(loqusdb.plugin.adapter)
vcf_obj(cyvcf2.VCF)
Yields:
variant(cyvcf2.Variant): Annotated variant
"""
variants = {}
for nr_variants, variant in enumerate(vcf_obj, 1):
# Add the variant to current batch
variants[get_variant_id(variant)] = variant
# If batch len == 1000 we annotate the batch
if (nr_variants % 1000) == 0:
for var_obj in adapter.search_variants(list(variants.keys())):
var_id = var_obj['_id']
if var_id in variants:
annotate_variant(variants[var_id], var_obj)
for variant_id in variants:
yield variants[variant_id]
variants = {}
for var_obj in adapter.search_variants(list(variants.keys())):
var_id = var_obj['_id']
if var_id in variants:
annotate_variant(variants[var_id], var_obj)
for variant_id in variants:
yield variants[variant_id] | [
"def",
"annotate_snvs",
"(",
"adapter",
",",
"vcf_obj",
")",
":",
"variants",
"=",
"{",
"}",
"for",
"nr_variants",
",",
"variant",
"in",
"enumerate",
"(",
"vcf_obj",
",",
"1",
")",
":",
"# Add the variant to current batch",
"variants",
"[",
"get_variant_id",
"... | Annotate all variants in a VCF
Args:
adapter(loqusdb.plugin.adapter)
vcf_obj(cyvcf2.VCF)
Yields:
variant(cyvcf2.Variant): Annotated variant | [
"Annotate",
"all",
"variants",
"in",
"a",
"VCF",
"Args",
":",
"adapter",
"(",
"loqusdb",
".",
"plugin",
".",
"adapter",
")",
"vcf_obj",
"(",
"cyvcf2",
".",
"VCF",
")",
"Yields",
":",
"variant",
"(",
"cyvcf2",
".",
"Variant",
")",
":",
"Annotated",
"var... | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/annotate.py#L63-L97 |
bosth/plpygis | plpygis/geometry.py | Geometry.from_geojson | def from_geojson(geojson, srid=4326):
"""
Create a Geometry from a GeoJSON. The SRID can be overridden from the
expected 4326.
"""
type_ = geojson["type"].lower()
if type_ == "geometrycollection":
geometries = []
for geometry in geojson["geometries"]:
geometries.append(Geometry.from_geojson(geometry, srid=None))
return GeometryCollection(geometries, srid)
elif type_ == "point":
return Point(geojson["coordinates"], srid=srid)
elif type_ == "linestring":
return LineString(geojson["coordinates"], srid=srid)
elif type_ == "polygon":
return Polygon(geojson["coordinates"], srid=srid)
elif type_ == "multipoint":
geometries = _MultiGeometry._multi_from_geojson(geojson, Point)
return MultiPoint(geometries, srid=srid)
elif type_ == "multilinestring":
geometries = _MultiGeometry._multi_from_geojson(geojson, LineString)
return MultiLineString(geometries, srid=srid)
elif type_ == "multipolygon":
geometries = _MultiGeometry._multi_from_geojson(geojson, Polygon)
return MultiPolygon(geometries, srid=srid) | python | def from_geojson(geojson, srid=4326):
"""
Create a Geometry from a GeoJSON. The SRID can be overridden from the
expected 4326.
"""
type_ = geojson["type"].lower()
if type_ == "geometrycollection":
geometries = []
for geometry in geojson["geometries"]:
geometries.append(Geometry.from_geojson(geometry, srid=None))
return GeometryCollection(geometries, srid)
elif type_ == "point":
return Point(geojson["coordinates"], srid=srid)
elif type_ == "linestring":
return LineString(geojson["coordinates"], srid=srid)
elif type_ == "polygon":
return Polygon(geojson["coordinates"], srid=srid)
elif type_ == "multipoint":
geometries = _MultiGeometry._multi_from_geojson(geojson, Point)
return MultiPoint(geometries, srid=srid)
elif type_ == "multilinestring":
geometries = _MultiGeometry._multi_from_geojson(geojson, LineString)
return MultiLineString(geometries, srid=srid)
elif type_ == "multipolygon":
geometries = _MultiGeometry._multi_from_geojson(geojson, Polygon)
return MultiPolygon(geometries, srid=srid) | [
"def",
"from_geojson",
"(",
"geojson",
",",
"srid",
"=",
"4326",
")",
":",
"type_",
"=",
"geojson",
"[",
"\"type\"",
"]",
".",
"lower",
"(",
")",
"if",
"type_",
"==",
"\"geometrycollection\"",
":",
"geometries",
"=",
"[",
"]",
"for",
"geometry",
"in",
... | Create a Geometry from a GeoJSON. The SRID can be overridden from the
expected 4326. | [
"Create",
"a",
"Geometry",
"from",
"a",
"GeoJSON",
".",
"The",
"SRID",
"can",
"be",
"overridden",
"from",
"the",
"expected",
"4326",
"."
] | train | https://github.com/bosth/plpygis/blob/9469cc469df4c8cd407de158903d5465cda804ea/plpygis/geometry.py#L77-L102 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.