blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e077000339f79423ee4da9c789b77491ab87ac5f
|
fbe3a52d2dd02bec18f7f52b31e357aed192a308
|
/misc/begin/exercises/def1.py
|
4cbd865b23fdbc03446b3cc80ad9e332cc9a1e7d
|
[] |
no_license
|
lherrada/python
|
8fc5bd5ceb6038479fa6347dd6c0bd6e17f92e98
|
d8260f35ba91b89590ef8e489188fb80ca1aed4e
|
refs/heads/master
| 2022-10-29T06:23:17.297554
| 2022-09-24T15:45:59
| 2022-09-24T15:45:59
| 23,411,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
#!/usr/bin/python
# Python 2 exercise: invoking functions indirectly, both through the
# built-in apply() (removed in Python 3; f(*args) is the modern form)
# and through plain first-class function references.

def general(func,arg):
    # Generic dispatcher: call func with a single positional argument.
    apply(func,(arg,))

def name(x):
    print "Your name is " + x

def age(n):
    print "Your age is %d" % n
    print "=" * 30

# (function, argument) pairs to be dispatched below.
datain=[(name,"Erikita"),(age,38)]

# First via apply() ...
for i,j in datain:
    apply(i,(j,))

# ... then via a direct call, which is equivalent.
for i,j in datain:
    i(j)

#general(name,"Erikita")
#general(age,38)
#name("Erika")
#x=name
#x("Luis")
#age(37)
|
[
"[email protected]"
] | |
346a1d1871be3ea9c34e4439423a76c4f242e810
|
669e9241b02bdaa303fbc2fd4023b90d4d179a59
|
/Randomized Pulled String/challenge3.py
|
2d68218a4951ecd60943d4a45d32bde6066f8181
|
[] |
no_license
|
benjaminpotter/HatchProjects
|
0854cf46ae7c3781468116a5d63b703dd54ae68c
|
7f6a948d3474c755d071751b725c059e6c7f3553
|
refs/heads/master
| 2022-01-28T16:58:03.449073
| 2019-08-16T13:47:30
| 2019-08-16T13:47:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
def mouseClicked():
    """Processing.py event handler: on each click, repaint a black canvas
    with one semi-transparent grey random line per x column.

    NOTE(review): background/strokeWeight/random/stroke/line/width/height
    are Processing sketch builtins — this only runs inside a sketch.
    """
    background(0)
    strokeWeight(10)
    for col in range(0, width):
        shade = random(0, 255)
        end_x = random(0, width)
        stroke(shade, shade, shade, 100)
        line(col, 0, end_x, height)
|
[
"[email protected]"
] | |
83f325539952c23909157086bbb01b3725047fbd
|
d60f13e52d385fd7f839ee441d8df05b34f8c75b
|
/wirecell/test/__main__.py
|
ebc11006d8d4b16dfbd3ebe5983fdcf478ad5421
|
[] |
no_license
|
wenqiang-gu/wire-cell-python
|
07fe7ac420fedf747e97ba424052e85222316234
|
981541f5618b94d55ee5f07c6eeff6fbbfa5fa93
|
refs/heads/master
| 2022-10-02T08:51:27.193403
| 2022-07-29T16:23:42
| 2022-07-29T16:23:42
| 228,528,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
import math
import click
from wirecell.util import ario, plottools
@click.group("test")
@click.pass_context
def cli(ctx):
    '''
    Wire Cell Test Commands
    '''
    # Group container only; subcommands attach themselves via @cli.command.
@cli.command("plot")
@click.option("-n", "--name", default="noise",
              help="The test name")
@click.argument("datafile")
@click.argument("output")
@click.pass_context
def plot(ctx, name, datafile, output):
    '''
    Make plots from file made by test_<test>.
    '''
    # Import the per-test plotting module lazily so that only the requested
    # test's dependencies are loaded.
    from importlib import import_module
    mod = import_module(f'wirecell.test.{name}')
    # ario.load gives dict-like access to the data file's arrays.
    fp = ario.load(datafile)
    # plottools.pages yields a multi-page sink; the test module draws into it.
    with plottools.pages(output) as out:
        mod.plot(fp, out)
def main():
    """Script entry point: run the click command group with a fresh context object."""
    cli(obj={})


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
850ade5eeed22d497b51edf7a8f5ccd3b3049007
|
61efd764ae4586b6b2ee5e6e2c255079e2b01cfc
|
/azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/metric_specification.py
|
a86668fcff8d8d4acf56738739afb3ab74282378
|
[
"MIT"
] |
permissive
|
AutorestCI/azure-sdk-for-python
|
a3642f53b5bf79d1dbb77851ec56f4cc0c5b3b61
|
60b0726619ce9d7baca41f6cd38f741d74c4e54a
|
refs/heads/master
| 2021-01-21T02:23:59.207091
| 2018-01-31T21:31:27
| 2018-01-31T21:31:27
| 55,251,306
| 4
| 3
| null | 2017-11-13T17:57:46
| 2016-04-01T17:48:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,163
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MetricSpecification(Model):
    """Description of metrics specification.

    NOTE: AutoRest-generated msrest model — a plain data holder; edits here
    will be lost on regeneration (see file header).

    :param name: The name of the metric.
    :type name: str
    :param display_name: The display name of the metric.
    :type display_name: str
    :param display_description: The description of the metric.
    :type display_description: str
    :param unit: Units the metric to be displayed in.
    :type unit: str
    :param aggregation_type: The aggregation type.
    :type aggregation_type: str
    :param availabilities: List of availability.
    :type availabilities:
     list[~azure.mgmt.network.v2017_10_01.models.Availability]
    :param enable_regional_mdm_account: Whether regional MDM account enabled.
    :type enable_regional_mdm_account: bool
    :param fill_gap_with_zero: Whether gaps would be filled with zeros.
    :type fill_gap_with_zero: bool
    :param metric_filter_pattern: Pattern for the filter of the metric.
    :type metric_filter_pattern: str
    :param dimensions: List of dimensions.
    :type dimensions: list[~azure.mgmt.network.v2017_10_01.models.Dimension]
    :param is_internal: Whether the metric is internal.
    :type is_internal: bool
    :param source_mdm_account: The source MDM account.
    :type source_mdm_account: str
    :param source_mdm_namespace: The source MDM namespace.
    :type source_mdm_namespace: str
    :param resource_id_dimension_name_override: The resource Id dimension name
     override.
    :type resource_id_dimension_name_override: str
    """

    # Maps each Python attribute to its wire (JSON) key and msrest type tag.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'display_description': {'key': 'displayDescription', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
        'availabilities': {'key': 'availabilities', 'type': '[Availability]'},
        'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
        'fill_gap_with_zero': {'key': 'fillGapWithZero', 'type': 'bool'},
        'metric_filter_pattern': {'key': 'metricFilterPattern', 'type': 'str'},
        'dimensions': {'key': 'dimensions', 'type': '[Dimension]'},
        'is_internal': {'key': 'isInternal', 'type': 'bool'},
        'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
        'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
        'resource_id_dimension_name_override': {'key': 'resourceIdDimensionNameOverride', 'type': 'str'},
    }

    def __init__(self, name=None, display_name=None, display_description=None, unit=None, aggregation_type=None, availabilities=None, enable_regional_mdm_account=None, fill_gap_with_zero=None, metric_filter_pattern=None, dimensions=None, is_internal=None, source_mdm_account=None, source_mdm_namespace=None, resource_id_dimension_name_override=None):
        # All parameters are optional; unset fields stay None and are
        # omitted by msrest serialization.
        super(MetricSpecification, self).__init__()
        self.name = name
        self.display_name = display_name
        self.display_description = display_description
        self.unit = unit
        self.aggregation_type = aggregation_type
        self.availabilities = availabilities
        self.enable_regional_mdm_account = enable_regional_mdm_account
        self.fill_gap_with_zero = fill_gap_with_zero
        self.metric_filter_pattern = metric_filter_pattern
        self.dimensions = dimensions
        self.is_internal = is_internal
        self.source_mdm_account = source_mdm_account
        self.source_mdm_namespace = source_mdm_namespace
        self.resource_id_dimension_name_override = resource_id_dimension_name_override
|
[
"[email protected]"
] | |
c9ce85723629a707758ea22deedc74f0c563ea12
|
4b89a7de426fb53b999b5f3834404215a90817df
|
/pyobjc-framework-GameCenter/setup.py
|
21ba4c119f399fb8e08f6ccbc52b420a124e686f
|
[] |
no_license
|
peeyush-tm/pyobjc
|
a1f3ec167482566ddc7c895cfa2aca436109cf66
|
da488946f6cc67a83dcc26c04484ca4f10fabc82
|
refs/heads/master
| 2021-01-20T19:26:06.015044
| 2016-05-22T14:53:37
| 2016-05-22T14:53:37
| 60,502,688
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
'''
Wrappers for the "GameCenter" framework on MacOS X. The GameKit framework
provides Game Center functionality: leaderboards, achievements, player
authentication and multiplayer matchmaking. Applications that make use of
the GameCenter framework all talk to the same Game Center service.

These wrappers don't include documentation, please check Apple's documentation
for information on how to use this framework and PyObjC's documentation
for general tips and tricks regarding the translation between Python
and (Objective-)C frameworks
'''
# NOTE: the previous module docstring was copy-pasted from the AddressBook
# framework wrapper; it is also used at runtime as the package's
# long_description (via __doc__), so it has been corrected here.
from pyobjc_setup import setup, Extension
import os

VERSION="3.2a1"

setup(
    name='pyobjc-framework-GameCenter',
    version=VERSION,
    description = "Wrappers for the framework GameCenter on Mac OS X",
    # The corrected module docstring above becomes the PyPI long description.
    long_description=__doc__,
    packages = [ "GameCenter" ],
    setup_requires = [
        'pyobjc-core>=' + VERSION,
    ],
    install_requires = [
        'pyobjc-core>=' + VERSION,
        'pyobjc-framework-Cocoa>=' + VERSION,
    ],
    ext_modules = [
        Extension("GameCenter._GameCenter",
            [ "Modules/_GameCenter.m" ],
            # The underlying native framework is GameKit.
            extra_link_args=["-framework", "GameKit"],
            depends=[
                os.path.join('Modules', fn)
                for fn in os.listdir('Modules')
                if fn.startswith('_GameCenter')
            ]
        ),
    ],
    min_os_level='10.8',
)
|
[
"[email protected]"
] | |
53092dfd2bd0fa00448c9e96ce8c9b25bf3e34ce
|
70fec09ceb625608d561937955c285c0c39f6d95
|
/examples/basic_examples/http_middleware_service.py
|
c93260f707026e51e7be79c6dc733285377333fb
|
[
"MIT"
] |
permissive
|
kalaspuff/tomodachi
|
b285e2c73696d14e3c84a479745e00824fba7190
|
deca849ec2b4cdc3d27f06e9ce0056fac0146a1a
|
refs/heads/master
| 2023-08-31T00:32:12.042486
| 2023-08-21T13:02:24
| 2023-08-21T13:02:24
| 62,165,703
| 191
| 28
|
MIT
| 2023-09-11T23:32:51
| 2016-06-28T18:43:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
import asyncio
from typing import Any, Callable, Dict
from aiohttp import web
import tomodachi
from tomodachi import Options, http, http_error
async def middleware_function(
    func: Callable, service: Any, request: web.Request, context: Dict, *args: Any, **kwargs: Any
) -> Any:
    """Example HTTP middleware: log before and after the wrapped handler runs."""
    # Pre-handler work goes here.
    tomodachi.get_logger().info("middleware before")

    # Extra positional or keyword arguments could also be injected, e.g.:
    #   result = await func(*args, id='overridden', **kwargs)
    result = await func(*args, **kwargs)

    # Post-handler work goes here.
    tomodachi.get_logger().info("middleware after")
    return result
class ExampleHttpMiddlewareService(tomodachi.Service):
    """Example tomodachi HTTP service demonstrating middleware chaining."""

    name = "example-http-service"

    # Adds a middleware function that is run on every HTTP call. Several middlewares can be chained.
    http_middleware = [middleware_function]

    # Some options can be specified to define credentials, used ports, hostnames, access log, etc.
    options = Options(
        http=Options.HTTP(
            port=4711,
            content_type="text/plain; charset=utf-8",
            access_log=True,
        ),
    )

    # Route regex: '/?' makes the trailing slash optional.
    @http("GET", r"/example/?")
    async def example(self, request: web.Request, **kwargs: Any) -> str:
        await asyncio.sleep(1)
        return "友達"  # tomodachi

    # '(?P<id>...)' captures the path segment and is passed in as `id`.
    @http("GET", r"/example/(?P<id>[^/]+?)/?")
    async def example_with_id(self, request: web.Request, id: str) -> str:
        return "友達 (id: {})".format(id)

    # Fallback handler invoked when no route matches (HTTP 404).
    @http_error(status_code=404)
    async def error_404(self, request: web.Request, **kwargs: Any) -> str:
        return "error 404"
|
[
"[email protected]"
] | |
625a77678dafad3f72ea2f4629bed9b901e7f7cd
|
2919484ba494fdb9ce60005392286d293d98c325
|
/deep_autoviml/models/big_deep.py
|
14b1e0dc911254d0e247500c32d6d37fae9f5323
|
[
"Apache-2.0"
] |
permissive
|
Arunava98/deep_autoviml
|
d6c8d7bb701967d671eae6a8329018e32589d09d
|
9902bb230f90d9da367445656fcefad2e2d5aea3
|
refs/heads/master
| 2023-07-20T03:31:38.705198
| 2021-08-26T14:19:38
| 2021-08-26T14:19:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,506
|
py
|
############################################################################################
#Copyright 2021 Google LLC
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
############################################################################################
import tensorflow as tf
from tensorflow import keras
#### Make sure it is Tensorflow 2.4 or greater!
from tensorflow.keras.optimizers import SGD, Adam, RMSprop
from tensorflow.keras import layers
from tensorflow.keras import optimizers
from tensorflow.keras import models
from tensorflow.keras import callbacks
from tensorflow.keras import backend as K
from tensorflow.keras import utils
from tensorflow.keras import layers
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.optimizers import SGD
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Reshape, MaxPooling1D, MaxPooling2D
from tensorflow.keras.layers import AveragePooling2D, AveragePooling1D
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Embedding, Reshape, Dropout, Dense
from tensorflow.keras.layers import Activation, Dense, Embedding, GlobalAveragePooling1D
from tensorflow.keras.layers import GlobalMaxPooling1D, Dropout, Conv1D
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
############################################################################################
# Fully-connected stack: three BatchNorm -> Dropout -> Dense stages of
# shrinking width (128 -> 64 -> 32), closed by a final BatchNorm/Dropout.
# No input shape or output head is declared here; presumably the caller
# builds on top of this Sequential — TODO confirm against the caller.
model = models.Sequential([
    BatchNormalization(),
    Dropout(0.5),
    layers.Dense(128, activation='relu', kernel_initializer='he_normal'),
    BatchNormalization(),
    Dropout(0.5),
    layers.Dense(64, activation='relu', kernel_initializer='he_normal'),
    BatchNormalization(),
    Dropout(0.2),
    layers.Dense(32, activation='relu', kernel_initializer='he_normal'),
    BatchNormalization(),
    Dropout(0.2),
    ])
|
[
"[email protected]"
] | |
459c72af3cd511acf54b8b60834225780fea43e4
|
5a79600f6db7a8c65fa3182f822891d7fd68eeda
|
/tests/test_gpu_openacc.py
|
e1fc8aa0bbbf714cbefa9cc3d031f4e9e91790f1
|
[
"MIT"
] |
permissive
|
alisiahkoohi/devito
|
867fb05c89f24193951835227abdc271f42cc6e2
|
f535a44dff12de2837eb6e3217a65ffb2d371cb8
|
refs/heads/master
| 2023-03-16T05:50:23.610576
| 2021-05-24T21:49:32
| 2021-05-24T22:21:40
| 128,473,180
| 0
| 0
|
MIT
| 2023-03-02T12:58:21
| 2018-04-06T21:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 6,509
|
py
|
import pytest
import numpy as np
from conftest import skipif
from devito import Grid, Function, TimeFunction, Eq, Operator, norm, solve
from devito.data import LEFT
from devito.ir.iet import FindNodes, Section, retrieve_iteration_tree
from examples.seismic import TimeAxis, RickerSource, Receiver
class TestCodeGeneration(object):
    """Checks on the OpenACC pragmas and data directives that the Devito
    compiler emits for the nvidiaX platform (inspects generated code only;
    no device execution required)."""

    def test_basic(self):
        grid = Grid(shape=(3, 3, 3))
        u = TimeFunction(name='u', grid=grid)
        op = Operator(Eq(u.forward, u + 1), platform='nvidiaX', language='openacc')
        trees = retrieve_iteration_tree(op)
        assert len(trees) == 1
        # The whole 3D loop nest is collapsed into a single accelerated loop.
        assert trees[0][1].pragmas[0].value ==\
            'acc parallel loop collapse(3) present(u)'
        # Data is copied onto the device up front ...
        assert op.body[1].header[0].value ==\
            ('acc enter data copyin(u[0:u_vec->size[0]]'
             '[0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])')
        assert str(op.body[1].footer[0]) == ''
        # ... and copied back then deleted on the way out.
        assert op.body[1].footer[1].contents[0].value ==\
            ('acc exit data copyout(u[0:u_vec->size[0]]'
             '[0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])')
        assert op.body[1].footer[1].contents[1].value ==\
            ('acc exit data delete(u[0:u_vec->size[0]]'
             '[0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])')

    def test_streaming_postponed_deletion(self):
        grid = Grid(shape=(10, 10, 10))
        u = TimeFunction(name='u', grid=grid)
        v = TimeFunction(name='v', grid=grid)
        usave = TimeFunction(name='usave', grid=grid, save=10)
        eqns = [Eq(u.forward, u + usave),
                Eq(v.forward, v + u.forward.dx + usave)]
        op = Operator(eqns, platform='nvidiaX', language='openacc',
                      opt=('streaming', 'orchestrate'))
        sections = FindNodes(Section).visit(op)
        assert len(sections) == 2
        # With streaming, the device copy of the usave time slice must be
        # deleted only after the LAST section that reads it.
        assert str(sections[1].body[0].body[0].footer[1]) ==\
            ('#pragma acc exit data delete(usave[time:1][0:usave_vec->size[1]]'
             '[0:usave_vec->size[2]][0:usave_vec->size[3]])')

    def test_streaming_with_host_loop(self):
        grid = Grid(shape=(10, 10, 10))
        f = Function(name='f', grid=grid)
        u = TimeFunction(name='u', grid=grid, save=10)
        eqns = [Eq(f, u),
                Eq(u.forward, f + 1)]
        op = Operator(eqns, platform='nvidiaX', language='openacc',
                      opt=('streaming', 'orchestrate'))
        # Check generated code
        assert len(op._func_table) == 2
        assert 'init_device0' in op._func_table
        assert 'prefetch_host_to_device0' in op._func_table
        sections = FindNodes(Section).visit(op)
        assert len(sections) == 2
        s = sections[0].body[0].body[0]
        assert str(s.body[3].footer[1]) == ('#pragma acc exit data delete'
                                            '(u[time:1][0:u_vec->size[1]][0:u_vec'
                                            '->size[2]][0:u_vec->size[3]])')
        assert str(s.body[2]) == ('#pragma acc data present(u[time:1][0:u_vec->'
                                  'size[1]][0:u_vec->size[2]][0:u_vec->size[3]])')
        trees = retrieve_iteration_tree(op)
        assert len(trees) == 3
        assert 'present(f)' in str(trees[0][1].pragmas[0])
class TestOperator(object):
    """Runs Operators on an actual device (skipped when no device is
    available) and checks numerical results."""

    @skipif('nodevice')
    def test_op_apply(self):
        grid = Grid(shape=(3, 3, 3))
        u = TimeFunction(name='u', grid=grid, dtype=np.int32)
        op = Operator(Eq(u.forward, u + 1))
        # Make sure we've indeed generated OpenACC code
        assert 'acc parallel' in str(op)
        time_steps = 1000
        op.apply(time_M=time_steps)
        # u += 1 per step, so every point equals the number of steps taken.
        assert np.all(np.array(u.data[0, :, :, :]) == time_steps)

    @skipif('nodevice')
    def test_iso_ac(self):
        # Isotropic acoustic wave propagation on a two-layer velocity model.
        shape = (101, 101)
        extent = (1000, 1000)
        origin = (0., 0.)
        v = np.empty(shape, dtype=np.float32)
        v[:, :51] = 1.5
        v[:, 51:] = 2.5
        grid = Grid(shape=shape, extent=extent, origin=origin)
        t0 = 0.
        tn = 1000.
        dt = 1.6
        time_range = TimeAxis(start=t0, stop=tn, step=dt)
        # Ricker wavelet source at the domain centre, 20m depth.
        f0 = 0.010
        src = RickerSource(name='src', grid=grid, f0=f0,
                           npoint=1, time_range=time_range)
        domain_size = np.array(extent)
        src.coordinates.data[0, :] = domain_size*.5
        src.coordinates.data[0, -1] = 20.
        # Line of receivers across the top of the domain.
        rec = Receiver(name='rec', grid=grid, npoint=101, time_range=time_range)
        rec.coordinates.data[:, 0] = np.linspace(0, domain_size[0], num=101)
        rec.coordinates.data[:, 1] = 20.
        u = TimeFunction(name="u", grid=grid, time_order=2, space_order=2)
        m = Function(name='m', grid=grid)
        m.data[:] = 1./(v*v)
        # Second-order acoustic wave equation: m * u_tt - laplace(u) = 0.
        pde = m * u.dt2 - u.laplace
        stencil = Eq(u.forward, solve(pde, u.forward))
        src_term = src.inject(field=u.forward, expr=src * dt**2 / m)
        rec_term = rec.interpolate(expr=u.forward)
        op = Operator([stencil] + src_term + rec_term)
        # Make sure we've indeed generated OpenACC code
        assert 'acc parallel' in str(op)
        op(time=time_range.num-1, dt=dt)
        # Reference norm for this exact setup; loose tolerance for FP noise.
        assert np.isclose(norm(rec), 490.56, atol=1e-2, rtol=0)
class TestMPI(object):
    """OpenACC + MPI combined runs (2 ranks, device required)."""

    @skipif('nodevice')
    @pytest.mark.parallel(mode=2)
    def test_basic(self):
        grid = Grid(shape=(6, 6))
        x, y = grid.dimensions
        t = grid.stepping_dim
        u = TimeFunction(name='u', grid=grid, space_order=2)
        u.data[:] = 1.
        # 5-point stencil; with 2 ranks this requires halo exchanges.
        expr = u[t, x, y-1] + u[t, x-1, y] + u[t, x, y] + u[t, x, y+1] + u[t, x+1, y]
        op = Operator(Eq(u.forward, expr), platform='nvidiaX', language='openacc')
        # Make sure we've indeed generated OpenACC+MPI code
        assert 'acc parallel' in str(op)
        assert len(op._func_table) == 4
        op(time_M=1)
        # Each rank holds half the domain; the expected values are mirrored
        # between the LEFT and RIGHT halves.
        glb_pos_map = grid.distributor.glb_pos_map
        if LEFT in glb_pos_map[x]:
            assert np.all(u.data[0] == [[11., 16., 17., 17., 16., 11.],
                                        [16., 23., 24., 24., 23., 16.],
                                        [17., 24., 25., 25., 24., 17.]])
        else:
            assert np.all(u.data[0] == [[17., 24., 25., 25., 24., 17.],
                                        [16., 23., 24., 24., 23., 16.],
                                        [11., 16., 17., 17., 16., 11.]])

    @skipif('nodevice')
    @pytest.mark.parallel(mode=2)
    def test_iso_ac(self):
        # Re-run the single-rank acoustic test under MPI.
        TestOperator().test_iso_ac()
|
[
"[email protected]"
] | |
ac31523ba9787d027e63b488024b15c9e839e46c
|
f3bd271bf00325881fb5b2533b9ef7f7448a75ec
|
/classes/_point12.py
|
3f67194c74253a3ea60ca8994c2d9259631a918f
|
[] |
no_license
|
obaica/xcp2k
|
7f99fc9d494859e16b9b0ea8e217b0493f4b2f59
|
6e15c2c95658f545102595dc1783f5e03a9e6916
|
refs/heads/master
| 2020-07-15T17:27:43.378835
| 2019-02-11T16:32:24
| 2019-02-11T16:32:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
from xcp2k.inputsection import InputSection
class _point12(InputSection):
    """Input-section wrapper for a CP2K POINT block."""

    def __init__(self):
        InputSection.__init__(self)
        # Single-valued keywords.
        self.Type = None
        self.Xyz = None
        # Keywords that may appear multiple times.
        self.Atoms = []
        self.Weights = []
        # Section name and attribute -> CP2K keyword mappings.
        self._name = "POINT"
        self._keywords = {'Xyz': 'XYZ', 'Type': 'TYPE'}
        self._repeated_keywords = {'Weights': 'WEIGHTS', 'Atoms': 'ATOMS'}
|
[
"[email protected]"
] | |
332064ba5922ff92b2319eb3b292136ddec583f8
|
3fcc7957ed103ead0db8d4e6020c52403559e63b
|
/1557.py
|
04bb7afeb9d7032e1dee17c65612b5604da1c506
|
[] |
no_license
|
gabrielreiss/URI
|
db3082bd89832bb4f45d2375db376454c2ff8f27
|
01bc927d1eee8eb16a16de786e981faa494088e8
|
refs/heads/master
| 2022-04-22T08:25:28.855996
| 2020-04-15T14:58:47
| 2020-04-15T14:58:47
| 255,950,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
# URI 1557: for each n (terminated by 0), print an n x n table where cell
# (i, j) holds 2**(i+j), every value right-aligned to the width of the
# largest entry, columns separated by one space, blank line after each table.
while True:
    n = int(input())
    if n == 0:
        break
    table = [[2 ** (row + col) for col in range(n)] for row in range(n)]
    # The bottom-right corner holds the largest value, 2**(2n-2).
    width = len(str(table[n - 1][n - 1]))
    for row_values in table:
        print(' '.join(str(value).rjust(width) for value in row_values))
    print()
|
[
"[email protected]"
] | |
6c050c0d77f4e5d5ec77c6bef6bca2540f25d9b6
|
461052f4a7197db023ad3deb864bf1784fdd7854
|
/library/migrations/0003_auto_20200513_1625.py
|
451151225554e0605b2693ef162763660f71eb46
|
[
"MIT"
] |
permissive
|
ArRosid/training_drf
|
1660a08272c09302b39adc8e19e3674a78863685
|
4369c8113a67bb3f18b6890210902f09d617569f
|
refs/heads/master
| 2022-06-20T02:03:49.373355
| 2020-05-13T16:38:03
| 2020-05-13T16:38:03
| 263,639,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
# Generated by Django 3.0.6 on 2020-05-13 16:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add soft-delete support to Book (`deleted_by`, `is_deleted`) and
    re-declare `modified_by` with an explicit related_name.

    Auto-generated by Django (see header); do not hand-tune operations.
    """

    dependencies = [
        # AUTH_USER_MODEL is swappable, hence the special dependency.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('library', '0002_book_modified_by'),
    ]

    operations = [
        migrations.AddField(
            model_name='book',
            name='deleted_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='book_deleted_by', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='book',
            name='is_deleted',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='book',
            name='modified_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='book_modified_by', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"[email protected]"
] | |
dd258e1388ef102e9d77f492101ef00bda3bda1f
|
0dc67428c50acf9dea7c17da9c603169a05e201c
|
/customer/urls.py
|
b58afe08ab753951ca3b7f89accd78318da1be54
|
[] |
no_license
|
sherrywilly/Razorpay
|
fe9a48ca9a9dd1d7d59ad959535e7ae2e6045305
|
3fe96ff7d6e988b3c276950e0615c0a4eeb1da8e
|
refs/heads/master
| 2023-07-02T11:37:55.951514
| 2021-07-29T04:36:19
| 2021-07-29T04:36:19
| 390,238,480
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 978
|
py
|
from django.urls import path
from .views import completed, create_contacts, create_fund_account, create_payout, index,payment, refund,verifyPayment
from .webhooks import VerifyPayHook
# URL routes for the customer app: Razorpay payment flow (index -> payment ->
# verification -> refund), RazorpayX payout endpoints, and webhook handlers.
# NOTE(review): the route name "create" is used by two different paths below;
# reverse('create') will resolve to only one of them — consider distinct names
# (left unchanged here since templates may reference the existing names).
urlpatterns = [
    path('',index,name="index"),
    path('payment/continue/',payment,name="pay"),
    path('handlerequest/',verifyPayment,name="verify"),
    path('payment/<payid>/refund/',refund,name="refund"),
    path('payments',completed),
    # path('payment/refund/',refund,name="refund"),
    path('payouts/<int:pk>/add_contact/',create_contacts,name="create"),
    path('payouts/<int:id>/add_bank/',create_fund_account,name="create_bank"),
    path('payouts/<int:id>/pay/',create_payout,name="create"),
    # path('payouts/<int:id>/pay/',create_payout,name="create"),
    #####################!-------------- HOOK URLS ----------------##########################
    path('hooks/verify/',VerifyPayHook.as_view()),
    # path('hooks/verify/refund/',VerifyRefundHook.as_view())
]
|
[
"[email protected]"
] | |
d80456331b4a047786914c0b00ae1b4e517dc147
|
3f06e7ae747e935f7a2d1e1bae27a764c36a77d1
|
/day23.py
|
28136ee107dd5557680c2c853d2ec3f553c3faa0
|
[] |
no_license
|
mn113/adventofcode2016
|
94465f36c46e9aa21d879d82e043e1db8c55c9da
|
3a93b23519acbfe326b8bf7c056f1747bbea036a
|
refs/heads/master
| 2022-12-11T22:57:21.937221
| 2022-12-04T16:37:24
| 2022-12-04T16:37:24
| 75,545,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,572
|
py
|
#! /usr/bin/env python
# Find result of long-running instruction set
import sys
import time
def intOrRegisterValue(x):
    # An operand is either a register name ('a'..'d') or a numeric literal;
    # resolve registers via the module-level `registers` dict.
    return registers[x] if x in 'abcd' else int(x)
# Assembunny interpreter (Advent of Code 2016 day 23): four registers a-d,
# instructions inc/dec/cpy/jnz plus the self-modifying 'tgl', and two
# hand-optimised pseudo-instructions ADD/ZER (presumably patched into the
# input file to collapse hot loops — confirm against day23_input.txt).
registers = {x:0 for x in 'abcd'}
registers['a'] = 7 # Part 1
registers['a'] = 12 # Part 2

print "Start:" + str(registers)

with open('day23_input.txt') as fp:
    lines = fp.readlines()
    l = len(lines)

# i is the instruction pointer; jnz/tgl manipulate it directly.
i = 0
while i < l:
    line = lines[i]
    # Progress display on a single rewritten console line.
    sys.stdout.write(str(registers) + ' : ' + str(i) + ' : ' + line)
    sys.stdout.write('\r')
    sys.stdout.flush()
    #print registers
    #print i, '/', l, ':', line
    words = line.split()
    if words[0] == 'tgl':
        # 'tgl x' rewrites the instruction x lines away (self-modifying code).
        toggleDist = intOrRegisterValue(words[1])
        # Convert to integer:
        j = i + toggleDist
        if j < 0 or j >= l:
            # Out of range, start next loop immediately
            i = i + 1
            continue
        elif lines[j][:3] == 'inc': # Toggle inc to dec
            lines[j] = 'dec' + lines[j][3:]
        elif lines[j][:3] == 'dec': # Toggle dec to inc
            lines[j] = 'inc' + lines[j][3:]
        elif lines[j][:3] == 'tgl': # Toggle tgl to inc
            lines[j] = 'inc' + lines[j][3:]
        elif lines[j][:3] == 'jnz': # Toggle jnz to cpy
            lines[j] = 'cpy' + lines[j][3:]
        else:
            # cpy doesn't change when toggled
            pass
        print "Altered", j, lines[j]
    elif words[0] == 'inc':
        reg = words[1]
        registers[reg] = registers[reg] + 1
    elif words[0] == 'dec':
        reg = words[1]
        registers[reg] = registers[reg] - 1
    elif words[0] == 'cpy':
        src = words[1]
        dest = words[2]
        # Copy register?
        registers[dest] = intOrRegisterValue(src)
    elif words[0] == 'jnz':
        # Test:
        to_test = intOrRegisterValue(words[1])
        jumpval = intOrRegisterValue(words[2])
        if to_test != 0:
            i = i + jumpval
            continue # start next loop immediately
    elif words[0] == 'ADD':
        # ADD x y [t]: y += t * x  (multiply-accumulate shortcut).
        times = 1
        if len(words) > 3:
            times = intOrRegisterValue(words[3])
        registers[words[2]] = registers[words[2]] + times * registers[words[1]]
    elif words[0] == 'ZER':
        # ZER x: clear register x.
        registers[words[1]] = 0
    i = i + 1
    time.sleep(0.0)

print "---"
print registers
|
[
"[email protected]"
] | |
43125388e7f13fb3f397da7be3da1133ae9fbb3d
|
0b01cb61a4ae4ae236a354cbfa23064e9057e434
|
/alipay/aop/api/response/KoubeiServindustryPortfolioDataCreateResponse.py
|
e5a08c2fa59d81fa90b88ce8d9d521a12247d995
|
[
"Apache-2.0"
] |
permissive
|
hipacloud/alipay-sdk-python-all
|
e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13
|
bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d
|
refs/heads/master
| 2022-11-14T11:12:24.441822
| 2020-07-14T03:12:15
| 2020-07-14T03:12:15
| 277,970,730
| 0
| 0
|
Apache-2.0
| 2020-07-08T02:33:15
| 2020-07-08T02:33:14
| null |
UTF-8
|
Python
| false
| false
| 778
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class KoubeiServindustryPortfolioDataCreateResponse(AlipayResponse):
    """SDK-generated response model for koubei.servindustry.portfolio.data.create.

    Exposes the single response field `portfolio_id`.
    """

    def __init__(self):
        super(KoubeiServindustryPortfolioDataCreateResponse, self).__init__()
        self._portfolio_id = None

    @property
    def portfolio_id(self):
        # Identifier of the created portfolio; None until a response is parsed.
        return self._portfolio_id

    @portfolio_id.setter
    def portfolio_id(self, value):
        self._portfolio_id = value

    def parse_response_content(self, response_content):
        # Base class decodes the JSON payload; pick out our only field,
        # leaving the current value untouched when the key is absent.
        response = super(KoubeiServindustryPortfolioDataCreateResponse, self).parse_response_content(response_content)
        if 'portfolio_id' in response:
            self.portfolio_id = response['portfolio_id']
|
[
"[email protected]"
] | |
b9e6149164b87a1472585e824d937adcc220d393
|
c0cb1559188be071f1cd91d0adbad0ca850be0e1
|
/problème dame dans un échéquier.py
|
2dd481736794d0a2e70c1647a6960e629ebc9a32
|
[] |
no_license
|
mines-nancy-tcss5ac-2018/td1-cordel8u
|
d7299e661082d57ddf9ed2652c6ba345696d6641
|
a766d874042bae2394aa84fded2ff683647d6ea5
|
refs/heads/master
| 2020-03-31T22:59:50.329544
| 2018-10-11T18:46:45
| 2018-10-11T18:46:45
| 152,639,007
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
Python
| false
| false
| 2,590
|
py
|
from scipy import *
def case_libre(n, L, M):
    """Return a fresh n x n board derived from M on which every square
    attacked by a new queen at (L[0], L[1]) -- its row, column and four
    diagonals -- is marked 1 (no longer available for another queen)."""
    S = array(zeros((n, n)))
    for i in range(n):
        for j in range(n):
            S[i][j] = M[i][j]
    # Mark the queen's row and column.
    for j in range(len(M)):
        S[L[0]][j] = 1
        S[j][L[1]] = 1
    # Mark the four diagonals, walking outwards until the board edge.
    for da, db in ((1, 1), (1, -1), (-1, -1), (-1, 1)):
        a = L[0] + da
        b = L[1] + db
        while 0 <= a < n and 0 <= b < n:
            S[a][b] = 1
            a += da
            b += db
    return array(S)
def verif(M):
    """Return True if at least one cell of board M is 0, i.e. a queen can
    still be placed somewhere."""
    return any(cell == 0 for row in M for cell in row)
def indice(M):
    """Return [row, col] of the first free cell (value 0), scanning rows
    top-down and taking the leftmost 0 within that row."""
    row = 0
    while 0 not in M[row]:
        row += 1
    return [row, list(M[row]).index(0)]
#M=array([[1,2,2],[1,4,0]])
#print(indice(M))
def iteration(d,n,L,N,compte):
    # Backtracking search: explores every way to place the remaining d queens
    # and adds 1 to the counter each time a complete placement is found.
    # L is the stack of queen positions, N the parallel stack of board states
    # (N[-1] is the current board), compte the running solution count.
    # (Original French comments said: function hard to describe in prose,
    # easier to explain with drawings.)

    # Queens left to place and a free square exists: place a queen on the
    # first free square and recurse on the resulting restricted board.
    if d!=0 and verif(N[-1]):
        L.append(indice(N[-1]))
        N.append(case_libre(n,L[-1],N[-1]))
        d-=1
        return(iteration(d,n,L,N,compte))
    # All queens placed: count the solution, then backtrack — pop the last
    # queen and forbid its square on the previous board so it is not retried.
    if d==0:
        compte+=1
        a=L[-1]
        del L[-1]
        del N[-1]
        N[-1][a[0]][a[1]]=1
        d+=1
        return(iteration(d,n,L,N,compte))
    # Dead end (queens remain but no free square): if we are back at the
    # root board the search is exhausted, otherwise backtrack as above.
    if d!=0 and not(verif(N[-1])):
        if len(N)==1:
            return(compte)
        else:
            a=L[-1]
            del L[-1]
            del N[-1]
            N[-1][a[0]][a[1]]=1
            d+=1
            return(iteration(d,n,L,N,compte))
def solve(d, n):
    """Count the ways to place d mutually non-attacking queens on an
    n x n board, starting the search from an empty board."""
    empty_board = array(zeros((n, n)))
    return iteration(d, n, [], [empty_board], 0)
print(solve(4,4))
|
[
"[email protected]"
] | |
b2262ac385c5fdf6442a2e8d4893d66427960a22
|
b54d6a18bc5e86462c1f085386bc48065db5851c
|
/RandLinkVelDist.py
|
e43b4b1d5dc7d36505295ad323282e22a34e50c3
|
[] |
no_license
|
zoshs2/Percolation_Seoul
|
5b5b8ebabe186fbc9e265fc190c3d0641e196517
|
69c0aa99d1f7a2fb9259681a1ed63794cbe5ea5c
|
refs/heads/main
| 2023-07-28T20:50:13.393765
| 2021-09-28T13:25:31
| 2021-09-28T13:25:31
| 390,687,544
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,764
|
py
|
import os
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from datetime import datetime
from statsmodels.nonparametric.kernel_regression import KernelReg
def RandLinkVelDist(date_dataset, sample=20, reg=False, time_step=5, savefig:'file_name'=False):
    '''
    Display the circadian velocity distribution of randomly-selected road samples.

    date_dataset -- one day of link-speed records; must provide columns
                    PRCS_YEAR/MON/DAY/HH/MIN, LINK_ID and PRCS_SPD.
    sample       -- number of LINK_IDs drawn at random.
                    NOTE(review): np.random.choice is used with its default
                    replace=True, so fewer than `sample` distinct roads may
                    be selected — confirm whether replace=False was intended.
    reg          -- if True, plot a kernel-regression smoothing of each
                    velocity series instead of the raw sub-sampled values.
    time_step    -- plotting stride in minutes (data resolution is 5 min).
    savefig      -- False, or a file-name prefix; when given, the selected
                    LINK_IDs go to <name>.txt and the figure to <name>.png
                    in the current working directory.
    '''
    VEL_RESOLUTION = 5  # minutes between consecutive speed records
    timestep = int(time_step / VEL_RESOLUTION)  # stride in record units
    # Reconstruct the dataset's date from the first record (used only to
    # stamp output file names).
    TIME = date_dataset.loc[0, ['PRCS_YEAR', 'PRCS_MON', 'PRCS_DAY', 'PRCS_HH', 'PRCS_MIN']].astype(np.int64).values
    TIME = datetime(TIME[0], TIME[1], TIME[2], TIME[3], TIME[4])
    filename_date = "s" + str(sample) + "_" + str(TIME.strftime("%Y%m%d"))
    # Keep only the randomly drawn LINK_IDs.
    RandData = date_dataset[date_dataset['LINK_ID'].isin(np.random.choice(date_dataset['LINK_ID'].unique(), sample))].reset_index(drop=True)
    TimeIdx = RandData.groupby(['PRCS_HH', 'PRCS_MIN'])['PRCS_SPD'].mean().index # mean() is just used to get a groupy time('Hour', 'Min') index.
    # Build a datetime x-axis ("HH:MM") from the grouped time index.
    time_xaxis = list(map(lambda x : str(format(x[0], '02d'))+':'+str(format(x[1], '02d')), TimeIdx))
    time_xaxis = [datetime.strptime(i, '%H:%M') for i in time_xaxis]
    RandIDs = RandData['LINK_ID'].unique()
    fig = plt.figure(facecolor='w', figsize=(15, 8))
    ax = plt.gca() # Get the Current Axes (GCA)
    cmap = plt.get_cmap('gnuplot')
    colors = [cmap(i) for i in np.linspace(0, 1, sample)]
    # One curve per selected road, colored along the gnuplot colormap.
    for i, ID in enumerate(RandIDs):
        RandOne = RandData[RandData['LINK_ID']==ID].sort_values(by=['PRCS_HH', 'PRCS_MIN'])
        VelHist = RandOne['PRCS_SPD'].values
        if reg is True:
            # Smooth the series with a kernel regression (bandwidth 5) over
            # the record index, then plot the smoothed curve at full
            # resolution (time_step is ignored in this branch).
            VelShape = VelHist.shape[0]
            kde = KernelReg(endog=VelHist, exog=np.arange(VelShape), var_type='c', bw=[5])
            estimator = kde.fit(np.arange(VelShape))
            estimator = np.reshape(estimator[0], VelShape)
            plt.plot(time_xaxis, estimator, c=colors[i], label=str(ID))
            continue
        plt.plot(time_xaxis[::timestep], VelHist[::timestep], c=colors[i], label=str(ID))
    fmt = mpl.dates.DateFormatter('%H:%M')
    ax.xaxis.set_major_formatter(fmt)
    fig.autofmt_xdate()
    ax.set_ylabel('Velocity (km/h)', fontsize=18)
    ax.set_xlabel('Time', fontsize=18)
    if savefig is not False:
        # Persist both the list of sampled LINK_IDs and the figure itself.
        filename = savefig + "_RandLinkVelDist_" + filename_date
        if reg is True:
            filename = "(Reg)" + filename
        with open(filename+'.txt', 'w') as f:
            for ID in RandIDs:
                f.write("{}\n".format(ID))
        print(filename, ".txt saved on ", os.getcwd())
        print(filename, ".png saved on ", os.getcwd())
        plt.savefig(filename + ".png")
    plt.show()
    return
|
[
"[email protected]"
] | |
c91a7b5364ed05d94b915ad3edca42e51af1ea75
|
f11600b9a256bf6a2b584d127faddc27a0f0b474
|
/normal/662.py
|
df18e5f15146532ef8f12376b46b3043f70c7355
|
[] |
no_license
|
longhao54/leetcode
|
9c1f0ce4ca505ec33640dd9b334bae906acd2db5
|
d156c6a13c89727f80ed6244cae40574395ecf34
|
refs/heads/master
| 2022-10-24T07:40:47.242861
| 2022-10-20T08:50:52
| 2022-10-20T08:50:52
| 196,952,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
class Solution:
    def widthOfBinaryTree(self, root: TreeNode) -> int:
        """Return the maximum width of the binary tree.

        A node at position p has children at positions 2p and 2p+1; the
        width of a level is (rightmost position - leftmost position + 1),
        counting absent nodes in between.

        Fix: the original popped items from the front of a list
        (list.pop(0) is O(n), giving O(n^2) per level) and shadowed its
        loop counter `i` with the node position; this version walks the
        tree level by level with plain list swaps.
        """
        widest = 1
        level = [(root, 1)]  # (node, position) pairs of the current level
        while level:
            positions = [pos for _, pos in level]
            widest = max(widest, max(positions) - min(positions) + 1)
            next_level = []
            for node, pos in level:
                if node.left:
                    next_level.append((node.left, 2 * pos))
                if node.right:
                    next_level.append((node.right, 2 * pos + 1))
            level = next_level
        return widest
|
[
"[email protected]"
] | |
3fd3878a08b3f0b3f00dac287d62c71984f01380
|
c1bd12405d244c5924a4b069286cd9baf2c63895
|
/azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/application_gateway_available_waf_rule_sets_result_py3.py
|
1d90cb1f1470bffbefbb643312ec48f97b2613b3
|
[
"MIT"
] |
permissive
|
lmazuel/azure-sdk-for-python
|
972708ad5902778004680b142874582a284a8a7c
|
b40e0e36cc00a82b7f8ca2fa599b1928240c98b5
|
refs/heads/master
| 2022-08-16T02:32:14.070707
| 2018-03-29T17:16:15
| 2018-03-29T17:16:15
| 21,287,134
| 1
| 3
|
MIT
| 2019-10-25T15:56:00
| 2014-06-27T19:40:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,104
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationGatewayAvailableWafRuleSetsResult(Model):
    """Response for the ApplicationGatewayAvailableWafRuleSets API service
    call.

    :param value: The list of application gateway rule sets.
    :type value:
     list[~azure.mgmt.network.v2017_09_01.models.ApplicationGatewayFirewallRuleSet]
    """

    # msrest serialization map: wire key -> deserialized type.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationGatewayFirewallRuleSet]'},
    }

    def __init__(self, *, value=None, **kwargs) -> None:
        # Zero-argument super() — equivalent to the explicit
        # super(ApplicationGatewayAvailableWafRuleSetsResult, self) form.
        super().__init__(**kwargs)
        self.value = value
|
[
"[email protected]"
] | |
56052fc5690dc0fbd9529a96cbe1b602c35676a9
|
dfc827bf144be6edf735a8b59b000d8216e4bb00
|
/CODE/postprocessing/Thesis/GaussBump/SimpleRead.py
|
d19224edb9ae2717dba2faecbed8532dbb7825c9
|
[] |
no_license
|
jordanpitt3141/ALL
|
c5f55e2642d4c18b63b4226ddf7c8ca492c8163c
|
3f35c9d8e422e9088fe096a267efda2031ba0123
|
refs/heads/master
| 2020-07-12T16:26:59.684440
| 2019-05-08T04:12:26
| 2019-05-08T04:12:26
| 94,275,573
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,952
|
py
|
import csv
from numpy.linalg import norm
from scipy import *
import os
from pylab import plot, show, legend,xlim,ylim,savefig,title,xlabel,ylabel,clf, loglog
from numpy import ones
# Input (solver output) and output (figure data) directories.
wdir = "/home/jp/Documents/PhD/project/data/ThesisRedo2019/DryForced/FEVM2NoRegTol/12/"
sdir = "/home/jp/Documents/PhD/project/master/FigureData/ThesisRedo/DryForced/FEVM2/Ex/"

if not os.path.exists(sdir):
    os.makedirs(sdir)

ts = "10.0"  # simulation time (seconds) encoded in the file names
gap = 8      # keep every 8th sample to thin the data


def _read_solution(path, stride):
    """Read the solver CSV at *path* and return the thinned columns
    (x, h, G, u, b, w) as numpy arrays.

    The first row is treated as a header and skipped; every *stride*-th
    remaining row is kept.
    """
    x, h, G, u, b, w = [], [], [], [], [], []
    with open(path, 'r') as infile:
        reader = csv.reader(infile, delimiter=',', quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        for row_number, row in enumerate(reader):
            if row_number == 0:
                continue  # header row
            x.append(float(row[0]))
            h.append(float(row[1]))
            G.append(float(row[2]))
            u.append(float(row[3]))
            b.append(float(row[4]))
            w.append(float(row[5]))
    return (array(x[::stride]), array(h[::stride]), array(G[::stride]),
            array(u[::stride]), array(b[::stride]), array(w[::stride]))


def _write_series(path, xs, ys):
    """Write paired (x, y) samples to *path*, one formatted pair per line.

    Replaces five copy-pasted write loops from the original script; the
    output format string is unchanged.
    """
    with open(path, 'w') as outfile:
        for xv, yv in zip(xs, ys):
            outfile.write("%3.8f%5s%1.20f\n" % (xv, " ", yv))


x, h, G, u, b, w = _read_solution(wdir + "outList" + ts + "s.txt", gap)
n = len(x)

# One output file per quantity, named after the field it contains.
_write_series(sdir + "Stage" + ts + "s.dat", x, w)
_write_series(sdir + "Bed" + ts + "s.dat", x, b)
_write_series(sdir + "h" + ts + "s.dat", x, h)
_write_series(sdir + "u" + ts + "s.dat", x, u)
_write_series(sdir + "G" + ts + "s.dat", x, G)
|
[
"[email protected]"
] | |
cd388b1fa34c8b7c139387d2f9da86e2be08a184
|
bf9c1aa7ac16d467921affa7381dae301e0a1308
|
/apps/articles/urls.py
|
4ba8201ad1d278b04f4c849955da1484c39b3dd6
|
[] |
no_license
|
clincher/ecigar
|
ec12223bbbcad383e30ea588babee0a89b15db9d
|
f534bee7ede5c3af882792616c440c7736193fd0
|
refs/heads/master
| 2020-12-24T15:14:18.688748
| 2016-12-26T00:29:44
| 2016-12-26T00:29:44
| 2,352,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
from django.conf.urls.defaults import patterns, url
from django.views.generic import ListView, DetailView
from models import Article
# URL routes for the articles app (legacy pre-Django-1.8 patterns() style).
urlpatterns = patterns('',
    # Article index: flat list of all articles at /stat.html.
    url(r'^stat.html$',
        ListView.as_view(model=Article),
        name='article_list'
    ),
    # Article detail: the slug between "stat" and ".html" selects the article.
    url(r'^stat(?P<slug>[0-9A-Za-z-_.//]+).html$',
        DetailView.as_view(model=Article),
        name='article_detail'
    ),
)
|
[
"[email protected]"
] | |
d706cbc2c581af29582c417ee42d30c6d487eef0
|
ad715f9713dc5c6c570a5ac51a18b11932edf548
|
/tensorflow/lite/testing/op_tests/scatter_nd.py
|
8a365ae5b96365937c5c2c28468aa81e1870ed84
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
rockzhuang/tensorflow
|
f1f31bc8edfa402b748c500efb97473c001bac95
|
cb40c060b36c6a75edfefbc4e5fc7ee720273e13
|
refs/heads/master
| 2022-11-08T20:41:36.735747
| 2022-10-21T01:45:52
| 2022-10-21T01:45:52
| 161,580,587
| 27
| 11
|
Apache-2.0
| 2019-01-23T11:00:44
| 2018-12-13T03:47:28
|
C++
|
UTF-8
|
Python
| false
| false
| 2,856
|
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for scatter_nd."""
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_scatter_nd_tests(options):
  """Make a set of tests to do scatter_nd."""

  # Two scenarios: scatter into a 1-D tensor of length 8, and scatter
  # row-updates (via 2-component indices) into a 2x3x5 tensor.  Updates
  # are exercised with int32/int64/float32/bool dtypes.
  test_parameters = [{
      "indices_dtype": [tf.int32],
      "indices_shape": [[4, 1]],
      "indices_value": [[[4], [3], [1], [7]]],
      "updates_dtype": [tf.int32, tf.int64, tf.float32, tf.bool],
      "updates_shape": [[4]],
      "shape_dtype": [tf.int32],
      "shape_shape": [[1]],
      "shape_value": [[8]]
  }, {
      "indices_dtype": [tf.int32],
      "indices_shape": [[4, 2]],
      "indices_value": [[[0, 0], [1, 0], [0, 2], [1, 2]]],
      "updates_dtype": [tf.int32, tf.int64, tf.float32, tf.bool],
      "updates_shape": [[4, 5]],
      "shape_dtype": [tf.int32],
      "shape_shape": [[3]],
      "shape_value": [[2, 3, 5]]
  }]

  def build_graph(parameters):
    """Build the scatter_nd op testing graph."""
    # Three placeholders mirror the op's signature: indices, updates and
    # the dense output shape.
    indices = tf.compat.v1.placeholder(
        dtype=parameters["indices_dtype"],
        name="indices",
        shape=parameters["indices_shape"])
    updates = tf.compat.v1.placeholder(
        dtype=parameters["updates_dtype"],
        name="updates",
        shape=parameters["updates_shape"])
    shape = tf.compat.v1.placeholder(
        dtype=parameters["shape_dtype"],
        name="shape",
        shape=parameters["shape_shape"])
    out = tf.scatter_nd(indices, updates, shape)
    return [indices, updates, shape], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed concrete values (fixed indices/shape, random updates) and
    capture the reference outputs."""
    indices = np.array(parameters["indices_value"])
    updates = create_tensor_data(parameters["updates_dtype"],
                                 parameters["updates_shape"])
    shape = np.array(parameters["shape_value"])
    return [indices, updates, shape], sess.run(
        outputs, feed_dict=dict(zip(inputs, [indices, updates, shape])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
[
"[email protected]"
] | |
6c708d71414961bfd27dd63946aaa70d181350d5
|
6dc463ce97fc275787cfdef563317f3f7e4f5fcf
|
/radio_table_widget_app/widgets.py
|
557a613116b686330885748746143cf0bdc904d1
|
[] |
no_license
|
chapkovski/table_radio_widget
|
7ea7506d801213cb24a832096fbf88ab7eb89c92
|
320a2b2f5462c6abe8bd0a355b1b4ac8defe3adf
|
refs/heads/master
| 2020-03-22T09:29:23.298900
| 2018-07-06T17:24:23
| 2018-07-06T17:24:23
| 139,840,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
from django.forms import RadioSelect
class TableRadio(RadioSelect):
    """RadioSelect widget rendered as a table with optional extra header
    and footer rows.

    *top_row* and *bottom_row* are passed through to the template context
    unchanged; the templates below decide how to render them.
    """

    template_name = 'widgets/multiple_input.html'
    option_template_name = 'widgets/input_option.html'

    def __init__(self, top_row=None, bottom_row=None, attrs=None, choices=()):
        self.top_row = top_row
        self.bottom_row = bottom_row
        # Fix: __init__ must not return a value; the original returned the
        # (None) result of super().__init__, which is misleading.
        super().__init__(attrs, choices)

    def get_context(self, name, value, attrs):
        """Extend the default widget context with the extra rows and an
        equal per-column width percentage.

        NOTE(review): divides by len(self.choices) — a widget rendered
        with zero choices would raise ZeroDivisionError; confirm choices
        is always non-empty.
        """
        context = super().get_context(name, value, attrs)
        context['top_row'] = self.top_row
        context['bottom_row'] = self.bottom_row
        context['col_width'] = 100 / len(self.choices)
        return context
|
[
"[email protected]"
] | |
8e22d1ea23f7ca524327b2070d521659d9c3922e
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/3gziWsCxqGwGGZmr5_11.py
|
6392e84ea4d1c88aaa1154379e9f4d945b640bab
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
"""
Given two integers as arguments, create a function that finds the largest
prime within the range of the two integers.
### Examples
fat_prime(2, 10) ➞ 7
# range [2, 3, 4, 5, 6, 7, 8, 9, 10] and the largest prime is 7.
fat_prime(10, 2) ➞ 7
# [10, 9, 8, 7, 6, 5, 4, 3, 2] and the largest prime is 7.
fat_prime(4, 24) ➞ 23
# range [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] the largest prime is 23.
### Notes
All numbers will be positive integers.
"""
def fat_prime(a, b):
    """Return the largest prime in the inclusive range between a and b
    (in either order), or None when the range contains no prime.

    Fix: the original scanned range(max, min, -1), which excludes the
    lower bound — e.g. fat_prime(2, 2) wrongly returned None.
    """
    low = min(a, b)
    high = max(a, b)
    for candidate in range(high, low - 1, -1):
        if is_prime(candidate):
            return candidate
    return None


def is_prime(n):
    """Return True when n is a prime number.

    Fixes: the original reported 0 and 1 as prime (empty trial range);
    trial division now also stops at sqrt(n) instead of n.
    """
    if n < 2:
        return False
    for divisor in range(2, int(n ** 0.5) + 1):
        if n % divisor == 0:
            return False
    return True
|
[
"[email protected]"
] | |
20a405147dc239db1af8b180b78f4310c43f38b0
|
ae66ad38a7b19c01f1099d671dd127716a5d4c34
|
/accounts/migrations/0025_auto_20180511_1233.py
|
e1d9e06c37affbd3e572a0f042dd681de84ec054
|
[] |
no_license
|
selbieh/django-freelacer-website
|
6fd1eb009e9b30738bfa59fa78f530144b273231
|
0971a7fc3dc7e63a1909bb6adf3a84d7d9083324
|
refs/heads/master
| 2022-11-22T19:07:48.470928
| 2019-11-24T12:24:26
| 2019-11-24T12:24:26
| 172,359,908
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
# Generated by Django 2.0.4 on 2018-05-11 10:33
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: moves the profile picture and resume
    # uploads of UserProfile under the 'profile/' directory prefix.

    dependencies = [
        ('accounts', '0024_auto_20180511_1229'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='pic',
            field=models.ImageField(upload_to='profile/profile_pic'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='resume',
            field=models.FileField(upload_to='profile/resume'),
        ),
    ]
|
[
"[email protected]"
] | |
02572ac0d7a899647d2e88f1a95a0f55337c7e01
|
fc1c1e88a191b47f745625688d33555901fd8e9a
|
/meraki/models/protocol_4_enum.py
|
a5a84ca844f12ecbee618d6942e1886545423e86
|
[
"MIT",
"Python-2.0"
] |
permissive
|
RaulCatalano/meraki-python-sdk
|
9161673cfd715d147e0a6ddb556d9c9913e06580
|
9894089eb013318243ae48869cc5130eb37f80c0
|
refs/heads/master
| 2022-04-02T08:36:03.907147
| 2020-02-03T19:24:04
| 2020-02-03T19:24:04
| 416,889,849
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
# -*- coding: utf-8 -*-
"""
meraki
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class Protocol4Enum(object):

    """Implementation of the 'Protocol4' enum.

    The protocol of the incoming packet. Can be one of "ANY", "TCP" or "UDP".
    Default value is "ANY"

    Attributes:
        ANY: Match packets of any protocol.
        TCP: Match TCP packets only.
        UDP: Match UDP packets only.

    """

    ANY = 'ANY'

    TCP = 'TCP'

    UDP = 'UDP'
|
[
"[email protected]"
] | |
651b60b515fe7843967505febf81ecf3864711a5
|
a7f39db24ce26ab0f02650ffd97007222aa536c5
|
/so.guishiwen.org_shiwen.py
|
1c43a5299d50000c95b371c9ff9420f1d01ebc75
|
[] |
no_license
|
hpifu/py-ancient
|
6f51067c4c6ef1adb8241994e03dccb29e35b501
|
a845e86057432a39f8239263aa7bf0e97c3f4c76
|
refs/heads/master
| 2022-12-13T00:40:09.735917
| 2019-10-27T05:21:52
| 2019-10-27T05:21:52
| 204,660,319
| 0
| 0
| null | 2022-07-06T20:16:22
| 2019-08-27T08:48:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
#!/usr/bin/env python3
import requests
from pyquery import PyQuery as pq
www = "https://so.gushiwen.org/shiwen"
def getPage(url):
    """Fetch *url* with browser-like request headers and return the
    response body as text."""
    browser_headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36",
    }
    response = requests.get(url, headers=browser_headers)
    return response.text
def analyst(text):
    """Parse one poem-listing page.

    Returns (poems, next_href) where poems is a list of dicts with keys
    name/dynasty/author/tags/content, and next_href is the relative URL of
    the next page (or None when absent).
    """
    doc = pq(text)
    poems = []
    for entry in doc("div.sons").items():
        title = entry("p b").text()
        if not title:
            continue  # not a poem block
        source_links = list(entry("p.source a").items())
        poems.append({
            "name": title,
            "dynasty": source_links[0].text(),
            "author": source_links[1].text(),
            "tags": entry("div.tag a").text(),
            "content": entry("div.contson").text(),
        })
    next_href = doc("div.pagesright a.amore").attr("href")
    return poems, next_href
def main():
    """Fetch the first listing page and print the parsed result."""
    page_html = getPage(www + "/default_4A111111111111A1.aspx")
    print(analyst(page_html))


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
db32253902147d6de63a312faa4dc7a41e150337
|
c016088a3bdb255d4f5253185d27b5a4c75feb1b
|
/11_testing_your_code/11_3/employee.py
|
919260baf759a0a8360fcd951f4ce7399a8e2888
|
[
"MIT"
] |
permissive
|
simonhoch/python_basics
|
b0b7c37ff647b653bb4c16a116e5521fc6b438b6
|
4ecf12c074e641e3cdeb0a6690846eb9133f96af
|
refs/heads/master
| 2021-04-03T10:11:10.660454
| 2018-03-13T20:04:46
| 2018-03-13T20:26:25
| 125,107,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 628
|
py
|
class Employee():
    """Simple attempt to describe an employee."""

    def __init__(self, first_name, last_name, annual_salary):
        """Store the employee's title-cased names and starting salary."""
        self.first_name = first_name.title()
        self.last_name = last_name.title()
        self.annual_salary = annual_salary

    def give_a_raise(self, salary_raise=5000):
        """Increase the annual salary (default raise: 5000)."""
        self.annual_salary = self.annual_salary + salary_raise

    def edit_informations(self):
        """Print the employee's name and current salary."""
        print(f"{self.first_name}, {self.last_name}, salary: {self.annual_salary}")
|
[
"[email protected]"
] | |
c08a06098466014eebcd6ca0f27fc1259e9c1c1a
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/directconnect_write_f/virtual-interface_delete.py
|
80e3a4f24ad845af820b46f860e9d930d4b0b178
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    """
    associate-virtual-interface : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/directconnect/associate-virtual-interface.html
    describe-virtual-interfaces : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/directconnect/describe-virtual-interfaces.html
    """
    # Collect/write the parameters for the AWS CLI
    # "directconnect delete-virtual-interface" command.
    # NOTE(review): write_parameter comes from common.execute_command;
    # its exact behavior is not visible in this file.
    write_parameter("directconnect", "delete-virtual-interface")
|
[
"[email protected]"
] | |
6e2d152884470b76fab1e4f8be0c8476ae6e0fb1
|
f29e8c30b9f7b66cb66bfb634608adec74a4aee0
|
/012desafio - ler preco, retorna desconto.py
|
f26b56ea286e0bc9a308999fc0202fc8be53aad4
|
[] |
no_license
|
brunoparodi/Curso-GUANABARA
|
acafe1390ccd2ba5648ca30f73f54b95a6c57201
|
16b7a293a54f1a471fa07830bc66709a88fceb79
|
refs/heads/master
| 2020-04-24T09:16:12.095977
| 2019-02-21T11:17:01
| 2019-02-21T11:17:01
| 171,857,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
# Read a product price from the user and print it with a 5% discount applied.
preco = float(input('Informe o preço do produto: R$'))
valor_com_desconto = preco - preco * 0.05  # 5% off
print('O preço com desconto é: R${:.2f}.'.format(valor_com_desconto))
|
[
"[email protected]"
] | |
faaabc87d530eda66341796909e94a28dc6d25c5
|
b306aab9dcea2dd83dda700bc9f7b9f1a32cff3a
|
/CAIL2020/cocr/det_infer.py
|
a89f3a16932ef293bd9a8018db8f313597098ffd
|
[
"Apache-2.0"
] |
permissive
|
Tulpen/CAIL
|
d6ca9981c7ea2603ae61675ba330a9614cd9398d
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
refs/heads/master
| 2023-04-23T20:07:56.774530
| 2021-04-16T13:18:36
| 2021-04-16T13:18:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,839
|
py
|
import argparse
import os
import sys
import pathlib
import torch
from torch import nn
from torchvision import transforms
from torchocr.networks import build_model
from torchocr.datasets.det_modules import ResizeShortSize
from torchocr.postprocess import build_post_process
import cv2
from matplotlib import pyplot as plt
from torchocr.utils import draw_ocr_box_txt, draw_bbox
class DetInfer:
    """Text-detection inference wrapper: loads a checkpoint (weights +
    training config) and turns an RGB image into text-region boxes."""

    def __init__(self, model_path):
        # The checkpoint bundles both the training config and the weights.
        ckpt = torch.load(model_path, map_location='cpu')
        cfg = ckpt['cfg']
        self.model = build_model(cfg.model)
        # Strip the DataParallel 'module.' prefix so the weights load into
        # a plain (non-wrapped) model.
        state_dict = {}
        for k, v in ckpt['state_dict'].items():
            state_dict[k.replace('module.', '')] = v
        self.model.load_state_dict(state_dict)
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.model.to(self.device)
        self.model.eval()
        # Same resize and normalization as used during training.
        self.resize = ResizeShortSize(736, False)
        self.post_proess = build_post_process(cfg.post_process)
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=cfg.dataset.train.dataset.mean, std=cfg.dataset.train.dataset.std)
        ])

    def predict(self, img, is_output_polygon=False):
        """Run detection on one RGB image.

        Returns (box_list, score_list); boxes are polygons when
        is_output_polygon is True, else quadrilaterals.
        """
        # Preprocess exactly as during training.
        data = {'img': img, 'shape': [img.shape[:2]], 'text_polys': []}
        data = self.resize(data)
        tensor = self.transform(data['img'])
        tensor = tensor.unsqueeze(dim=0)  # add batch dimension
        tensor = tensor.to(self.device)
        out = self.model(tensor)
        box_list, score_list = self.post_proess(out, data['shape'], is_output_polygon=is_output_polygon)
        box_list, score_list = box_list[0], score_list[0]
        if len(box_list) > 0:
            # Drop degenerate all-zero boxes produced by the post-process.
            idx = [x.sum() > 0 for x in box_list]
            box_list = [box_list[i] for i, v in enumerate(idx) if v]
            score_list = [score_list[i] for i, v in enumerate(idx) if v]
        else:
            box_list, score_list = [], []
        return box_list, score_list
def init_args():
    """Build and parse the command-line arguments for detection inference."""
    import argparse
    parser = argparse.ArgumentParser(description='PytorchOCR infer')
    parser.add_argument('--model_path', required=False, type=str, help='rec model path', default=r'F:\CAIL\CAIL2020\cocr\model\db_ResNet50_vd_icdar2015withconfig.pth')
    parser.add_argument('--img_path', required=False, type=str, help='img path for predict', default=r'F:\CAIL\CAIL2020\cocr\data\icdar2015\detection\test\imgs\img_2.jpg')
    return parser.parse_args()
def resize(img, scale_percent=60):
    """Return *img* scaled to *scale_percent* percent of its original size.

    Fix: the original immediately overwrote the scale_percent argument
    with the hard-coded value 60, so callers could never change the scale
    (the call site passing img.shape[0]*100./1024 silently got 60%).
    """
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    new_size = (width, height)
    # INTER_AREA interpolation, as in the original.
    return cv2.resize(img, new_size, interpolation=cv2.INTER_AREA)
if __name__ == '__main__':
    # ===> Parse command-line / config-file arguments
    parser = argparse.ArgumentParser(description='train')
    parser.add_argument('--config', type=str, default='config/det.json',
                        help='train config file path')
    parser.add_argument('-m','--model_path', required=False, type=str, help='rec model path', default=r'F:\CAIL\CAIL2020\cocr\model\det-model.bin')
    parser.add_argument('-i','--img_path', required=False, type=str, help='img path for predict', default=r'F:\CAIL\CAIL2020\cocr\data\t2\architecture (1).jpg')
    args = parser.parse_args()
    # for i in range(1,11):
    # Load the image as RGB (cv2 reads BGR).
    img = cv2.imread(args.img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if img.shape[0] > 1500:
        # NOTE(review): resize() above ignores its second argument
        # (hard-coded 60%), so this scale factor currently has no effect;
        # also shape[0]*100./1024 > 100 would enlarge, not shrink — confirm
        # whether 1024*100./shape[0] was intended.
        img = resize(img, img.shape[0]*100./1024)
    model = DetInfer(args.model_path)
    box_list, score_list = model.predict(img, is_output_polygon=True)
    # Draw the detected regions twice (text overlay + raw boxes) and show.
    img = draw_ocr_box_txt(img, box_list)
    img = draw_bbox(img, box_list)
    plt.imshow(img)
    plt.show()
|
[
"[email protected]"
] | |
4293b3acde9fd16c7d98f4e36d670978acca31a3
|
f73f5f5d0770f731b5e76da39131ff36c9fde11e
|
/django_libs/tests/models_tests.py
|
7c46c414daad0a285198d5f569b7e8cfa6ef2ad1
|
[
"MIT"
] |
permissive
|
SurferTank/django-libs
|
fcede8d7dff4ea58c728d05ff0030a3ce892a08e
|
6ad3f7cf5f9a7a4848557d73af4a93054b34e27f
|
refs/heads/master
| 2021-02-09T01:28:32.153104
| 2020-10-26T03:11:23
| 2020-10-26T03:11:23
| 244,222,230
| 0
| 0
|
MIT
| 2020-03-01T20:55:22
| 2020-03-01T20:55:21
| null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
"""Tests for the models of the ``django_libs`` app."""
from django.test import TestCase
from ..models import ColorField
from ..widgets import ColorPickerWidget
class ColorFieldTestCase(TestCase):
    """Tests for the ``ColorField`` model."""
    longMessage = True

    def test_functions(self):
        # The form field generated by a ColorField must render with the
        # custom color-picker widget.
        color_field = ColorField()
        color_field.formfield
        self.assertIsInstance(
            color_field.formfield().widget, ColorPickerWidget, msg=(
                'Should add the color field widget.'))
|
[
"[email protected]"
] | |
4467952bc3588edaf21b854d2cb536eb9a03be12
|
03034837c5f10d19fcc4dc51388f056ec43fd1d2
|
/pro21.py
|
a5fa31a3792fe95d280dc9afd594c679ff30987a
|
[] |
no_license
|
shaukhk01/project01
|
e95c19844757c631f7ffbdd910b20316f49a945b
|
79cfe784612fdbb4816c9fc3fc7222c845a3268f
|
refs/heads/master
| 2020-06-26T07:20:49.844532
| 2019-08-20T06:06:40
| 2019-08-20T06:06:40
| 199,569,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
import re
def main():
    """Print the start offset and text of every non-overlapping run of
    exactly two consecutive 'a' characters in the sample string."""
    for hit in re.finditer('a{2}', 'abcdeaakkkaa'):
        print(hit.start(), '--', hit.group())


main()
|
[
"[email protected]"
] | |
175b16aa461473aa8fbeb39f96459c4ddc826859
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02937/s405333986.py
|
77f923e46db0a03de4ebb7d4023a7c9648601069
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
# Reads strings s and t from stdin and prints the smallest i such that t is
# a subsequence of the first i characters of s repeated infinitely
# (competitive-programming problem; prints -1 when t uses a character
# absent from s).
s=list(input())
t=list(input())
ns=len(s)
nt=len(t)
t_set=list(set(t))
s_set=list(set(s))
# If any character of t never occurs in s, t can never be a subsequence.
for i in range(len(t_set)):
    if t_set[i] not in s_set:
        print(-1)
        exit()
from collections import defaultdict, deque
from bisect import bisect_right
# ds maps each character to the sorted list of its positions in s.
ds=defaultdict(list)
for i in range(ns):
    ds[s[i]].append(i)
# components[i]: number of occurrences of chr(i+97) in s.
components=[0]*26
for i in range(26):
    components[i]=len(ds[chr(i+97)])
# lt[i]: position within one copy of s where t[i] is matched.
lt=[-1]*nt
ord('a')  # no-op leftover; kept for byte-identical behavior
last=-1
for i in range(nt):
    # First occurrence of t[i] strictly after `last` in the current copy.
    j=bisect_right(ds[t[i]],last)
    if j==components[ord(t[i])-97]:
        # None left in this copy: wrap to the first occurrence in the next.
        lt[i]=ds[t[i]][0]
    else:
        lt[i]=ds[t[i]][j]
    last=lt[i]
# kuriage counts how many times the scan wrapped into a new copy of s.
kuriage=0
for i in range(1,nt):
    if lt[i]<=lt[i-1]:
        kuriage+=1
print(kuriage*ns+lt[-1]+1)
|
[
"[email protected]"
] | |
4ad57e5623a534930577b2344a2f132f793c8bb5
|
7dc295d045982180f89e2bca204148c715dcdd8c
|
/using_context/using_redirection.py
|
b3b04bf12790a109758b99bd39c6bd769572946a
|
[] |
no_license
|
onionmccabbage/AdvancedPythonMay2021
|
0c582e2502672c5d0974a46da1f689ac44c41728
|
5194fb191f8d01521d54d6867084ae6845a3726c
|
refs/heads/main
| 2023-04-20T07:32:29.119310
| 2021-05-13T14:53:01
| 2021-05-13T14:53:01
| 366,090,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
# we can write a class to redirect the context (simple solution)
import sys # sys is in control of inputs and outputs
class Redirect:
    '''
    Provide an easy way to redirect the standard output
    (which defaults to printing to the console).
    '''

    def __init__(self, new_stdout):
        # Stream that will receive everything written to stdout while the
        # context is active.
        self.new_stdout = new_stdout

    def __enter__(self):
        '''Swap sys.stdout for the replacement stream.'''
        # Remember the current stdout so __exit__ can restore it.
        self.save_stdout = sys.stdout
        sys.stdout = self.new_stdout
        # Fix: return self so "with Redirect(f) as r:" binds the manager
        # (the original returned None, the usual context-manager idiom is
        # to return the manager or the resource).
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        '''Restore the original stdout; exceptions propagate (returns None).'''
        sys.stdout = self.save_stdout
if __name__ == '__main__':
    # print(sys.stdout)
    # Demo: append one line to mylog.txt through the redirection, then
    # print normally to the console again.
    with open('mylog.txt', 'a') as fobj: # open a file access object
        with Redirect(fobj):
            print('this gets printed to our log file') # look - no file reference
    print('this will print to the console') # back to stdout default
|
[
"[email protected]"
] | |
ba5bf4df83c1a5e401c6ac4d470108fae419940f
|
25bb4e760769cc483a20f27b6312698891dce034
|
/python/Closures and Decorators/decorators-2-name-directory-English.py
|
529c7f5310dd4be88cfeca5669d97fbd3c92bd2b
|
[] |
no_license
|
rangaeeeee/codes-hackerrank
|
e13d22adff1ef74974e34251d9bfac6cfd36f2b0
|
ce7fdf7f336c10164fd2f779d4ed3713849d7c2b
|
refs/heads/master
| 2021-01-19T17:07:28.451983
| 2017-09-01T18:05:33
| 2017-09-01T18:05:33
| 101,049,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
from operator import itemgetter
def person_lister(f):
    """Decorator: sort the people list by age (field index 2, as int,
    stable sort) before applying the formatter f to each entry."""
    def inner(people):
        ordered = sorted(people, key=lambda person: int(person[2]))
        return [f(person) for person in ordered]
    return inner
|
[
"[email protected]"
] | |
26b18e37eff8d9418bc37752e4f8fe2f947df0b1
|
07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8
|
/lib/python3.6/site-packages/tensorflow/contrib/predictor/contrib_estimator_predictor.py
|
afeb0cc8d0fc8739a534d1ebdf77758c20ae8948
|
[] |
no_license
|
cronos91/ML-exercise
|
39c5cd7f94bb90c57450f9a85d40c2f014900ea4
|
3b7afeeb6a7c87384049a9b87cac1fe4c294e415
|
refs/heads/master
| 2021-05-09T22:02:55.131977
| 2017-12-14T13:50:44
| 2017-12-14T13:50:44
| 118,736,043
| 0
| 0
| null | 2018-01-24T08:30:23
| 2018-01-24T08:30:22
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:f616a461aa1df558fae47ff2bc5c7d16ceb00620172d2724deccf893b3da6f46
size 3152
|
[
"[email protected]"
] | |
b327b6904a68a6fac9133923566f52491e3e7255
|
96db160b6075e49101686eb4947fefb2e0909985
|
/Store/views.py
|
3cba59847230429be847c64618fcdb291698a251
|
[] |
no_license
|
hdforoozan/Restaurant
|
7c43b1c89e8edc504a27dac2515313b979069c88
|
d9420dc5dcd42bcb6c5952474ef996845ec4381c
|
refs/heads/master
| 2022-12-09T13:38:57.970747
| 2019-09-29T20:45:10
| 2019-09-29T20:45:10
| 208,814,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,784
|
py
|
from datetime import datetime
from django.shortcuts import render
from .models import Store, Employee, Manager
from Food.models import Food
from django.urls import reverse_lazy
from django.views.generic import TemplateView,DetailView,ListView, CreateView,DeleteView,UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from Cart.forms import CartAddFoodForm
from Order.models import Order
from Comment.forms import CommentForm
from Comment.models import Comment
from Food.forms import SearchForm
class HomePageView(TemplateView):
    # Landing page: promoted food lists plus the search form.
    template_name = 'home.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # NOTE(review): "most sell" is approximated by a name filter
        # (name contains 'p'), not by actual sales data — confirm intent.
        context['most_sell_foods'] = Food.objects.filter(name__icontains='p')
        # Foods priced at 10 or less.
        context['cheapest_foods'] = Food.objects.filter(price__lte=10)
        context['search_form'] = SearchForm()
        return context
##############################################################
# Store Model Views
##############################################################
class StoreListView(LoginRequiredMixin, ListView):
    # Login-protected list of all stores, exposed as 'stores' in templates.
    model = Store
    context_object_name = 'stores'
class StoreDetailView(LoginRequiredMixin, DetailView):
    model = Store
    context_object_name = 'store'

    def get_context_data(self, **kwargs):
        """Add the store's available foods, its employees, and the store's
        income from paid orders to the template context."""
        context = super().get_context_data(**kwargs)
        store = Store.objects.get(id=self.kwargs['pk'])
        # Foods offered by this store that are not sold out.
        context['foods'] = Food.objects.filter(stores=store).filter(run_out=False)
        context['employees'] = Employee.objects.filter(store__id=self.kwargs['pk'])
        # Fix: the original loaded every paid order of every store and
        # compared item.store_id (an int) against the raw URL kwarg (a
        # string under regex URL patterns), so the sum could silently stay
        # 0.  Filtering in the queryset lets the ORM coerce the value and
        # restricts the scan to this store's orders.
        paid_store_orders = Order.objects.filter(paid=True,
                                                 store_id=self.kwargs['pk'])
        context['monthly_income'] = sum(
            order.get_total_cost() for order in paid_store_orders)
        return context
class StoreCreateView(LoginRequiredMixin, CreateView):
    # Create a store; all listed fields are editable on creation.
    model = Store
    fields = ['user','manager','foods','branch_num','image','pub_date','address']

class StoreUpdateView(LoginRequiredMixin, UpdateView):
    # Update a store; 'user' and 'pub_date' are intentionally not editable.
    model = Store
    fields = ['manager','foods','branch_num','image','address']
    context_object_name = 'store'
    template_name = 'Store/store_update_form.html'

class StoreDeleteView(LoginRequiredMixin, DeleteView):
    # Delete a store, then return to the store list.
    model = Store
    success_url = reverse_lazy('store-list')
    context_object_name = 'store'
class StoreFoodDetailView(LoginRequiredMixin, DetailView):
    # Detail page of one food within one store: cart/comment forms plus the
    # five most recent comments annotated with a humanized age.
    model = Store
    context_object_name = 'store'
    template_name = 'Store/store_food_detail.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        store = Store.objects.get(id=self.kwargs['pk'])
        food = Food.objects.filter(stores=store).get(id=self.kwargs['food_id'])
        context['food'] = food
        context['cart_food_form'] = CartAddFoodForm()
        context['comment_form'] = CommentForm()
        comments = Comment.objects.filter(food=food)[:5]
        comment_times = []
        # Round both "now" and each comment's timestamp through a string
        # format, i.e. truncate to whole seconds before subtracting.
        # NOTE(review): uses naive local time; if comment.created is
        # timezone-aware (Django USE_TZ) this comparison is off — confirm.
        now = datetime.now()
        date_format = "%Y-%m-%d %H:%M:%S"
        time1 = now.strftime("%Y-%m-%d %H:%M:%S")
        time_now = datetime.strptime(time1,date_format)
        for comment in comments:
            time2 = comment.created.strftime("%Y-%m-%d %H:%M:%S")
            time_2 = now.strptime(time2,date_format)
            diff_time = time_now - time_2
            if diff_time.days > 0:
                # At least one full day old: report months, else weeks,
                # else days (30-day months, 7-day weeks).
                weeks = int(diff_time.days / 7)
                months = int(diff_time.days / 30)
                if months > 0:
                    comment_times.append('{} months ago'.format(months))
                else:
                    if weeks > 0:
                        comment_times.append('{} weeks ago'.format(weeks))
                    else:
                        comment_times.append('{} days ago'.format(diff_time.days))
            else:
                # Less than a day old: hours, then minutes, then "just now".
                hours = int(diff_time.seconds / (3600))
                if hours > 0:
                    comment_times.append('{} hours ago'.format(hours))
                else:
                    minutes = int((diff_time.seconds % 3600) / 60)
                    if minutes > 0:
                        comment_times.append('{} minutes ago'.format(minutes))
                    else:
                        comment_times.append('just now')
        # Pair each comment with its age label for the template.
        food_comments = zip(comments,comment_times)
        context['food_comments'] = food_comments
        # Remember the store so the cart app knows where the food came from.
        self.request.session['store_id'] = store.id
        return context
##############################################################
# Manager Model Views
###############################################################
class ManagerDetailView(LoginRequiredMixin, DetailView):
    """Detail page for a store manager."""
    model = Manager
    context_object_name = 'manager'
class ManagerUpdateView(LoginRequiredMixin, UpdateView):
    """Edit a manager's personal details."""
    model = Manager
    fields = ['name','address','phone_num','education_degree','image']
    context_object_name = 'manager'
    template_name = 'Store/manager_update_form.html'
class ManagerDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a manager, then return to the store list."""
    model = Manager
    success_url = reverse_lazy('store-list')
    context_object_name = 'manager'
##############################################################
# Employee Model Views
###############################################################
class EmployeeDetailView(LoginRequiredMixin, DetailView):
    """Detail page for one employee of one store.

    URL kwargs: ``pk`` (store id) and ``employee_id``. The employee is
    exposed only when they actually belong to the store; otherwise the
    context carries ``None``.
    """
    model = Employee
    context_object_name = 'employee'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Raises Employee.DoesNotExist for an unknown id (unchanged).
        employee = Employee.objects.get(id=self.kwargs['employee_id'])
        # Membership test done in the database (EXISTS) instead of loading
        # every employee of the store into memory.
        belongs = Employee.objects.filter(
            store_id=self.kwargs['pk'], pk=employee.pk).exists()
        context['employee'] = employee if belongs else None
        return context
class EmployeeCreateView(LoginRequiredMixin, CreateView):
    """Create a new employee record."""
    model = Employee
    fields = ['store','name','address','phone_num','pub_date','image','position','education_degree','monthly_salary']
class EmployeeUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an employee; store, salary and pub_date stay fixed here."""
    model = Employee
    fields = ['name','address','phone_num','image','education_degree','position']
    context_object_name = 'employee'
    template_name = 'Store/employee_update_form.html'
class EmployeeDeleteView(LoginRequiredMixin, DeleteView):
    """Delete an employee.

    NOTE(review): 'store-detail' normally needs a pk argument, so a plain
    reverse_lazy('store-detail') may raise on redirect -- verify the URL
    conf (a get_success_url() returning the store's URL may be intended).
    """
    model = Employee
    success_url = reverse_lazy('store-detail')
    context_object_name = 'employee'
|
[
"[email protected]"
] | |
f8880d12b3954bf1f29a84e2fa0adf8ba9e779d6
|
52a4d869976a97498bdf56a8d0ff92cac138a136
|
/Bioinformatics Textbook Track/Chapter 2/rosalind_ba2d.py
|
238db719734990db59ec3be92ca629ff672af9ea
|
[] |
no_license
|
aakibinesar/Rosalind
|
d726369a787d848cc378976b886189978a60a3a5
|
375bbdbfb16bf11b2f980701bbd0ba74a1605cdb
|
refs/heads/master
| 2022-08-18T09:36:00.941080
| 2020-05-24T18:49:38
| 2020-05-24T18:49:38
| 264,722,651
| 0
| 0
| null | 2020-05-17T17:51:03
| 2020-05-17T17:40:59
| null |
UTF-8
|
Python
| false
| false
| 1,561
|
py
|
def greedymotifsearch(dna, k, t):
    """Greedy motif search (no pseudocounts).

    Seeds with each k-mer of the first string, greedily extends one row at
    a time via the profile-most-probable k-mer, and keeps the motif set
    with the lowest score.
    """
    best_motifs = [seq[:k] for seq in dna]
    best_score = score(best_motifs)
    for start in range(len(dna[0]) - k + 1):
        motifs = [dna[0][start:start + k]]
        for row in dna[1:t]:
            profile = motifsToProfile(motifs)
            motifs.append(profileMostProbablekmer(row, k, profile))
        current = score(motifs)
        if current < best_score:
            best_motifs, best_score = motifs, current
    return best_motifs
def score(motifs):
    """Total mismatch count: per column, the number of characters that
    differ from that column's most common base (lower is better)."""
    total = 0
    for column in zip(*motifs):
        most_common = max(column.count(base) for base in 'ACGT')
        total += len(column) - most_common
    return total
def motifsToProfile(motifs):
    """Column-wise base-frequency profile of the motifs.

    Returns {'A': [...], 'C': [...], 'G': [...], 'T': [...]} where entry
    i is the fraction of motifs carrying that base in column i (no
    pseudocounts, so zeros are possible).
    """
    total = float(len(motifs))
    profile = {base: [] for base in 'ACGT'}
    for column in zip(*motifs):
        for base in 'ACGT':
            profile[base].append(column.count(base) / total)
    return profile
def profileMostProbablekmer(text, k, matrix):
    """Profile-most-probable k-mer of `text`.

    `matrix` maps each base to its per-position probabilities. Ties keep
    the earliest k-mer (strict '>' comparison).
    """
    best_prob = None
    best_kmer = None
    for start in range(len(text) - k + 1):
        kmer = text[start:start + k]
        prob = 1
        for pos, base in enumerate(kmer):
            prob *= matrix[base][pos]
        if best_prob is None or prob > best_prob:
            best_prob = prob
            best_kmer = kmer
    return best_kmer
# Driver: line 1 of the dataset holds "k t", the remaining lines are the
# DNA strings; print the greedy motifs one per line.
with open('rosalind_ba2d.txt') as f:
    k,t = map(int,f.readline().rstrip().split(' '))
    strings = [st.rstrip() for st in f.readlines()]
    print('\n'.join(greedymotifsearch(strings,k,t)))  # NOTE: without pseudocounts the greedy search can tie-break differently from the grader -- may need several tries
|
[
"[email protected]"
] | |
8d1dcda3139a9d6e5d1dcd75a2e85017e18a0a4a
|
78c3082e9082b5b50435805723ae00a58ca88e30
|
/03.AI알고리즘 소스코드/venv/Lib/site-packages/caffe2/python/operator_test/flatten_op_test.py
|
ba5fce81296a516900f9cabf049c0c697338ce54
|
[] |
no_license
|
jinStar-kimmy/algorithm
|
26c1bc456d5319578110f3d56f8bd19122356603
|
59ae8afd8d133f59a6b8d8cee76790fd9dfe1ff7
|
refs/heads/master
| 2023-08-28T13:16:45.690232
| 2021-10-20T08:23:46
| 2021-10-20T08:23:46
| 419,217,105
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 960
|
py
|
from hypothesis import given
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestFlatten(hu.HypothesisTestCase):
    """Property-based check of the caffe2 "Flatten" operator against a
    NumPy reference, over random tensors and all valid axes."""

    @given(X=hu.tensor(min_dim=2, max_dim=4),
           **hu.gcs)
    def test_flatten(self, X, gc, dc):
        # Flatten accepts any axis in [0, ndim]; exercise them all.
        for axis in range(X.ndim + 1):
            op = core.CreateOperator(
                "Flatten",
                ["X"],
                ["Y"],
                axis=axis)

            def flatten_ref(X):
                # Reference: collapse dims before `axis` into the outer
                # size and the rest into the inner size. The trailing
                # comma makes this a 1-tuple (one operator output).
                shape = X.shape
                outer = np.prod(shape[:axis]).astype(int)
                inner = np.prod(shape[axis:]).astype(int)
                return np.copy(X).reshape(outer, inner),

            self.assertReferenceChecks(gc, op, [X], flatten_ref)
            # Check over multiple devices
            self.assertDeviceChecks(dc, op, [X], [0])
# Allow running this test module directly.
if __name__ == "__main__":
    import unittest
    unittest.main()
|
[
"[email protected]"
] | |
0ad9c543040c66b73a4c0063a4834e93bf347cb7
|
19bcb4784f2ddda66d5ccf9eb268c45baf1f122c
|
/python/nn/results/get_results_aggr.py
|
21dc15f59a6a51107391466207eeb449a8b19102
|
[
"MIT"
] |
permissive
|
PeterJackNaylor/AutomaticWSI
|
bb76f9983479b1a1a6d7ad089eb9bb098da91136
|
a26f3d8efff005dcf2d1a14705785579ce5484c8
|
refs/heads/master
| 2023-09-04T09:12:48.946814
| 2023-08-30T09:24:17
| 2023-08-30T09:24:17
| 226,664,370
| 1
| 1
|
MIT
| 2020-03-19T10:49:47
| 2019-12-08T12:30:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,695
|
py
|
import os
from glob import glob
import pandas as pd
def get_options():
    """Parse command-line options.

    --path: folder containing the '*best.csv' result files.
    --name: name of the aggregated output csv.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description='takes a folder with ')
    parser.add_argument('--path', required=True, metavar="str", type=str,
                        help='folder where the result files can be found')
    parser.add_argument('--name', required=True, metavar="str", type=str,
                        help='name of the output')
    return parser.parse_args()
def fres(st):
    """Resolution token: the text between 'at_res_' and '___be'."""
    resolution_part = st.split('at_res_')[1]
    return resolution_part.split('___be')[0]
def fmodel(st):
    """Model name: everything before the first '_for_'."""
    return st.split('_for_', 1)[0]
def fy(st):
    """Target variable: the text between '_for_' and '_at_'."""
    target_part = st.split('_for_')[1]
    return target_part.split('_at_')[0]
def ftype(st):
    """Statistic suffix after '___best' (e.g. 'mean' or 'Std.Dev')."""
    parts = st.split('___best')
    return parts[1]
def main():
    """Aggregate every '*best.csv' in --path into one summary table.

    For each file, computes the per-column mean and standard deviation,
    then decodes the file name into (y, model, res, type) index levels
    and writes the transposed summary to --name.
    """
    options = get_options()
    files = glob(os.path.join(options.path, '*best.csv'))
    stats = pd.DataFrame()
    for f in files:
        table = pd.read_csv(f)
        table = table.drop('Unnamed: 0', axis=1)
        table['counts'] = table.shape[0]
        # NOTE(review): 'mean' is assigned the row count, identical to
        # 'counts' -- looks unintentional; confirm what it should hold.
        table['mean'] = table.shape[0]
        col = os.path.basename(f).split('.')[0]
        stats[col + "mean"] = table.mean()
        stats[col + "Std.Dev"] = table.std()
    stats = stats.T
    # Decode the metadata that fres/fmodel/fy/ftype extract from each
    # row's (former column) name.
    stats['res'] = stats.apply(lambda x: fres(x.name), axis=1)
    stats['model'] = stats.apply(lambda x: fmodel(x.name), axis=1)
    stats['y'] = stats.apply(lambda x: fy(x.name), axis=1)
    stats['type'] = stats.apply(lambda x: ftype(x.name), axis=1)
    # Removed leftover debugging breakpoint (`import pdb; pdb.set_trace()`)
    # that halted every run here.
    stats = stats.set_index(['y', 'model', 'res', 'type'])
    stats.to_csv(options.name)


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
17fef6c5d241acb0b7bb102fad34566c88da3eff
|
ce5ce3764e75774c0b7eab47893987b9f311b1b9
|
/.history/moments/views_20210527215915.py
|
aeb3ce256b14aff202e4f75fe445d9d3152b2c1b
|
[] |
no_license
|
iSecloud/django-wechat
|
410fb8a23b50dc2343c2f0167bbae560bf6e9694
|
faaff9bb7f9454a63b2f8411d3577169b470baad
|
refs/heads/main
| 2023-05-15T06:53:16.252422
| 2021-06-07T14:00:35
| 2021-06-07T14:00:35
| 374,685,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
from django.shortcuts import render
# Create your views here.
def home(request):
    """Render the static landing page."""
    return render(request, 'homepage.html')
|
[
"[email protected]"
] | |
a161fd86ce0916197d8943f40b551acd0ba600bc
|
50f0d33b12778f911fe16a4e18d0659936b9086b
|
/0x05-python-exceptions/4-list_division.py
|
e67e5211367f6871f31a26fa72ddb8ede0d0caa0
|
[] |
no_license
|
monicajoa/holbertonschool-higher_level_programming
|
4f4eaa7aa2cad1642e7aed54663cb30eb92e1b4f
|
451d20174144ad96fa726a4389c7aae72abf2495
|
refs/heads/master
| 2022-12-18T00:35:00.682624
| 2020-09-25T05:14:57
| 2020-09-25T05:14:57
| 259,479,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
#!/usr/bin/python3
def list_division(my_list_1, my_list_2, list_length):
    """Divide my_list_1[i] by my_list_2[i] for i in range(list_length).

    Faulty positions contribute 0 to the result and print a message:
    'division by 0', 'out of range' or 'wrong type'.
    """
    quotients = []
    for idx in range(list_length):
        value = 0
        try:
            value = my_list_1[idx] / my_list_2[idx]
        except ZeroDivisionError:
            print("division by 0")
        except IndexError:
            print("out of range")
        except TypeError:
            print("wrong type")
        finally:
            quotients.append(value)
    return quotients
|
[
"[email protected]"
] | |
4f8360cb9656c65b9ab0af1060d4f523bca4959f
|
6a95b330e1beec08b917ff45eccfd6be3fd4629f
|
/kubernetes/client/models/v1_namespace_status.py
|
523e7e43d3f44960d90c934e8371361de7fc1cc0
|
[
"Apache-2.0"
] |
permissive
|
TokkoLabs/client-python
|
f4a83d6540e64861b59e322c951380a670578d7f
|
f1ad9c6889105d8510472606c98f8d3807f82020
|
refs/heads/master
| 2023-07-14T01:36:46.152341
| 2017-12-21T21:32:11
| 2017-12-21T21:32:11
| 115,042,671
| 0
| 0
|
Apache-2.0
| 2021-08-06T03:29:17
| 2017-12-21T20:05:15
|
Python
|
UTF-8
|
Python
| false
| false
| 3,306
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1NamespaceStatus(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'phase': 'str'
    }

    attribute_map = {
        'phase': 'phase'
    }

    def __init__(self, phase=None):
        """
        V1NamespaceStatus - a model defined in Swagger
        """

        self._phase = None
        self.discriminator = None

        if phase is not None:
            self.phase = phase

    @property
    def phase(self):
        """
        Gets the phase of this V1NamespaceStatus.
        Phase is the current lifecycle phase of the namespace. More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases

        :return: The phase of this V1NamespaceStatus.
        :rtype: str
        """
        return self._phase

    @phase.setter
    def phase(self, phase):
        """
        Sets the phase of this V1NamespaceStatus.
        Phase is the current lifecycle phase of the namespace. More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases

        :param phase: The phase of this V1NamespaceStatus.
        :type: str
        """

        self._phase = phase

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Serialize each declared attribute, recursing into nested swagger
        # models found in lists, dicts or directly as values.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1NamespaceStatus):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"[email protected]"
] | |
32b8393a60a17cb8d5d3a614d581aae9fcb466f1
|
a7f16c95f973905e880ad4dc277fbba890486654
|
/wildlifecompliance/migrations/0283_auto_20190814_1036.py
|
dcfe076119285e3560ffaf749bd7547dd1ce0fd5
|
[
"Apache-2.0"
] |
permissive
|
dbca-wa/wildlifecompliance
|
9e98e9c093aeb25dbb7ff8d107be47e29bcd05e1
|
cb12ad9ea1171f10b5297cdb7e1eb6ea484e633d
|
refs/heads/master
| 2023-08-08T14:37:05.824428
| 2023-07-31T02:57:23
| 2023-07-31T02:57:23
| 232,276,030
| 1
| 17
|
NOASSERTION
| 2023-07-31T02:57:24
| 2020-01-07T08:12:53
|
Python
|
UTF-8
|
Python
| false
| false
| 772
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-08-14 02:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration: drops
    InspectionTypeApprovalDocument.log_entry and adds the nullable
    InspectionType.approval_document foreign key."""

    dependencies = [
        ('wildlifecompliance', '0282_auto_20190813_1820'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='inspectiontypeapprovaldocument',
            name='log_entry',
        ),
        migrations.AddField(
            model_name='inspectiontype',
            name='approval_document',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inspection_type', to='wildlifecompliance.InspectionTypeApprovalDocument'),
        ),
    ]
|
[
"[email protected]"
] | |
7e7f5a718ac8033167bc5a225a645a38c8c3650a
|
e5dd21ac1305257fe163995f951cbbfbf3051fd7
|
/Cracking_the_Coding_Interview/8-6.py
|
c8c86de2f9e0015f6f18220cf9120789b84d7d12
|
[] |
no_license
|
QI1002/exampool
|
d3d3cdad040e4a861b934122ef12e059f7d6cd01
|
08800f78482f9fd9d6641c3eabc5880e69782f42
|
refs/heads/master
| 2021-01-02T09:35:15.283632
| 2020-10-13T14:54:25
| 2020-10-13T14:54:25
| 99,178,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
def paintBox(point, box):
    """4-connected flood fill.

    `point` is (x, y); `box` is a grid indexed as box[y][x]. Every cell
    reachable from `point` through cells not equal to 1 is set to 1.
    Out-of-range starting points are ignored.
    """
    width = len(box[0])
    height = len(box)
    # Explicit stack instead of recursion; same cells get painted.
    pending = [point]
    while pending:
        x, y = pending.pop()
        if not (0 <= x < width and 0 <= y < height):
            continue
        if box[y][x] == 1:
            continue
        box[y][x] = 1
        pending.extend([(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)])
# Build an n-row by m-column grid of zeros and flood-fill it from `point`.
m = 4
n = 6
box = [[0] * m for _ in range(n)]
point = (2, 3)
paintBox(point, box)
print(box)
|
[
"[email protected]"
] | |
897cf0c437285e8773f49a6e7cb7f12530950287
|
491f9ca49bbb275c99248134c604da9fb43ee9fe
|
/MD_analysis/process_radius_of_gyration.py
|
2d4077af75475dccee4e3c7ab1dad1d1e233f511
|
[] |
no_license
|
KineOdegardHanssen/PhD-subprojects
|
9ef0facf7da4b2a80b4bea9c890aa04f0ddcfd1a
|
c275539689b53b94cbb85c0fdb3cea5885fc40e9
|
refs/heads/Windows
| 2023-06-08T13:32:15.179813
| 2023-06-05T08:40:10
| 2023-06-05T08:40:10
| 195,783,664
| 2
| 0
| null | 2020-08-18T14:42:21
| 2019-07-08T09:49:14
|
Python
|
UTF-8
|
Python
| false
| false
| 6,058
|
py
|
import matplotlib.pyplot as plt                # To plot
from scipy.optimize import curve_fit
import numpy as np
import random
import math
import time

start_time = time.process_time()

# System size and stiffness parameters of the chain grid simulations.
M = 9          # chains per grid
N = 101        # beads per chain
kangle = 20
kbond = 200
Kangle = kangle
Kbond = kbond
#factors = [0.1,1,10,100,250]
#charges = [0,-1,-5,-10]
#spacings = [1,2,3,4,5,10,40,100]
N = 101
spacing = 1
gridspacing = spacing
spacings = [1,2,3,4,5,6,7,8,10,15,40,100]#[1,2,3,4,5,8,10,15,40,100]
#spacings = [40]
#dielectrics = [1,2,10,100] # For lg = 2 nm
dielectrics = [1,2,10,50,100,1000]
#lengths = [21,61,101,141]
#lensp = [1,3,5,7]
#krfacs = [0.01, 0.05, 0.10, 0.50, 1.00]
#kangles = [20, 100, 200, 1000, 2000]
wallenergies = [1.042]
charge = -1
T = 310
# Exactly one of these flags selects which parameter sweep to process.
spacesims = False
dielsims = True
wallsims = False

if spacesims == True:
    Nsp = len(spacings)
    # NOTE(review): 'wallenergy' is not defined before this line, so this
    # branch raises NameError if spacesims is enabled -- confirm/fix
    # before using the spacing sweep.
    outfilename = 'table_radgyrs_chaingrid_quadratic_M%iN%i_Langevin_wall%.3f_Kangle%i_Kbond%i_debye_kappa1_debyecutoff3_charge%i_T%i_theta0is180_twofirst_are_fixed_varyspacing.txt' % (M,N,wallenergy,kangle,kbond,charge,T)
    outfile = open(outfilename,'w')
    outfile.write('begin{table}\n\centering\n\caption{}\n begin{tabular}{r|c|c|c|c|c|c|c|c|c|c}\nSpacing/Chain & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & Average \ \ \n\hline\n')
if dielsims == True:
    Nsp = len(dielectrics)
    outfilename = 'table_radgyrs_chaingrid_quadratic_M%iN%i_Langevin_gridspacing%i_Kangle%i_Kbond%i_debye_kappa1_debyecutoff3_charge%i_T%i_theta0is180_twofirst_are_fixed_varydielectric.txt' % (M,N,gridspacing,Kangle,Kbond,charge,T)
    outfile = open(outfilename,'w')
    outfile.write('begin{table}\n\centering\n\caption{}\n begin{tabular}{r|c|c|c|c|c|c|c|c|c|c}\nDielectric/Chain & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & Average \ \ \n\hline\n')
if wallsims == True:
    Nsp = len(wallenergies)
    outfilename = 'table_radgyrs_chaingrid_quadratic_M%iN%i_Langevin_gridspacing%i_Kangle%i_Kbond%i_debye_kappa1_debyecutoff3_charge%i_T%i_theta0is180_twofirst_are_fixed_varywallenergy.txt' % (M,N,gridspacing,Kangle,Kbond,charge,T)
    outfile = open(outfilename,'w')
    outfile.write('begin{table}\n\centering\n\caption{}\n begin{tabular}{r|c|c|c|c|c|c|c|c|c|c}\n $\epsilon_w$/Chain & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & Average \ \ \n\hline\n')

# Per-parameter-value mean radius of gyration (over chains) and its rms.
totalaverage = np.zeros(Nsp)
totalrms = np.zeros(Nsp)
for i in range(Nsp):
    # Pick the parameter value for this row of the table and the matching
    # LAMMPS log file name.
    if spacesims == True:
        spacing = spacings[i]
        outfile.write('%i' % spacing)
        infilename = 'log.radgyrs_chaingrid_quadratic_M%iN%i_gridspacing%i_Langevin_Kangle%i_Kbond%i_debye_kappa1_debyecutoff3_charge' % (M,N,spacing,kangle,kbond)+str(charge)+'_T%i_theta0is180_twofirst_are_fixed' % T
    if dielsims == True:
        dielectric = dielectrics[i]
        outfile.write('%i' % dielectric)
        infilename = 'log.chaingrid_quadratic_M%iN%i_gridspacing%i_Langevin_Kangle%i_Kbond%i_debye_kappa1_debyecutoff3_charge%i_dielectric%i_T%i_theta0is180_twofirst_are_fixed' % (M,N,gridspacing,Kangle,Kbond,charge,dielectric,T)
    if wallsims == True:
        wallenergy = wallenergies[i]
        outfile.write('%.3f' % wallenergy)
        infilename = 'log.chaingrid_quadratic_M%iN%i_gridspacing%i_Langevin_wall%.3f_Kangle%i_Kbond%i_debye_kappa1_debyecutoff3_charge%i_T%i_theta0is180_twofirst_are_fixed_with_rgyr' % (M,N,spacing,wallenergy,kangle,kbond,charge,T)
    # Readying arrays:
    radgyr_average = np.zeros(M)
    radgyr_stdv = np.zeros(M)
    # This is really not the optimal solution:
    allradgyrs_vals = []
    allradgyrs_inds = []
    infile = open(infilename,'r')
    lines = infile.readlines()
    #print('infilename:', infilename)
    # Finding the mean and rms:
    # Finding the mean:
    # starter1/starter2 mark that we have passed the "Run and write" banner
    # and the thermo "Step" header, i.e. that data lines follow.
    starter1 = 0
    starter2 = 0
    counter = 0
    for line in lines:
        words = line.split()
        #print('words=',words)
        #print('starter1:', starter1, '; starter2:', starter2)
        if len(words)>2:
            if words[1]=='Run' and words[2]=='and':
                # Finding the line: ####################### Run and write to file #########################################
                starter1 = 1
                #print('First mark hit')
        #if starter1==1:
        #    print(words)
        if starter1==1 and starter2==1:
            # Test if we should break:
            if len(words)>0:
                if words[0]=='WARNING:' or words[0]=='Loop':
                    break
            #print('Starting to read data')
            # Thermo lines carry either 12 or 18 columns; the per-chain
            # radii start at column 3 or 9 respectively.
            if len(words)==12 or len(words)==18:
                #print('I am in')
                if len(words)==12:
                    addon = 3
                else:
                    addon = 9
                for j in range(M):
                    #print(words)
                    thisvalue = float(words[j+addon])
                    radgyr_average[j] += thisvalue
                    allradgyrs_vals.append(thisvalue)
                    allradgyrs_inds.append(j)
                counter+=1
        if starter1==1 and starter2==0:
            if len(words)>0:
                if words[0]=='Step':
                    starter2=1
                    #print('Second mark hit')
    infile.close()
    radgyr_average /= counter
    totalaverage[i] = np.mean(radgyr_average)
    # Finding the rms:
    for j in range(len(allradgyrs_vals)):
        chain = allradgyrs_inds[j]
        val = allradgyrs_vals[j]
        radgyr_stdv[chain] += (radgyr_average[chain]-val)**2
        # NOTE(review): this assignment overwrites totalrms[i] on every
        # iteration, so only the last sample contributes -- probably
        # meant '+=' ; confirm before trusting the total rms column.
        totalrms[i] = (totalaverage[i]-val)**2
    totalrms[i] = np.sqrt(totalrms[i]/(counter-1))
    # One LaTeX cell per chain: mean +/- sample standard deviation.
    for j in range(M):
        radgyr_stdv[j] = np.sqrt(radgyr_stdv[j]/(counter-1))
        outfile.write(' & %.3f$\pm$%.3f' % (radgyr_average[j], radgyr_stdv[j]))
    outfile.write(' & %.4f$\pm$%.4f \ \ \n' % (totalaverage[i], totalrms[i]))
outfile.write('\end{tabular}\n\label{table:radgyrs_chain_and_total_something}\n\end{table}')
outfile.close()
|
[
"[email protected]"
] | |
290cccac3244c8f49b7fe30dc928990ec75a0610
|
b7ba02a29b10c449a8e405063c5eede32c36f0c8
|
/doc/conf.py
|
31ed7b46162d1b1b5bd2fbd7c00247768fd3b1bc
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Paebbels/pyHTTPRequestRouter
|
96e32756ef4ef0e538f8990cef99719eac5dad0d
|
10592ecdd9fd57bc04d218a7cdbb050d7ae38cc5
|
refs/heads/master
| 2021-06-22T14:25:37.250642
| 2020-01-02T00:57:08
| 2020-01-02T00:57:08
| 214,282,531
| 0
| 0
|
NOASSERTION
| 2021-04-28T22:09:05
| 2019-10-10T20:46:33
|
Python
|
UTF-8
|
Python
| false
| false
| 4,408
|
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
#sys.path.insert(0, os.path.abspath('.'))
# The package root one level up, so autodoc can import pyHTTPRequestRouter.
sys.path.insert(0, os.path.abspath('..'))
#sys.path.insert(0, os.path.abspath('../pyHTTPRequestRouter'))
#sys.path.insert(0, os.path.abspath('_extensions'))
#sys.path.insert(0, os.path.abspath('_themes/sphinx_rtd_theme'))

# -- Project information -----------------------------------------------------
project = 'pyHTTPRequestRouter'
copyright = '2017-2019, Patrick Lehmann'
author = 'Patrick Lehmann'
# The full version, including alpha/beta/rc tags
release = 'v0.1'

# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    # Sphinx theme
    "sphinx_rtd_theme",
    # Standard Sphinx extensions
    "sphinx.ext.autodoc",
    'sphinx.ext.extlinks',
    'sphinx.ext.intersphinx',
    'sphinx.ext.inheritance_diagram',
    'sphinx.ext.todo',
    'sphinx.ext.graphviz',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    # SphinxContrib extensions
    # Other extensions
    # 'DocumentMember',
    # local extensions (patched)
    # local extensions
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
    "_build",
    "Thumbs.db",
    ".DS_Store"
]

# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# ==============================================================================
# Sphinx.Ext.InterSphinx
# ==============================================================================
# Cross-reference targets for :py: roles resolving into other projects.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
    # 'pyFlags': ('http://pyFlags.readthedocs.io/en/latest', None),
    'pyExceptions': ('http://pyExceptions.readthedocs.io/en/latest', None),
    'pyAttributes': ('http://pyAttributes.readthedocs.io/en/latest', None),
    'pyGenericPath': ('http://pyGenericPath.readthedocs.io/en/latest', None),
    'pyHTTPInterface': ('http://pyHTTPInterface.readthedocs.io/en/latest', None),
}

# ==============================================================================
# Sphinx.Ext.AutoDoc
# ==============================================================================
# see: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#configuration
autodoc_member_order = "bysource"       # alphabetical, groupwise, bysource

# ==============================================================================
# Sphinx.Ext.ExtLinks
# ==============================================================================
# Shorthand roles (:issue:`NN`, :pull:`NN`, :src:`file`) for GitHub links.
extlinks = {
    'issue': ('https://github.com/Paebbels/pyHTTPRequestRouter/issues/%s', 'issue #'),
    'pull': ('https://github.com/Paebbels/pyHTTPRequestRouter/pull/%s', 'pull request #'),
    'src': ('https://github.com/Paebbels/pyHTTPRequestRouter/blob/master/pyHTTPRequestRouter/%s?ts=2', None),
    # 'test': ('https://github.com/Paebbels/pyHTTPRequestRouter/blob/master/test/%s?ts=2', None)
}

# ==============================================================================
# Sphinx.Ext.Graphviz
# ==============================================================================
graphviz_output_format = "svg"
|
[
"[email protected]"
] | |
5ce998a8d321fbb8c92ffc3515a595137019c013
|
222d525f50f0c955ba6e8af0b41a9bd9c04d99a9
|
/venv/Lib/site-packages/pandas/_version.py
|
560c9c69332156b1bd4362ce45c1cffa515f362a
|
[] |
no_license
|
Sakthi-zebra/Rest_RCI
|
943c4652a239808b71d0d2ba5c28acca7435cf68
|
9a716860986a3d1fafee70f0c6339810fce152f2
|
refs/heads/master
| 2021-01-05T16:44:41.126142
| 2020-02-17T10:22:16
| 2020-02-17T10:22:16
| 241,074,751
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
# This file was generated by 'versioneer.py' (0.15) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.

# Suppress any warnings emitted while importing the (cheap) stdlib deps.
from warnings import catch_warnings
with catch_warnings(record=True):
    import json
    import sys

# Version metadata embedded by versioneer at build time; parsed lazily in
# get_versions() below.
version_json = '''
{
"dirty": false,
"error": null,
"full-revisionid": "29d6b0232aab9576afa896ff5bab0b994760495a",
"version": "1.0.1"
}
''' # END VERSION_JSON


def get_versions():
    """Return the embedded version metadata as a dict."""
    return json.loads(version_json)
|
[
"[email protected]"
] | |
7132a6efe79998e97391fede2615e22427a1242a
|
5ffed81ced523b6e417b4e48d20380b6f16f8f42
|
/pre_exam/vacantion.py
|
10a4829eea56d31166c28138daf1f4126ed1418f
|
[] |
no_license
|
Nikoletazl/Basics-Python
|
0f3f095bd51f9546c681e3cdd268232de88749ab
|
17aef1b95814f13a02053681aae3e617e56f2fe6
|
refs/heads/main
| 2023-08-14T15:48:48.450249
| 2021-10-08T15:02:35
| 2021-10-08T15:02:35
| 415,027,622
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
# Cost of a group trip, read interactively: presumably lodging at 20/night,
# transport cards at 1.60 each, tickets at 6 each (per person), plus a 25%
# surcharge on the group total -- units/rates per the task statement.
count_people = int(input())     # people in the group
count_nights = int(input())     # nights (per person)
count_cards = int(input())      # transport cards (per person)
count_tickets = int(input())    # tickets (per person)
sum_one_person = count_nights * 20          # lodging for one person
sum_cards = count_cards * 1.60              # cards for one person
tickets = count_tickets * 6                 # tickets for one person
total_sum_one = sum_one_person + sum_cards + tickets
group_sum = total_sum_one * count_people
end_sum = group_sum + 0.25 * group_sum      # add the 25% surcharge
print(f"{end_sum:.2f}")
|
[
"[email protected]"
] | |
e6fc546651d2205d4808a4a327045054eda8451d
|
7db0883137d119565540f2d071638c4016f39213
|
/Note/Project_Play/BaiduBaike/SpiderMan.py
|
eb7543c12a969940ffb61d81059fa69f378fe5f0
|
[] |
no_license
|
PhilHuang-d/python---
|
cf22a4cc00d4beaaf75ef7ca87a4c5d31a9d5efe
|
152c18f51838ce652b79a0cd24765b1a1c237eee
|
refs/heads/master
| 2021-09-13T05:32:53.754865
| 2018-04-25T13:36:40
| 2018-04-25T13:36:40
| 108,812,447
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
#coding:utf-8
import DataOutput
import HTMLDownloader
import URLManger
import HtmlParser
class SpiderMan(object):
    """Crawler controller wiring URL manager, downloader, parser and output."""

    def __init__(self):
        self.manager = URLManger.UrlManager()
        self.downloader = HTMLDownloader.HtmlDownloader()
        self.parser = HtmlParser.HhmlParser()
        self.output = DataOutput.DataOutput()

    def crawl(self, root_url):
        """Crawl up to 100 pages starting from root_url and write the report."""
        # Seed the URL manager with the entry point.
        self.manager.add_new_url(root_url)
        while (self.manager.has_new_url() and self.manager.old_url_size() < 100):
            try:
                # Fetch the next pending url from the manager. The original
                # called add_new_url() here, which never yielded a url.
                new_url = self.manager.get_new_url()
                # Download the page.
                html = self.downloader.download(new_url)
                # Extract out-links and data from the page.
                new_urls, data = self.parser.parser(new_url, html)
                # Queue the *extracted* urls; the original passed the single
                # consumed url back, so the frontier never grew.
                self.manager.add_new_urls(new_urls)
                # Persist the extracted data.
                self.output.store_data(data)
                # '%' applied inside print() so this also works on Python 3.
                print("已经抓取%s个链接" % self.manager.old_url_size())
            except Exception as e:      # 'as' syntax: valid on Python 2.6+ and 3
                print("抓取失败")
        self.output.output_html()
# Script entry point: crawl the Baidu Baike seed page.
if __name__=="__main__":
    spider_man = SpiderMan()
    spider_man.crawl("http://baike.baidu.com/view/284853.htm")
|
[
"[email protected]"
] | |
4ab2d1b2588ed24e75a8fd18060032ba278250ad
|
bd792a49f21d901f14165993d45e114e4df60340
|
/venv/bin/futurize
|
2a1ba3df470dca8741f0b0ce6790bd1dcee24bdc
|
[] |
no_license
|
Cynthrial/Serial-read
|
82d0883ad4de01fe48e58523d2d0f4bcf97b3835
|
eb706094b95b09cfc8870bff0f6385d04d807996
|
refs/heads/master
| 2020-04-13T13:37:32.380790
| 2018-12-27T02:35:04
| 2018-12-27T02:35:04
| 163,236,705
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
#!/root/PycharmProjects/Serial_read/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.17.1','console_scripts','futurize'
__requires__ = 'future==0.17.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('future==0.17.1', 'console_scripts', 'futurize')()
)
|
[
"[email protected]"
] | ||
28d939d002a35bc02c16215e3fe153a32445d91b
|
8726a58628e1d6c8e6e8cba0bb67de80bad72a51
|
/wizard/create_data_template.py
|
5cfc1efe6cda7e3d4c77f4974c579767e7e5f7b0
|
[] |
no_license
|
cgsoftware/jasper_reports
|
54a612a44cd94963794b16ab4266026b233b8ba4
|
207bdea1b8738dff88260f4ea76da8b627e05375
|
refs/heads/master
| 2021-01-10T19:33:22.921656
| 2011-09-06T15:10:39
| 2011-09-06T15:10:39
| null | 0
| 0
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 5,766
|
py
|
# encoding: iso-8859-15
from xml.dom.minidom import getDOMImplementation
import wizard
import pooler
import base64
import osv
import string
from tools.translate import _
# Wizard form for the first step: pick the model and the recursion depth.
view_form_start = """<?xml version="1.0"?>
<form string="Create Data Template">
<group colspan="2">
<field name="model"/>
<field name="depth"/>
</group>
</form>"""

view_fields_start = {
    'model': { 'string': 'Model', 'type': 'many2one', 'relation': 'ir.model', 'required': True },
    'depth': { 'string':'Depth', 'type':'integer', 'required': True },
}

# Final wizard form: shows the generated XML template for download.
view_form_end = """<?xml version="1.0"?>
<form string="Create Data Template">
<group colspan="2">
<field name="model"/>
<field name="data" filename="filename"/>
<field name="filename" invisible="1"/>
</group>
</form>"""

view_fields_end = {
    'model': { 'string': 'Model', 'type': 'char', 'readonly': True },
    'data': { 'string': 'XML', 'type': 'binary', 'relation': 'ir.model', 'readonly': True },
    'filename': { 'string': 'File Name', 'type': 'char' },
}

# Translation tables used by unaccent(): each character of src_chars maps
# to the character at the same index in dst_chars.
src_chars = """àáäâÀÁÄÂèéëêÈÉËÊìíïîÌÍÏÎòóöôÒÓÖÔùúüûÙÚÜÛçñºª·¤ '"()/*-+?¿!&$[]{}@#`'^:;<>=~%,\\"""
src_chars = unicode( src_chars, 'iso-8859-1' )
dst_chars = """aaaaAAAAeeeeEEEEiiiiIIIIooooOOOOuuuuUUUUcnoa_e________________________________"""
dst_chars = unicode( dst_chars, 'iso-8859-1' )
class create_data_template(wizard.interface):
def _action_start(self, cr, uid, data, context):
res = {
'depth': 1
}
return res
    def normalize(self, text):
        """Return *text* as a UTF-8 byte string (Python 2); other types
        pass through unchanged."""
        if isinstance( text, unicode ):
            text = text.encode('utf-8')
        return text
    def unaccent(self, text):
        """Map accented/special characters to safe ASCII using the
        module-level src_chars/dst_chars tables, strip leading/trailing
        underscores, and return a UTF-8 byte string (Python 2)."""
        if isinstance( text, str ):
            text = unicode( text, 'utf-8' )
        output = text
        # Replace each special character with its positional counterpart.
        for c in xrange(len(src_chars)):
            output = output.replace( src_chars[c], dst_chars[c] )
        return output.strip('_').encode( 'utf-8' )
def generate_xml(self, cr, uid, context, pool, modelName, parentNode, document, depth, first_call):
# First of all add "id" field
fieldNode = document.createElement('id')
parentNode.appendChild( fieldNode )
valueNode = document.createTextNode( '1' )
fieldNode.appendChild( valueNode )
language = context.get('lang')
if language == 'en_US':
language = False
# Then add all fields in alphabetical order
model = pool.get(modelName)
fields = model._columns.keys()
fields.sort()
for field in fields:
name = False
if language:
# Obtain field string for user's language.
name = pool.get('ir.translation')._get_source(cr, uid, modelName + ',' + field, 'field', language)
#name = self.unaccent( name )
#name = self.normalize( name )
#help = pool.get('ir.translation')._get_source(cr, uid, modelName + ',' + field, 'help', language)
#help = self.normalize( help )
if not name:
# If there's not description in user's language, use default (english) one.
name = pool.get(modelName)._columns[field].string
#help = pool.get(modelName)._columns[field].help
if name:
name = self.unaccent( name )
# After unaccent the name might result in an empty string
if name:
name = '%s-%s' % (self.unaccent( name ), field )
else:
name = field
fieldNode = document.createElement( name )
#if name:
#fieldNode.setAttribute( 'name', name )
#if help:
#fieldNode.setAttribute( 'help', help )
parentNode.appendChild( fieldNode )
fieldType = model._columns[field]._type
if fieldType in ('many2one','one2many','many2many'):
if depth <= 1:
continue
newName = model._columns[field]._obj
self.generate_xml(cr, uid, context, pool, newName, fieldNode, document, depth-1, False)
continue
if fieldType == 'float':
value = '12345.67'
elif fieldType == 'integer':
value = '12345'
elif fieldType == 'date':
value = '2009-12-31 00:00:00'
elif fieldType == 'time':
value = '12:34:56'
elif fieldType == 'datetime':
value = '2009-12-31 12:34:56'
else:
value = field
valueNode = document.createTextNode( value )
fieldNode.appendChild( valueNode )
if depth > 1 and modelName != 'Attachments':
# Create relation with attachments
fieldNode = document.createElement( '%s-Attachments' % _('Attachments') )
parentNode.appendChild( fieldNode )
self.generate_xml(cr, uid, context, pool, 'ir.attachment', fieldNode, document, depth-1, False)
if first_call:
# Create relation with user
fieldNode = document.createElement( '%s-User' % _('User') )
parentNode.appendChild( fieldNode )
self.generate_xml(cr, uid, context, pool, 'res.users', fieldNode, document, depth-1, False)
def _action_create_xml(self, cr, uid, data, context):
pool = pooler.get_pool(cr.dbname)
form = data['form']
values = pool.get('ir.model').read(cr, uid, form['model'], ['name','model'], context)
name = values['name']
model = values['model']
document = getDOMImplementation().createDocument(None, 'data', None)
topNode = document.documentElement
recordNode = document.createElement('record')
topNode.appendChild( recordNode )
self.generate_xml( cr, uid, context, pool, model, recordNode, document, form['depth'], True )
topNode.toxml()
res = {
'model': name,
'data': base64.encodestring( topNode.toxml() ),
'filename': 'jasper.xml',
}
return res
states = {
'init': {
'actions': [_action_start],
'result': {
'type': 'form',
'arch': view_form_start,
'fields': view_fields_start,
'state': [('end','Cancel','gtk-cancel'),('create','Create','gtk-ok')]
}
},
'create': {
'actions': [_action_create_xml],
'result': {
'type': 'form',
'arch': view_form_end,
'fields': view_fields_end,
'state': [('end','Accept','gtk-ok')]
}
}
}
create_data_template('jasper_create_data_template')
|
[
"[email protected]"
] | |
958e6cc962347253a2d2217e8fb7795f660d2001
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/Mkt3yqQMsw9e3Jmjq_4.py
|
02def150323524f87c2640e1f03e1201eb361dcd
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,137
|
py
|
"""
Write a function which, given a permutation of `[0, 1, ..., n-1]` ( `n>0`)
represented by a shuffled list, returns the permutation in _disjoint cycle
form_ as a set of tuples.
A **permutation** is a particular (re)ordering of a set of objects. For
example, `[1,3,0,4]` is a permutation on the 4 objects `[0,1,2,3]`. In this
problem, we represent permutations on `n` objects as lists containing the
numbers in `list(range(n)) == [0, ..., n-1]`.
A **cycle** or **cyclic permutation** is a particular kind of permutation
whereby all elements are sent to one another in a cyclic fashion. In this
problem, we represent cycles as tuples.
* For example, the permutation `[1,2,3,0]` is a cyclic permutation of `[0,1,2,3]` because it can be made from `[0,1,2,3]` by applying the mapping `{0:1, 1:2, 2:3, 3:0}`, which maps elements in the _cycle_ `0➞1➞2➞3➞0`. We represent this cycle by the tuple `(0,1,2,3)`, where each element gets sent to the one on the right, and the last is sent to the first.
* The cycles `(0,1,2,3)`, `(1,2,3,0)`, `(2,3,0,1)` and `(3,0,1,2)` all represent the same cycle; namely `0➞1➞2➞3➞0` . We always choose the cycle to have the lowest element first: `(0,1,2,3)`.
Finally, any permutation can be written in **disjoint cycle form** , or as an
unordered set of cyclic permutations. _Disjoint_ means none of the cycles have
any elements in common. This form is unique up to the order of the cycles and
up to the cycle representation.
* The cyclic permutation `[0,1,3,2,4,5]` can be written as `(2,3)`—since 2 an 3 are swapped—and so the disjoint cycle form is `{(2,3)}`.
* `[1,0,3,2]` is the mapping `{0:1, 1:0, 2:3, 3:2}` and has disjoint cycle form`{(0, 1), (2, 3)}` .
Your function takes a list (the permutation) and returns a set of tuples (the
set of cyclic permutations).
### Examples
disjoint_cycle_form([1, 0]) ➞ {(0, 1)}
# 0 and 1 are swapped, but lowest is listed first.
disjoint_cycle_form([0, 1, 2, 3]) ➞ set()
# Permutation is already in order.
disjoint_cycle_form([0, 1, 3, 2]) ➞ {(2, 3)}
disjoint_cycle_form([1, 0, 3, 2]) ➞ {(0, 1), (2, 3)}
# or {(2, 3), (0, 1)}; the cycle order in a set doesn't matter.
disjoint_cycle_form([1, 3, 0, 2]) ➞ {(0, 1, 3, 2)}
### Notes
Look up "disjoint cycle notation" or "cycle decomposition" for more
information about permutations. This is the kind of thing you learn in a first
course in Group Theory. Note that the given permutations will always have at
least one element (the only such permutation is `[0]`), and a permutation of
length `n` will always contain the elements of `range(n)` (that is, `0` to
`n-1` inclusive).
"""
def cycles(perm):
    """Decompose *perm* (a list mapping i -> perm[i]) into a list of cycles.

    Each cycle is a list of indices obtained by starting from an arbitrary
    unvisited element and following the permutation until it closes.
    Fixed points appear as length-1 cycles.
    """
    unvisited = set(perm)
    all_cycles = []
    while unvisited:
        start = unvisited.pop()
        cycle = [start]
        current = perm[start]
        while current in unvisited:
            unvisited.remove(current)
            cycle.append(current)
            current = perm[current]
        all_cycles.append(cycle)
    return all_cycles
def disjoint_cycle_form(perm):
    """Return the disjoint cycle form of *perm* as a set of tuples.

    Trivial (length-1) cycles are omitted.  Each cycle tuple is rotated so
    that its smallest element comes first, which makes the representation
    canonical as the module docstring requires.  The original relied on
    ``set.pop`` inside ``cycles`` happening to yield the smallest integer
    first (a CPython implementation detail, not a guarantee).
    """
    ans = set()
    for cycle in cycles(perm):
        if len(cycle) > 1:
            # Rotate so the minimum element leads; rotations of a cycle
            # denote the same cyclic permutation.
            k = cycle.index(min(cycle))
            ans.add(tuple(cycle[k:] + cycle[:k]))
    return ans
|
[
"[email protected]"
] | |
5606b5c260655962cf20f5832309e6fa1fba193e
|
077beb02d73045eb97261a1c5e7021bfe709e55c
|
/tests/new_item/test_invalid_new_item_class.py
|
0412c3d225cd16df9a5cc57f7d49be2a1a918cd8
|
[
"MIT"
] |
permissive
|
zcutlip/pyonepassword
|
a91d8491d807c2cede2c483a66872b7913ad3aac
|
3ced5acf3667f1af73cad26ae0ef31e8c4b19585
|
refs/heads/main
| 2023-09-04T03:16:49.170698
| 2023-06-26T19:51:32
| 2023-06-26T19:51:32
| 201,505,055
| 48
| 13
|
MIT
| 2023-09-05T01:44:18
| 2019-08-09T16:29:56
|
Python
|
UTF-8
|
Python
| false
| false
| 619
|
py
|
import pytest
from pyonepassword.api.exceptions import OPInvalidItemException
from pyonepassword.op_items._new_item import OPNewItemMixin
class OPInvalidLoginItemTemplate(OPNewItemMixin):
    """Deliberately broken template class used to exercise the error path:
    presumably invalid because OPNewItemMixin is used without a concrete
    item class -- instantiation is expected to raise OPInvalidItemException
    (confirm against OPNewItemMixin's contract)."""

    def __init__(self, title: str, fields=None, sections=None, extra_data=None):
        # Fix: the original used mutable default arguments ([] / {}), which
        # are shared across every call.  Use None sentinels and materialize
        # fresh containers per instance; call-site behavior is unchanged.
        super().__init__(title,
                         fields if fields is not None else [],
                         sections if sections is not None else [],
                         extra_data if extra_data is not None else {})
def test_invalid_new_item_class_01():
    # Instantiating OPNewItemMixin directly (not mixed into a concrete item
    # class) must be rejected with OPInvalidItemException.
    with pytest.raises(OPInvalidItemException):
        OPNewItemMixin("invalid-new-item")
def test_invalid_login_item_template_01():
    # A subclass that uses OPNewItemMixin without a concrete item base must
    # likewise fail to instantiate with OPInvalidItemException.
    with pytest.raises(OPInvalidItemException):
        OPInvalidLoginItemTemplate("invalid login item template")
|
[
"[email protected]"
] | |
9c151b3a6ea5ad2faf547932fcbb58f8c96ed5ea
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/Control/GaudiSequencer/share/test_athretrysequencer.py
|
019c631baffe73d52c8ae1a6ebcba84383c65e5e
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,634
|
py
|
###############################################################
#
# Job options file
#
#==============================================================
import AthenaCommon.Constants as Lvl
from AthenaCommon.AppMgr import theApp
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
# get a handle on the job main sequence
from AthenaCommon.AlgSequence import AlgSequence, AthSequencer
job = AlgSequence()
#--------------------------------------------------------------
# Event related parameters
#--------------------------------------------------------------
# Allow the caller (e.g. athena -c 'EVTMAX=...') to override the event count.
if not 'EVTMAX' in dir():
    EVTMAX = 10
    pass
theApp.EvtMax = EVTMAX
#--------------------------------------------------------------
# Sequence(s)
#--------------------------------------------------------------
## Sequencer configuration: retry the sub-sequence up to 5 times per event ##
job += CfgMgr.AthRetrySequencer ('seq', MaxRetries = 5)
import AthenaPython.PyAthena as PyAthena
class PyPush(PyAthena.Alg):
    """Toy producer algorithm: counts events and always passes the filter."""
    def __init__(self, name='PyPush', **kw):
        # forward our name to the base Algorithm
        kw.update(name=name)
        super(PyPush, self).__init__(**kw)
    def initialize(self):
        """Reset the event counter at the start of the job."""
        self.evts = 0
        return PyAthena.StatusCode.Success
    def execute(self):
        """Count the event, report it, and accept it unconditionally."""
        self.evts = self.evts + 1
        self.setFilterPassed(True)
        self.msg.info("events seen: %s" % self.evts)
        return PyAthena.StatusCode.Success
    def finalize(self):
        """Report the total number of processed events."""
        self.msg.info("finalize ==> total events: %s" % self.evts)
        return PyAthena.StatusCode.Success
class PyPull(PyAthena.Alg):
    """Toy consumer algorithm: randomly (~80%) requests more events by
    failing its filter, exercising the retry sequencer."""
    def __init__(self, name='PyPull', **kw):
        # forward our name to the base Algorithm
        kw.update(name=name)
        super(PyPull, self).__init__(**kw)
    def initialize(self):
        """Reset the event counter at the start of the job."""
        self.evts = 0
        return PyAthena.StatusCode.Success
    def execute(self):
        """Accept the event only ~20% of the time; otherwise ask for more."""
        import random
        self.evts = self.evts + 1
        self.setFilterPassed(True)
        if random.random() < 0.8:
            self.msg.info("requesting more events!!")
            self.setFilterPassed(False)
        else:
            self.msg.info("event quite satisfying...")
        self.msg.info("seen %s event(s)" % self.evts)
        return PyAthena.StatusCode.Success
    def finalize(self):
        """Report the total number of processed events."""
        self.msg.info("finalize ==> total events: %s" % self.evts)
        return PyAthena.StatusCode.Success
# Wire the push/pull algorithms into the retry sequence and count the events
# that make it through.
job.seq += PyPush("push", OutputLevel = Lvl.INFO)
job.seq += PyPull("pull", OutputLevel = Lvl.INFO)
job += CfgMgr.AthEventCounter("counter")
#svcMgr.MessageSvc.OutputLevel = Lvl.INFO
#==============================================================
#
# End of job options file
#
###############################################################
|
[
"[email protected]"
] | |
aebf3cbd105f56502484732cbb959833a049352b
|
6bce631b869a8717eed29eae186688a7fdb7f5c8
|
/venv/Lib/site-packages/test/test_stock_price.py
|
4ce7f9435eb697392c8e98c7711ab22e0976e446
|
[] |
no_license
|
singhd3101/CS5100-Stock-Market-Prediction
|
6d43bd39633dd80bb1141dc550302874a5bc0939
|
2804a6270a05155e168d0f2518bcd97f1c9bcb3e
|
refs/heads/master
| 2020-11-26T03:56:02.613630
| 2019-12-19T02:22:13
| 2019-12-19T02:22:13
| 228,958,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,286
|
py
|
# coding: utf-8
"""
Intrinio API
Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner. # noqa: E501
OpenAPI spec version: 2.2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import intrinio_sdk
from intrinio_sdk.models.stock_price import StockPrice # noqa: E501
from intrinio_sdk.rest import ApiException
class TestStockPrice(unittest.TestCase):
    """Unit-test stubs for the StockPrice model (generated scaffold)."""

    def setUp(self):
        """No fixtures required for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testStockPrice(self):
        """Placeholder test for StockPrice.

        FIXME: construct the object with mandatory attributes, e.g.
        ``intrinio_sdk.models.stock_price.StockPrice()``.
        """
        pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
ac68c34a9df77b38ee0be71b8c371854aa47da18
|
142fd48d2c09bc83ba31b96553fc6d27fad596a3
|
/v1/202.happy-number.132775164.ac.py
|
76ae51a59bdadd727573d185296fe6de77a038ba
|
[] |
no_license
|
goalong/lc
|
baaa8ecc55ecdb136271687d21609832f32ccf6e
|
7b45d500e65c759cc2e278d33d9d21925a713017
|
refs/heads/master
| 2021-10-28T03:40:23.534592
| 2019-04-21T14:29:47
| 2019-04-21T14:29:47
| 111,088,996
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
#
# [202] Happy Number
#
# https://leetcode.com/problems/happy-number/description/
#
# algorithms
# Easy (41.39%)
# Total Accepted: 153.8K
# Total Submissions: 371.6K
# Testcase Example: '1'
#
# Write an algorithm to determine if a number is "happy".
#
# A happy number is a number defined by the following process: Starting with
# any positive integer, replace the number by the sum of the squares of its
# digits, and repeat the process until the number equals 1 (where it will
# stay), or it loops endlessly in a cycle which does not include 1. Those
# numbers for which this process ends in 1 are happy numbers.
#
# Example: 19 is a happy number
#
#
# 1^2 + 9^2 = 82
# 8^2 + 2^2 = 68
# 6^2 + 8^2 = 100
# 1^2 + 0^2 + 0^2 = 1
#
#
# Credits:Special thanks to @mithmatt and @ts for adding this problem and
# creating all test cases.
#
class Solution(object):
    def isHappy(self, n):
        """Return True if *n* is a happy number.

        :type n: int
        :rtype: bool

        Repeatedly replaces n with the sum of the squares of its digits,
        tracking visited values to detect the cycle that unhappy numbers
        fall into.
        """
        seen = set()
        while n != 1 and n not in seen:
            seen.add(n)
            n = self.get_next(n)
        return n == 1

    def get_next(self, num):
        """Return the sum of the squares of the decimal digits of *num*."""
        return sum(int(digit) * int(digit) for digit in str(num))
|
[
"[email protected]"
] | |
2c8f96dfd60e771a4512c4b9b459a21ff197f9ae
|
e04c3af194afacf7e454eb63a1f917c0df46698d
|
/MAST/test/workflow_test/workflow_setup.py
|
4fecd61563ecda63785f435c50709a593de50be3
|
[
"MIT"
] |
permissive
|
kcantosh/MAST
|
050716de2580fe53cf241b0d281a84f13175b542
|
4138b87e5a1038eb65023232f80907333d3196f2
|
refs/heads/dev
| 2021-01-20T16:51:22.759949
| 2017-01-31T16:40:45
| 2017-01-31T16:40:45
| 82,833,665
| 0
| 1
| null | 2017-02-22T17:34:13
| 2017-02-22T17:34:13
| null |
UTF-8
|
Python
| false
| false
| 4,053
|
py
|
##############################################################
# This code is part of the MAterials Simulation Toolkit (MAST)
#
# Maintainer: Tam Mayeshiba
# Last updated: 2016-02-08
##############################################################
##############################################################
# Requirements:
# 1. Home directory access from where the test will be run
# 2. MAST installation
##############################################################
import os
import time
import shutil
import numpy as np
from MAST.utility import MASTError
from MAST.utility import dirutil
from MAST.utility import MASTFile
import MAST
import subprocess
testname ="workflow_test"
testdir = dirutil.get_test_dir(testname)
checkname = os.path.join(testdir, "WORKFLOW_CONFIG")
def verify_checks():
    # Ensure every "Check" line in WORKFLOW_CONFIG was answered 'y' (only the
    # first character after the colon is inspected); raise MASTError otherwise
    # so the workflow test aborts before doing any work.
    checkfile=MASTFile(checkname)
    for myline in checkfile.data:
        if "Check" in myline:
            checkresult = myline.split(":")[1].strip()[0].lower()
            if checkresult == 'y':
                print "Checks okay"
            else:
                raise MASTError("verify checks","Checks for workflow setup not verified. Check %s" % checkname)
    return
def get_variables():
    # Parse "workflow_<name> = <value>" lines from WORKFLOW_CONFIG into a
    # dict, after first verifying the manual checks were confirmed.
    verify_checks()
    myvars=dict()
    checkfile=MASTFile(checkname)
    for myline in checkfile.data:
        if myline[0:9] == "workflow_":
            mykey = myline.split("=")[0].strip()
            myval = myline.split("=")[1].strip()
            myvars[mykey] = myval
    return myvars
def create_workflow_test_script(inputfile):
    # Build a one-line bash submission script that runs the generic MAST
    # workflow driver on *inputfile* inside a freshly copied test tree.
    # Returns [test_tree_dir, submit_script_path, output_file_path].
    myvars = get_variables()
    # set up testing directory tree
    wtdir=myvars['workflow_test_directory']
    mast_test_dir=os.path.join(wtdir,"no_directory_yet")
    while not (os.path.isdir(mast_test_dir)):
        # Timestamped name guarantees a fresh directory per run; the loop
        # retries until a non-existing name is found and populated.
        timestamp=time.strftime("%Y%m%dT%H%M%S")
        mast_test_dir = os.path.join(wtdir,"output_test_%s" % timestamp)
        if not (os.path.isdir(mast_test_dir)):
            shutil.copytree("%s/mini_mast_tree" % wtdir, mast_test_dir)
    # set up output file and submission script
    shortname = inputfile.split(".")[0]
    output="%s/output_%s" % (wtdir, shortname)
    submitscript="%s/submit_%s.sh" % (wtdir, shortname)
    generic_script="%s/generic_mast_workflow.sh" % wtdir
    bashcommand="bash %s %s %s %s %s %s >> %s" % (generic_script,
        mast_test_dir,
        myvars["workflow_examples_located"],
        inputfile,
        myvars["workflow_activate_command"],
        myvars["workflow_testing_environment"],
        output)
    submitfile=MASTFile()
    submitfile.data.append(bashcommand + "\n")
    submitfile.to_file(submitscript)
    return [mast_test_dir, submitscript, output]
def generic_submit(inputfile):
    # Run the workflow test script synchronously, then poll the tail of its
    # output file (502 polls x 30 s, roughly 4 hours) for the
    # "Workflow completed" marker.
    # Returns ["Completed"|"Unfinished", test_tree_dir]; raises OSError if
    # the script never produced an output file.
    [mast_test_dir, submitscript, outputname] = create_workflow_test_script(inputfile)
    mygsub = "bash %s" % submitscript
    gproc = subprocess.Popen(mygsub, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    gproc.wait()
    if not (os.path.isfile(outputname)):
        print "Sleep 5"
        time.sleep(5)
    if not (os.path.isfile(outputname)):
        raise OSError("Test did not create output %s" % outputname)
    print "Output %s created" % outputname
    waitct=0
    tailcmd = "tail -n 3 %s" % outputname
    maxwait=502
    while waitct < maxwait:
        # Only the last three lines are inspected each poll.
        tail3proc=subprocess.Popen(tailcmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        tail3=tail3proc.communicate()[0]
        tail3proc.wait()
        for tailline in tail3.split("\n"):
            if "Workflow completed" in tailline:
                return ["Completed", mast_test_dir]
        time.sleep(30)
        waitct = waitct + 1
        print "Output not complete. Attempt %i/%i" % (waitct, maxwait)
    return ["Unfinished", mast_test_dir]
def get_finished_recipe_dir(mast_test_dir):
    """Return the first directory found under <mast_test_dir>/ARCHIVE.

    Non-directory entries are skipped; returns "" when no directory exists.
    """
    archive = os.path.join(mast_test_dir, "ARCHIVE")
    for entry in os.listdir(archive):
        candidate = os.path.join(archive, entry)
        if os.path.isdir(candidate):
            return candidate
    return ""
|
[
"[email protected]"
] | |
389ce0bd3e07869ffa7d5d82fc97f0e6114b317e
|
1740075fca5d99eee47d8ab10e918be07f544d55
|
/catalog/migrations/0002_auto_20191107_1239.py
|
bd20f9edcd6d8711f45f088ad0c948df3acd2e3a
|
[] |
no_license
|
Grayw0lf/local_library
|
0933bd5d35ef64ee4dc90dd0cdd83686a8eeed3a
|
652f0260bfd153138eaee24810685c52f4063b07
|
refs/heads/master
| 2023-04-30T10:23:38.048841
| 2019-11-13T21:10:09
| 2019-11-13T21:10:09
| 221,551,305
| 1
| 0
| null | 2023-04-21T20:40:05
| 2019-11-13T21:03:49
|
Python
|
UTF-8
|
Python
| false
| false
| 688
|
py
|
# Generated by Django 2.2.7 on 2019-11-07 09:39
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames Author.date_of_died to
    # date_of_death and redefines BookInstance.id as a UUID primary key.

    dependencies = [
        ('catalog', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='author',
            old_name='date_of_died',
            new_name='date_of_death',
        ),
        migrations.AlterField(
            model_name='bookinstance',
            name='id',
            # NOTE(review): the default is a *fixed* UUID literal captured at
            # makemigrations time (the model presumably uses default=uuid.uuid4).
            # Harmless as migration history, but rows created through this
            # default would all share one id -- confirm against the model.
            field=models.UUIDField(default=uuid.UUID('976d9b8b-7c2f-4e07-9879-78d7f1d2fe11'), help_text='Unique ID for this particular book across whole library', primary_key=True, serialize=False),
        ),
    ]
|
[
"[email protected]"
] | |
46e425071b72856e84300bad5e705cc2c7dff76d
|
800b5cd8c3d58b60d80aca551e54af28ec3c9f18
|
/code/chapter_05_example_14.py
|
81334fc4e4cc158d144cc5ba91bcb59c006f0045
|
[] |
no_license
|
CyberLight/two-scoops-of-django-1.8
|
6591347cb20f3c16e252943c04f0f524f8e8b235
|
423971ad609ec9a552617fc4f7424e701295c09b
|
refs/heads/master
| 2021-01-21T03:02:52.704822
| 2015-05-11T16:32:31
| 2015-05-11T16:32:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,626
|
py
|
"""
Using This Code Example
=========================
The code examples provided are provided by Daniel Greenfeld and Audrey Roy of
Two Scoops Press to help you reference Two Scoops of Django: Best Practices
for Django 1.8. Code samples follow PEP-0008, with exceptions made for the
purposes of improving book formatting. Example code is provided "as is", and
is not intended to be, and should not be considered or labeled as "tutorial code".
Permissions
============
In general, you may use the code we've provided with this book in your programs
and documentation. You do not need to contact us for permission unless you're
reproducing a significant portion of the code or using it in commercial
distributions. Examples:
* Writing a program that uses several chunks of code from this course does not require permission.
* Selling or distributing a digital package from material taken from this book does require permission.
* Answering a question by citing this book and quoting example code does not require permission.
* Incorporating a significant amount of example code from this book into your product's documentation does require permission.
Attributions usually include the title, author, publisher and an ISBN. For
example, "Two Scoops of Django: Best Practices for Django 1.8, by Daniel
Roy Greenfeld and Audrey Roy Greenfeld. Copyright 2015 Two Scoops Press (ISBN-GOES-HERE)."
If you feel your use of code examples falls outside fair use of the permission
given here, please contact us at [email protected]."""
# Top of settings/production.py
import os

# Deliberately fails fast (KeyError) at import time if the variable is
# missing from the environment: production must not start without its secret.
SOME_SECRET_KEY = os.environ["SOME_SECRET_KEY"]
|
[
"[email protected]"
] | |
6f2edb09e5c1f151145ab5c1adacec423009c475
|
e452f89c51180487f2ed68c33ca2fed54e14a967
|
/1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/03_Conditional-Statements-Advanced/01.Lab-04-Personal-Titles.py
|
72a970d05c0e96713bf60476264312a5d9ccd0bc
|
[
"MIT"
] |
permissive
|
karolinanikolova/SoftUni-Software-Engineering
|
c996f18eea9fb93164ab674614e90b357ef4858a
|
7891924956598b11a1e30e2c220457c85c40f064
|
refs/heads/main
| 2023-06-21T23:24:55.224528
| 2021-07-22T16:15:59
| 2021-07-22T16:15:59
| 367,432,464
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
# 4. Title by age and gender
# Read an age (float) and a gender ('m' or 'f') from the user and print the
# matching form of address:
#   'Mr.'    - male   ('m'), 16 or older
#   'Master' - boy    ('m'), under 16
#   'Ms.'    - female ('f'), 16 or older
#   'Miss'   - girl   ('f'), under 16
age = float(input())
sex = input()

if sex == 'f':
    if age >= 16:
        print('Ms.')
    elif age < 16:
        print('Miss')
elif sex == 'm':
    if age >= 16:
        print('Mr.')
    elif age < 16:
        print('Master')
|
[
"[email protected]"
] | |
18a62f5f58f3eacf0f4b6e83ac4fda4770a77484
|
a2e638cd0c124254e67963bda62c21351881ee75
|
/Extensions/StructuredProductsDealPackage/FPythonCode/SP_ModuleReload.py
|
ed019b05682e9d07250ac27a96aa65a7a6824bdd
|
[] |
no_license
|
webclinic017/fa-absa-py3
|
1ffa98f2bd72d541166fdaac421d3c84147a4e01
|
5e7cc7de3495145501ca53deb9efee2233ab7e1c
|
refs/heads/main
| 2023-04-19T10:41:21.273030
| 2021-05-10T08:50:05
| 2021-05-10T08:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,569
|
py
|
# Need to add
# Additional Info
# - Participation (DealPackage)
# - CapitalProtection (DealPackage)
# - StrikePricePct (Instrument)
# - BarrierLevelPct (Instrument)
# - ProductQuantity (Deal Package)
# - AccumulatorLeverage
# Exotic Events
# - Initial Fixing
# ChoiceLists
# - AccDec (Val Group)
# - accDecModelDesc (Valuation Extension)
import SP_DealPackageHelper
import importlib
importlib.reload(SP_DealPackageHelper)
import SP_BusinessCalculations
importlib.reload(SP_BusinessCalculations)
import CompositeComponentBase
importlib.reload(CompositeComponentBase)
import CompositeExoticEventComponents
importlib.reload(CompositeExoticEventComponents)
import CompositeExoticComponents
importlib.reload(CompositeExoticComponents)
import CompositeOptionAdditionComponents
importlib.reload(CompositeOptionAdditionComponents)
import CompositeCashFlowComponents
importlib.reload(CompositeCashFlowComponents)
import CompositeOptionComponents
importlib.reload(CompositeOptionComponents)
import CompositeBasketComponents
importlib.reload(CompositeBasketComponents)
import CompositeBasketOptionComponents
importlib.reload (CompositeBasketOptionComponents)
import CompositeTradeComponents
importlib.reload(CompositeTradeComponents)
import StructuredProductBase
importlib.reload(StructuredProductBase)
import Validation_BarrierReverseConvertible
importlib.reload(Validation_BarrierReverseConvertible)
import SP_BarrierReverseConvertible
importlib.reload(SP_BarrierReverseConvertible)
import SP_CapitalProtectedNote
importlib.reload(SP_CapitalProtectedNote)
import SP_EqStraddle
importlib.reload(SP_EqStraddle)
import SP_CallPutSpread
importlib.reload(SP_CallPutSpread)
import SP_DualCurrencyDeposit
importlib.reload(SP_DualCurrencyDeposit)
import SP_WeddingCakeDeposit
importlib.reload(SP_WeddingCakeDeposit)
import SP_AccumulatorSetup
importlib.reload(SP_AccumulatorSetup)
import SP_AccumulatorCustomInsDef
importlib.reload(SP_AccumulatorCustomInsDef)
import SP_AccumulatorValuation
importlib.reload(SP_AccumulatorValuation)
import SP_AccumulatorModel
importlib.reload(SP_AccumulatorModel)
import SP_AccumulatorDealPackage
importlib.reload(SP_AccumulatorDealPackage)
import SP_Autocall
importlib.reload(SP_Autocall)
import SP_CapitalProtectedCertificate
importlib.reload(SP_CapitalProtectedCertificate)
import SP_CustomTradeActions
importlib.reload(SP_CustomTradeActions)
import SP_InvokeTradeActions
importlib.reload(SP_InvokeTradeActions)
import CustomLifeCycleEvents
importlib.reload(CustomLifeCycleEvents)
|
[
"[email protected]"
] | |
d60cd1bfe7525f7f1d1505b330008095c64c52b2
|
5e59252778f8b6465f6e9c4a1890297624cab8f8
|
/shell.py
|
15b5a123b00f2886e529971c6a178f4639a69ac8
|
[] |
no_license
|
tazjel/rpathcmd
|
fa62dfed77d56ea100c8f76a035486b2761058ee
|
0ebffe639f329665824fdd94d8b5c89ce695f153
|
refs/heads/master
| 2021-01-16T20:03:25.225459
| 2012-11-05T16:09:17
| 2012-11-05T16:09:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,104
|
py
|
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2012 James Tanner <[email protected]>
#
# NOTE: the 'self' variable is an instance of RpathShell
import atexit, logging, os, readline, re, sys
from cmd import Cmd
from pwd import getpwuid
from rpathcmd.utils import *
import pdb
class RpathShell(Cmd):
    """Interactive rpathcmd shell (cmd.Cmd subclass, Python 2).

    Handles prompt rendering, persistent readline history with '!'-style
    history expansion, and pulls its do_* command handlers in from the
    modules listed in __module_list.
    """
    __module_list = [ 'api', 'projects', 'groups', 'systems', 'images', 'platforms', 'targets', 'packages']
    # a SyntaxError is thrown if we don't wrap this in an 'exec'
    for module in __module_list:
        exec 'from %s import *' % module
    # maximum length of history file
    HISTORY_LENGTH = 1024
    cmdqueue = []
    completekey = 'tab'
    stdout = sys.stdout
    #prompt_template = 'rpathcmd {SSM:##}> '
    prompt_template = 'rpathcmd> '
    current_line = ''
    # do nothing on an empty line
    emptyline = lambda self: None
    def __init__(self, options):
        """Initialise session state, the config directory and readline."""
        self.session = ''
        self.username = ''
        self.server = ''
        self.ssm = {}
        # render the initial prompt
        self.postcmd(False, '')
        # make the options available everywhere
        self.options = options
        #pdb.set_trace()
        userinfo = getpwuid(os.getuid())
        # NOTE(review): the config dir is ~/.spacecmd although the tool is
        # rpathcmd -- presumably inherited from spacecmd; confirm.
        self.conf_dir = os.path.join(userinfo[5], '.spacecmd')
        try:
            if not os.path.isdir(self.conf_dir):
                os.mkdir(self.conf_dir, 0700)
        except OSError:
            logging.error('Could not create directory %s' % self.conf_dir)
        self.history_file = os.path.join(self.conf_dir, 'history')
        try:
            # don't split on hyphens or colons during tab completion
            newdelims = readline.get_completer_delims()
            newdelims = re.sub(':|-|/', '', newdelims)
            readline.set_completer_delims(newdelims)
            if not options.nohistory:
                try:
                    if os.path.isfile(self.history_file):
                        readline.read_history_file(self.history_file)
                    readline.set_history_length(self.HISTORY_LENGTH)
                    # always write the history file on exit
                    atexit.register(readline.write_history_file,
                                    self.history_file)
                except IOError:
                    logging.error('Could not read history file')
        except:
            pass
    # handle commands that exit the shell
    def precmd(self, line):
        """Pre-process *line* before dispatch: trim whitespace, handle
        quit/exit/eof, pass trivial commands through, expand '--help', and
        resolve '!'-style history references (!!, !<n>, !<prefix>)."""
        # remove leading/trailing whitespace
        line = re.sub('^\s+|\s+$', '', line)
        # don't do anything on empty lines
        if line == '':
            return ''
        # terminate the shell
        if re.match('quit|exit|eof', line, re.I):
            print
            sys.exit(0)
        # don't attempt to login for some commands
        if re.match('help|login|logout|whoami|history|clear', line, re.I):
            return line
        # login before attempting to run a command
        #if not self.session:
        #pdb.set_trace()
        #self.do_login('')
        #if self.session == '': return ''
        parts = line.split()
        if len(parts):
            command = parts[0]
        else:
            return ''
        if len(parts[1:]):
            args = ' '.join(parts[1:])
        else:
            args = ''
        # print the help message if the user passes '--help'
        if re.search('--help', line):
            return 'help %s' % command
        # should we look for an item in the history?
        if command[0] != '!' or len(command) < 2:
            return line
        # remove the '!*' line from the history
        self.remove_last_history_item()
        history_match = False
        if command[1] == '!':
            # repeat the last command
            line = readline.get_history_item(
                readline.get_current_history_length())
            if line:
                history_match = True
            else:
                logging.warning('%s: event not found' % command)
                return ''
        # attempt to find a numbered history item
        if not history_match:
            try:
                number = int(command[1:])
                line = readline.get_history_item(number)
                if line:
                    history_match = True
                else:
                    # NOTE(review): this bare Exception is not caught by the
                    # IndexError/ValueError handlers below and would
                    # propagate to the caller -- verify intent.
                    raise Exception
            except IndexError:
                pass
            except ValueError:
                pass
        # attempt to match the beginning of the string with a history item
        if not history_match:
            history_range = range(1, readline.get_current_history_length())
            history_range.reverse()
            for i in history_range:
                item = readline.get_history_item(i)
                if re.match(command[1:], item):
                    line = item
                    history_match = True
                    break
        # append the arguments to the substituted command
        if history_match:
            line += ' %s' % args
            readline.add_history(line)
            print line
            return line
        else:
            logging.warning('%s: event not found' % command)
            return ''
    # update the prompt with the SSM size
    def postcmd(self, stop, line):
        """Refresh the prompt after every command ('##' -> current SSM size)."""
        self.prompt = re.sub('##', str(len(self.ssm)), self.prompt_template)
# vim:ts=4:expandtab:
|
[
"[email protected]"
] | |
a98c0f87c5e54efc98415dca9576d0bcecc3346f
|
aae551baa369fda031f363c2afbdf1984467f16d
|
/Machine_Learning/Programming_Assignments/CS15B001_PA3/Code/q2/bernoulli.py
|
59000649f234d836785dc85871bffe40b30ef448
|
[] |
no_license
|
ameet-1997/Course_Assignments
|
37f7d4115baec383ccf029772efcf9c33beb2a23
|
629e9d5cfc6fa6cf37a96c5fcc33bc669cbdc59d
|
refs/heads/master
| 2021-05-16T16:23:32.731296
| 2018-02-03T05:57:01
| 2018-02-03T05:57:01
| 119,939,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,114
|
py
|
import pandas as pd
import numpy as np
from scipy import sparse
import os
import functions
import time
from sklearn.model_selection import KFold
from sklearn.metrics import precision_recall_fscore_support
from tabulate import tabulate
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.metrics import precision_recall_curve
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Load the data
# Dense term-document matrix (rows = documents) and matching label vector;
# load_sparse_csr is a project helper defined in functions.py.
data_matrix = functions.load_sparse_csr('data_sparse').todense()
labels_matrix = np.loadtxt('labels.csv', delimiter=',')
# Cross Validation
# 5-fold CV with sklearn's Bernoulli naive Bayes; per-fold precision/recall/
# F-score for class 1 (index 1 of the per-class arrays) are accumulated and
# averaged after the loop.
kf = KFold(n_splits=5)
counter = 0
[avr_prec, avr_rec, avr_fsc] = [.0,.0,.0]
for train_index, test_index in kf.split(data_matrix):
    counter += 1
    data_train, data_test = data_matrix[train_index], data_matrix[test_index]
    labels_train, labels_test = labels_matrix[train_index], labels_matrix[test_index]
    b = BernoulliNB()
    b.fit(data_train, labels_train)
    predicted_labels = b.predict(data_test)
    # The commented block below is a from-scratch Bernoulli NB with add-1
    # smoothing that the sklearn call above replaced; kept for reference.
    # # Estimate the class priors
    # spam_prior = float(np.count_nonzero(labels_train == 0))/labels_train.shape[0]
    # ham_prior = float(np.count_nonzero(labels_train == 1))/labels_train.shape[0]
    # # Estimate the conditional probabilities
    # # Get all spam articles and get the column sum
    # # Do the same for all ham articles
    # # Add-1 smoothing is performed here
    # cond_ham = ((np.count_nonzero(data_train[labels_train==1], axis=0)+1).astype(dtype=float))/(data_train[labels_train==1].shape[0]+2)
    # cond_spam = ((np.count_nonzero(data_train[labels_train==0], axis=0)+1).astype(dtype=float))/(data_train[labels_train==0].shape[0]+2)
    # # Using log so that there are no underflow problems
    # predicted_labels = np.ones(shape=labels_test.shape, dtype=float)
    # for i in range(predicted_labels.shape[0]):
    # 	score_ham = np.sum(np.multiply(np.log(cond_ham), data_test[i,:]))+np.log(ham_prior)
    # 	score_spam = np.sum(np.multiply(np.log(cond_spam), data_test[i,:]))+np.log(spam_prior)
    # 	if score_spam > score_ham:
    # 		predicted_labels[i] = 0
    # 	else:
    # 		predicted_labels[i] = 1
    # print("Fold Number "+str(counter))
    [prec,rec,fsc,sup] = precision_recall_fscore_support(labels_test, predicted_labels)
    avr_prec += prec[1]
    avr_rec += rec[1]
    avr_fsc += fsc[1]
    # print tabulate([prec, rec, fsc], headers=['Spam', 'Ham'])
    # print("")
# Averages over the 5 folds (divisor matches n_splits above).
print("")
print("Average Scores for Spam Class")
print("Precision: "+str(avr_prec/5))
print("Recall: "+str(avr_rec/5))
print("FScore: "+str(avr_fsc/5))
# Plot the PR Curves
# Refit on a 67/33 hold-out split and plot the precision-recall curve for
# the class-1 predicted probabilities.
train_data, test_data, train_labels, test_labels = train_test_split(data_matrix, labels_matrix, test_size=0.33, random_state=42)
m = BernoulliNB()
m.fit(train_data, train_labels)
probab = m.predict_proba(test_data)
precision_, recall_, threshold_ = precision_recall_curve(test_labels, probab[:,1])
fig = plt.figure()
fig.suptitle('Precision Recall Curve')
ax = fig.add_subplot(111)
ax.set_xlabel('Precision')
ax.set_ylabel('Recall')
# ax.fill(precision_,np.zeros(shape=precision_.shape),'b')
# Prepend (0, 1) so the filled polygon is anchored at the axes' corner.
p = [0]
r = [1]
p.extend(list(precision_))
r.extend(list(recall_))
ax.fill(p, r,'b', zorder=5)
plt.plot(p, r)
plt.show()
|
[
"[email protected]"
] | |
2241916c7d68776e94af575a2559596e236b1ca4
|
6c298f03496560276fb9f478cbefc218ecd24e9a
|
/VoiceInput/program/lib/voiceinput.py
|
7f661347d3c4a859be5930192ef02c22284a2b7f
|
[] |
no_license
|
koenschepens/OldPhone
|
1f3fccd6018e14e779373243a0e90a759a7425f9
|
5ac9247d0c9e08d6af8fb384479c53b48c174aa6
|
refs/heads/master
| 2021-01-10T08:31:43.368378
| 2016-03-26T19:06:07
| 2016-03-26T19:06:07
| 43,725,567
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,048
|
py
|
import sys
import xbmc, xbmcgui, xbmcaddon
try:
import simplejson
except ImportError:
import json as simplejson
import httplib
# Addon handle, its install path, and the localized-string accessor,
# all resolved once at import time.
__addon__ = xbmcaddon.Addon()
__cwd__ = __addon__.getAddonInfo('path').decode("utf-8")
__language__ = __addon__.getLocalizedString
class InputWindow(xbmcgui.WindowXMLDialog):
    """XML-skinned text-input dialog used by the Keyboard wrapper below.

    Largely stubbed: getText() currently returns a hard-coded string.
    """
    def __init__( self, *args, **kwargs ):
        self.Kodi14 = False
        # Control-ID constants: numeric keys '0'-'9' and the edit label.
        self.CTL_NUM_START = 48
        self.CTL_NUM_END = 57
        self.CTL_LABEL_EDIT = 310
        # NOTE(review): .decode() raises AttributeError when "default" is
        # absent — the `or` fallback only covers an empty decoded string.
        self.strEdit = kwargs.get("default").decode('utf-8') or u""
        self.strHeading = kwargs.get("heading") or ""
        self.bIsConfirmed = False
        self.oldPhone = True
        # NOTE(review): LOWER is not defined in this module as shown —
        # presumably a package-level key-mode constant; confirm it exists.
        self.keyType = LOWER
        self.words = []
        self.hzcode = ''
        self.pos = 0
        self.num = 0
        xbmcgui.WindowXMLDialog.__init__(self)
        xbmc.log(msg="HEE HALLO@!!", level=xbmc.LOGDEBUG)
    def initControl(self):
        # Place two extra labels (input code + candidate list) directly
        # below the edit control, sized from its geometry.
        pEdit = self.getControl(self.CTL_LABEL_EDIT)
        px = pEdit.getX()
        py = pEdit.getY()
        pw = pEdit.getWidth()
        ph = pEdit.getHeight()
        self.listw = pw - 95
        self.CTL_HZCODE = xbmcgui.ControlLabel(px, py + ph, 90, 30, '')
        self.CTL_HZLIST = xbmcgui.ControlLabel(px + 95, py + ph, pw - 95, 30, '')
        self.addControl(self.CTL_HZCODE)
        self.addControl(self.CTL_HZLIST)
    def getText(self):
        # Stub: always returns a fixed string.
        return "MONGOL!"
class Keyboard:
    """Drop-in replacement for xbmc.Keyboard backed by InputWindow."""
    def __init__( self, default='', heading='' ):
        self.bIsConfirmed = False
        self.strEdit = default
        self.strHeading = heading
    def doModal (self):
        # Show the dialog modally, then copy its outcome into this object.
        self.win = InputWindow("DialogKeyboard.xml", __cwd__, heading=self.strHeading, default=self.strEdit )
        self.win.doModal()
        # NOTE(review): InputWindow defines no isConfirmed() in this file —
        # confirm it is inherited/defined elsewhere, otherwise this raises
        # AttributeError.
        self.bIsConfirmed = self.win.isConfirmed()
        self.strEdit = self.win.getText()
        del self.win
    def setHeading(self, heading):
        # NOTE(review): ignores the argument and sets a fixed debug string.
        self.strHeading = "WHOWHOWWWWOOOOO"
    def isConfirmed(self):
        return self.bIsConfirmed
    def getText(self):
        # Stub: fixed return value.
        return "youtube"
|
[
"[email protected]"
] | |
5505cd4011c837c9e22cf9e9d81addb8442e050d
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/apps/oozie/src/oozie/migrations/0005_initial.py
|
2688a433ed8dcc89995fc5f9b23a9defb2088449
|
[
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,402
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-06 18:55
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Wire up the FK fields linking Link/Job/History to their targets.

    Auto-generated by Django 1.11; edits here should normally be limited
    to comments.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('oozie', '0004_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='link',
            name='child',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parent_node', to='oozie.Node', verbose_name=b''),
        ),
        migrations.AddField(
            model_name='link',
            name='parent',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='child_node', to='oozie.Node'),
        ),
        migrations.AddField(
            model_name='job',
            name='owner',
            field=models.ForeignKey(help_text='Person who can modify the job.', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Owner'),
        ),
        migrations.AddField(
            model_name='history',
            name='job',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oozie.Job'),
        ),
    ]
|
[
"[email protected]"
] | |
5ba300fb8fe455146525b436819e316a5e780da1
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2811/61132/294777.py
|
4cbb3ce02f1703d0fb35813ef04ff2bc5e50a6e3
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
# Read p (modulus) and n (number of values) from the first line, then one
# integer per subsequent line.
p,n=map(int,input().split())
l=[]
for i in range(n):
    l.append(int(input()))
dic={}
# Scan the values, tracking which residues mod p have been seen.
for pos,i in enumerate(l):
    key=i%p
    # NOTE(review): this prints at the FIRST residue not yet in `dic`,
    # which for any non-empty input is position 1 — verify the intended
    # condition isn't the inverse (first REPEATED residue).
    if dic.get(key,'')=='':
        print(pos+1)
        break
    else:
        dic[key]=i
else:
    # Loop completed without a break: no qualifying position found.
    print(-1)
|
[
"[email protected]"
] | |
e400e3f7cfee1b0808a278fe8e94120ceb12437e
|
692b907d07eee8ce3ee32a1fda74b6d92fd6c548
|
/tests/models/validators/v1_3_0/jsd_d9bdb9034df99dba.py
|
4f3ce36a7f1b34bd26fe19e07e1dc62094323ae1
|
[
"MIT"
] |
permissive
|
AltusConsulting/dnacentercli
|
04c9c7d00b25753a26c643994388dd4e23bf4c54
|
26ea46fdbd40fc30649ea1d8803158655aa545aa
|
refs/heads/master
| 2022-12-16T04:50:30.076420
| 2020-07-17T22:12:39
| 2020-07-17T22:12:39
| 212,206,213
| 0
| 0
|
MIT
| 2022-12-08T06:39:49
| 2019-10-01T21:50:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,451
|
py
|
# -*- coding: utf-8 -*-
"""DNA Center Get Site Count data model.
Copyright (c) 2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorD9BdB9034Df99Dba(object):
    """Get Site Count request schema definition."""
    def __init__(self):
        super(JSONSchemaValidatorD9BdB9034Df99Dba, self).__init__()
        # Compile the JSON schema once at construction time; the .replace()
        # strips the 16-space source indentation from the triple-quoted
        # literal before it is parsed as JSON.
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "properties": {
                "response": {
                "description":
                 "Response",
                "type": [
                "string",
                "null"
                ]
                },
                "version": {
                "description":
                 "Version",
                "type": [
                "string",
                "null"
                ]
                }
                },
                "type": "object"
                }'''.replace("\n" + ' ' * 16, '')
        ))
    def validate(self, request):
        # Re-raise schema violations as the SDK's MalformedRequest so
        # callers never see fastjsonschema's own exception type.
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
|
[
"[email protected]"
] | |
0628946d4e9a280e8355cd0413d75bd4a43845dc
|
84e5297e214dd94105df7bbe627a506773d70224
|
/Assignment2/dnn_tf.py
|
478f858ded57e45f0034d15cb734f6130922bf28
|
[] |
no_license
|
toannguyen1904/VietAI-ML-Foundation-5
|
b02b1463d0b820088fa7400112d41d4291357172
|
5adcd49c88e4c886b15973254d56c07c15a8660d
|
refs/heads/master
| 2022-05-16T10:27:27.570181
| 2020-03-16T05:37:58
| 2020-03-16T05:37:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,419
|
py
|
"""dnn_tf_sol.py
Solution of deep neural network implementation using tensorflow
Author: Kien Huynh
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from util import *
from dnn_np import test
import pdb
def bat_classification():
    """Train and evaluate a DNN classifier on the bat dataset.

    Loads data/bat.dat via get_bat_data(), normalizes features, fits a
    3x100 ReLU network with momentum SGD through tf.estimator, and
    reports test-set metrics via test().
    """
    # Load data from file
    # Make sure that bat.dat is in data/
    train_x, train_y, test_x, test_y = get_bat_data()
    train_x, _, test_x = normalize(train_x, train_x, test_x)
    # Estimator requires integer class labels as flat arrays.
    test_y = test_y.flatten().astype(np.int32)
    train_y = train_y.flatten().astype(np.int32)
    num_class = (np.unique(train_y)).shape[0]
    # DNN parameters
    hidden_layers = [100, 100, 100]
    learning_rate = 0.01
    batch_size = 200
    steps = 2000
    # Specify that all features have real-value data
    feature_columns = [tf.feature_column.numeric_column("x", shape=[train_x.shape[1]])]
    # Available activition functions
    # https://www.tensorflow.org/api_guides/python/nn#Activation_Functions
    # tf.nn.relu
    # tf.nn.elu
    # tf.nn.sigmoid
    # tf.nn.tanh
    activation = tf.nn.relu
    # [TODO 1.7] Create a neural network and train it using estimator
    # Some available gradient descent optimization algorithms
    # https://www.tensorflow.org/api_docs/python/tf/train#classes
    # tf.train.GradientDescentOptimizer
    # tf.train.AdadeltaOptimizer
    # tf.train.AdagradOptimizer
    # tf.train.AdagradDAOptimizer
    # tf.train.MomentumOptimizer
    # tf.train.AdamOptimizer
    # tf.train.FtrlOptimizer
    # tf.train.ProximalGradientDescentOptimizer
    # tf.train.ProximalAdagradOptimizer
    # tf.train.RMSPropOptimizer
    # Create optimizer
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.005)
    # build a deep neural network
    # https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier
    classifier = tf.estimator.DNNClassifier(feature_columns=feature_columns,
                                            hidden_units=hidden_layers,
                                            n_classes=num_class,
                                            activation_fn=activation,
                                            optimizer=optimizer)
    # Define the training inputs
    # https://www.tensorflow.org/api_docs/python/tf/estimator/inputs/numpy_input_fn
    train_input_fn = tf.estimator.inputs.numpy_input_fn(x = {"x": train_x},
                                                        y = train_y,
                                                        batch_size=batch_size,
                                                        shuffle=True,
                                                        num_epochs=None)
    # Train model.
    classifier.train(
        input_fn=train_input_fn,
        steps=steps)
    # Define the test inputs
    # NOTE(review): test_input_fn is defined but never used below —
    # prediction uses predict_input_fn instead.
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_x},
        y=test_y,
        num_epochs=1,
        shuffle=False)
    # Evaluate accuracy.
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_x},
        num_epochs=1,
        shuffle=False)
    y_hat = classifier.predict(input_fn=predict_input_fn)
    y_hat = list(y_hat)
    # Each prediction dict carries the class id as a bytes entry in 'classes'.
    y_hat = np.asarray([int(x['classes'][0]) for x in y_hat])
    test(y_hat, test_y)
def mnist_classification():
    """Train and evaluate a DNN classifier on Fashion-MNIST.

    Loads the train/val/test splits via get_mnist_data(), normalizes
    features, fits a 3x100 sigmoid network with momentum SGD through
    tf.estimator, and reports test-set metrics via test().
    """
    # Load data from file
    # Make sure that fashion-mnist/*.gz is in data/
    train_x, train_y, val_x, val_y, test_x, test_y = get_mnist_data(1)
    # NOTE(review): val_x is passed as train_x here, so the returned
    # "val" split is just a re-normalized copy of train — confirm
    # normalize()'s intended arguments.
    train_x, val_x, test_x = normalize(train_x, train_x, test_x)
    # Estimator requires integer class labels as flat arrays.
    train_y = train_y.flatten().astype(np.int32)
    val_y = val_y.flatten().astype(np.int32)
    test_y = test_y.flatten().astype(np.int32)
    num_class = (np.unique(train_y)).shape[0]
    # (Removed a leftover pdb.set_trace() debugger breakpoint that halted
    # every run at this point.)
    # DNN parameters
    hidden_layers = [100, 100, 100]
    learning_rate = 0.01
    batch_size = 200
    steps = 500
    # Specify that all features have real-value data
    feature_columns = [tf.feature_column.numeric_column("x", shape=[train_x.shape[1]])]
    # Choose activation function
    activation = tf.nn.sigmoid
    # Some available gradient descent optimization algorithms
    # TODO: [YC1.7] Create optimizer
    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.005)
    # build a deep neural network
    classifier = tf.estimator.DNNClassifier(feature_columns=feature_columns,
                                            hidden_units=hidden_layers,
                                            n_classes=num_class,
                                            activation_fn=activation,
                                            optimizer=optimizer)
    # Define the training inputs
    # https://www.tensorflow.org/api_docs/python/tf/estimator/inputs/numpy_input_fn
    train_input_fn = tf.estimator.inputs.numpy_input_fn(x = {"x": train_x},
                                                        y = train_y,
                                                        batch_size=batch_size,
                                                        shuffle=True,
                                                        num_epochs=None)
    # Train model.
    classifier.train(
        input_fn=train_input_fn,
        steps=steps)
    # Define the test inputs
    # NOTE(review): test_input_fn is defined but never used below —
    # prediction uses predict_input_fn instead.
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_x},
        y=test_y,
        num_epochs=1,
        shuffle=False)
    # Evaluate accuracy.
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_x},
        num_epochs=1,
        shuffle=False)
    y_hat = classifier.predict(input_fn=predict_input_fn)
    y_hat = list(y_hat)
    # Each prediction dict carries the class id as a bytes entry in 'classes'.
    y_hat = np.asarray([int(x['classes'][0]) for x in y_hat])
    test(y_hat, test_y)
if __name__ == '__main__':
    # Fixed seed for reproducibility; interactive plotting so any figures
    # opened downstream don't block execution.
    np.random.seed(2017)
    plt.ion()
    bat_classification()
    mnist_classification()
|
[
"[email protected]"
] | |
6e8f312ce8d26da7d371c9bd295ee0598f010704
|
5cc1296f10af0d65691fd01a23221d6d85f4deff
|
/cotizacion/migrations/0009_auto_20150805_1400.py
|
f213b7ccb1a13cf363c1195baf3b10f04e54fea3
|
[] |
no_license
|
yusnelvy/mtvmcotizacion
|
e52b58fe8c50d3921d36490084de328c52e4e9ea
|
07d2bd5f36350b149c16a0aa514bb610b0cd3e18
|
refs/heads/master
| 2016-09-05T23:31:15.800940
| 2015-11-07T13:12:30
| 2015-11-07T13:12:30
| 35,440,629
| 0
| 0
| null | 2015-12-18T16:16:23
| 2015-05-11T18:01:47
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Widen the tiempo_carga weight bounds and vehiculo capacities to
    DecimalField(max_digits=8, decimal_places=3).

    Auto-generated; edits here should normally be limited to comments.
    """
    dependencies = [
        ('cotizacion', '0008_auto_20150727_1207'),
    ]
    operations = [
        migrations.AlterField(
            model_name='tiempo_carga',
            name='peso_max',
            field=models.DecimalField(blank=True, default=0.0, max_digits=8, decimal_places=3),
        ),
        migrations.AlterField(
            model_name='tiempo_carga',
            name='peso_min',
            field=models.DecimalField(blank=True, default=0.0, max_digits=8, decimal_places=3),
        ),
        migrations.AlterField(
            model_name='vehiculo',
            name='capacidad_peso',
            field=models.DecimalField(max_digits=8, decimal_places=3),
        ),
        migrations.AlterField(
            model_name='vehiculo',
            name='capacidad_volumen',
            field=models.DecimalField(max_digits=8, decimal_places=3),
        ),
    ]
|
[
"[email protected]"
] | |
d510a984109e30d272424766c0f4ceedc20d77e2
|
ec5c35ac5163c4e81262a81a6a6c46667c01733d
|
/server/api.py
|
dfdfa338713c8c53b8fe3fb180871a407ed32b13
|
[] |
no_license
|
kotawiw/bytedance-exercise-2
|
27b32d81aa7e8040c1c8448acbe9c4ff20ff5b26
|
8db190487a6490ec852d8418d93ba62251a5437f
|
refs/heads/master
| 2022-12-24T00:04:53.047395
| 2020-09-23T11:48:13
| 2020-09-23T11:48:13
| 297,948,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,826
|
py
|
from flask import Blueprint, request, abort, jsonify
from flask import g
from server.auth import login_required
from server.models.users import User
from server.models.events import Event
from server.models.events import EventRegistration
# All API routes below are mounted under the /api prefix.
bp = Blueprint("api", __name__, url_prefix="/api")
@bp.route("/events", methods=("GET",))
def query_events():
    """List events, paged by the 'offset'/'limit' query args (default 0/10)."""
    offset = request.args.get("offset", default=0, type=int)
    limit = request.args.get("limit", default=10, type=int)
    total_count, events = Event.query_events(offset=offset, limit=limit)
    return jsonify(
        totalCount=total_count,
        values=[event_output(e) for e in events])
@bp.route("/events", methods=("POST",))
@login_required
def create_event():
    """Create an event from the JSON body, owned by the logged-in user."""
    user = g.user
    event = Event.create(
        user, request.json
    )
    return event_output(event)
@bp.route("/event/<string:event_id>", methods=("GET",))
def get_event(event_id):
    """Fetch one event by its public identifier; 404 if unknown."""
    event = Event.by_identifier(event_id)
    if not event:
        return abort(404, 'Event not found')
    return event_output(event)
@bp.route("/event/<string:event_id>/registrations", methods=("GET",))
def get_registrations(event_id):
    """List registrations for an event; 404 if the event is unknown."""
    event = Event.by_identifier(event_id)
    if not event:
        return abort(404, 'Event not found')
    registrations = EventRegistration.by_event(event)
    return jsonify([registration_output(r) for r in registrations])
@bp.route("/event/<string:event_id>/registrations", methods=("PUT",))
def register_event(event_id):
    """Register the current user for an event.

    404 if the event is unknown; 401 for anonymous callers.
    NOTE(review): unlike create_event, this checks g.user manually instead
    of using @login_required — confirm the differing anonymous-caller
    behaviour is intended.
    """
    event = Event.by_identifier(event_id)
    if not event:
        return abort(404, 'Event not found')
    user = g.user
    if not user:
        return abort(401, 'Please login to register for an event')
    register = EventRegistration.register(event, user)
    return registration_output(register)
@bp.route("/event/<string:event_id>/registrations", methods=("DELETE",))
def unregister_event(event_id):
    """Remove the current user's registration for an event.

    401 for anonymous callers; 404 if the event or the registration
    does not exist. Returns the removed registration's serialization.
    """
    user = g.user
    if not user:
        return abort(401, 'Please login to unregister for an event')
    event = Event.by_identifier(event_id)
    if not event:
        return abort(404, 'Event not found')
    register = EventRegistration.by_event_user(event, user)
    if not register:
        return abort(404, 'Event registration not found')
    EventRegistration.unregister(register)
    return registration_output(register)
def event_output(event: Event):
    """Serialize an Event into the camelCase dict shape used by the API."""
    return {
        "id": event.identifier,
        "name": event.name,
        "location": event.location,
        "description": event.description,
        "startTimestamp": event.start_timestamp,
        "endTimestamp": event.end_timestamp,
    }
def registration_output(registration: EventRegistration):
    """Serialize a registration; currently just the registrant's email.

    Performs a User lookup per registration (N+1 when called in a loop).
    """
    # Todo: De-normalize registration info to include user email
    user = User.query.get(registration.user_id)
    return dict(
        email=user.email
    )
|
[
"[email protected]"
] | |
b9950dd4f6bb688de78a9a92c88f0ae70755ed6e
|
8f6a9ff4c63fd24d145088077d5da1c3e4caaa3a
|
/programming trade/easyhistory - download 备份修改/easyhistroy/history.py
|
ea5e4c058c768ff89e5b70d20e111adb96f0d2fc
|
[] |
no_license
|
liaofuwei/pythoncoding
|
6fd2afba0d27c4a4bbb4b2d321b3fa402a60d6fe
|
966bd99459be933cf48287412a40e0c7a3d0b8e5
|
refs/heads/master
| 2021-07-15T10:34:57.701528
| 2017-10-10T05:27:13
| 2017-10-10T05:27:13
| 107,651,470
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,621
|
py
|
# coding:utf-8
import os
import pandas as pd
import talib
class Indicator(object):
    """One stock's price history plus lazily-computed TA-Lib indicators.

    Accessing any attribute named after a TA-Lib function (e.g. .SMA(5))
    returns a callable that computes that indicator over the 'close'
    column and caches the result as a new DataFrame column.
    """
    def __init__(self, stock_code, history):
        self.stock_code = stock_code
        self.history = history  # pandas DataFrame indexed by 'date'
    # NOTE(review): looks like dead code copy-pasted from
    # History.load_csv_files — it references self.market (never defined on
    # Indicator) and reads stock_csv without joining `path`; confirm and
    # remove if unused.
    def load_csv_files(self, path):
        file_list = [f for f in os.listdir(path) if f.endswith('.csv')]
        for stock_csv in file_list:
            csv_ext_index_start = -4
            stock_code = stock_csv[:csv_ext_index_start]
            self.market[stock_code] = pd.read_csv(stock_csv, index_col='date')
    def __getattr__(self, item):
        def talib_func(*args, **kwargs):
            # Cache key = TA-Lib function name + stringified positional args.
            str_args = ''.join(map(str, args))
            if self.history.get(item + str_args) is not None:
                # Already computed — return the cached DataFrame.
                return self.history
            func = getattr(talib, item)
            res_arr = func(self.history['close'].values, *args, **kwargs)
            self.history[item + str_args] = res_arr
            return self.history
        return talib_func
class History(object):
    """Loads every CSV under <path>/day/data into per-stock Indicator
    objects keyed by stock code; index with history['<code>']."""
    def __init__(self, dtype='D', path='history'):
        # NOTE(review): dtype is accepted but never used — the 'day'
        # subdirectory is hard-coded below; confirm intent.
        self.market = dict()
        data_path = os.path.join(path, 'day', 'data')
        self.load_csv_files(data_path)
    def load_csv_files(self, path):
        # One Indicator per CSV; stock code = filename minus the '.csv'
        # extension.
        file_list = [f for f in os.listdir(path) if f.endswith('.csv')]
        for stock_csv in file_list:
            csv_ext_index_start = -4
            stock_code = stock_csv[:csv_ext_index_start]
            csv_path = os.path.join(path, stock_csv)
            self.market[stock_code] = Indicator(stock_code, pd.read_csv(csv_path, index_col='date'))
    def __getitem__(self, item):
        return self.market[item]
|
[
"[email protected]"
] | |
c02a678107f5e807bc54b95fb1bc038e46931756
|
f338eb32c45d8d5d002a84798a7df7bb0403b3c4
|
/DQM/DTMonitorModule/test/DTkFactValidation_1_TEMPL_cfg.py
|
28873b4aebd3900356c5f720350f92f2c2e3d464
|
[] |
permissive
|
wouf/cmssw
|
0a8a8016e6bebc611f1277379e12bef130464afb
|
60da16aec83a0fc016cca9e2a5ed0768ba3b161c
|
refs/heads/CMSSW_7_3_X
| 2022-06-30T04:35:45.380754
| 2015-05-08T17:40:17
| 2015-05-08T17:40:17
| 463,028,972
| 0
| 0
|
Apache-2.0
| 2022-02-24T06:05:30
| 2022-02-24T06:05:26
| null |
UTF-8
|
Python
| false
| false
| 3,607
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
# Route 'resolution'-category debug output from the three resolution-test
# modules to cout; everything else is capped at ERROR.
process.MessageLogger = cms.Service("MessageLogger",
    debugModules = cms.untracked.vstring('resolutionTest_step1',
        'resolutionTest_step2',
        'resolutionTest_step3'),
    cout = cms.untracked.PSet(
        threshold = cms.untracked.string('ERROR'),
        default = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        resolution = cms.untracked.PSet(
            limit = cms.untracked.int32(10000000)
        ),
        noLineBreaks = cms.untracked.bool(True)
    ),
    categories = cms.untracked.vstring('resolution'),
    destinations = cms.untracked.vstring('cout')
)
# Standard geometry/conditions. The *TEMPLATE tokens (GLOBALTAGTEMPLATE,
# RUNPERIODTEMPLATE, ...) are substituted by the calibration submission
# scripts before this config is run.
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = "GLOBALTAGTEMPLATE"
process.load("CondCore.DBCommon.CondDBSetup_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.load("RecoLocalMuon.Configuration.RecoLocalMuonCosmics_cff")
# Input file list is injected externally; -1 = process all events.
process.source = cms.Source("PoolSource",
    debugFlag = cms.untracked.bool(True),
    debugVebosity = cms.untracked.uint32(10),
    fileNames = cms.untracked.vstring()
)
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
# DT ttrig payload read from a run/period-templated sqlite file; preferred
# over the global-tag payload via the es_prefer statement below.
process.calibDB = cms.ESSource("PoolDBESSource",
    process.CondDBSetup,
    timetype = cms.string('runnumber'),
    toGet = cms.VPSet(cms.PSet(
        record = cms.string('DTTtrigRcd'),
        tag = cms.string('ttrig')
    )),
    connect = cms.string('sqlite_file:/afs/cern.ch/cms/CAF/CMSALCA/ALCA_MUONCALIB/DTCALIB/RUNPERIODTEMPLATE/ttrig/ttrig_DUMPDBTEMPL_RUNNUMBERTEMPLATE.db'),
    authenticationMethod = cms.untracked.uint32(0)
)
process.es_prefer_calibDB = cms.ESPrefer('PoolDBESSource','calibDB')
# if read from RAW
process.load("EventFilter.DTRawToDigi.dtunpacker_cfi")
process.eventInfoProvider = cms.EDFilter("EventCoordinatesSource",
    eventInfoFolder = cms.untracked.string('EventInfo/')
)
# Residual-validation analyzer; writes residuals.root.
process.DTkFactValidation = cms.EDAnalyzer("DTCalibValidation",
    # Write the histos on file
    OutputMEsInRootFile = cms.bool(True),
    # Lable to retrieve 2D segments from the event
    segment2DLabel = cms.untracked.string('dt2DSegments'),
    OutputFileName = cms.string('residuals.root'),
    # Lable to retrieve 4D segments from the event
    segment4DLabel = cms.untracked.string('dt4DSegments'),
    debug = cms.untracked.bool(False),
    # Lable to retrieve RecHits from the event
    recHits1DLabel = cms.untracked.string('dt1DRecHits')
)
# Keep only the DQM MEtoEDM products in the output file.
process.FEVT = cms.OutputModule("PoolOutputModule",
    outputCommands = cms.untracked.vstring('drop *',
        'keep *_MEtoEDMConverter_*_*'),
    fileName = cms.untracked.string('DQM.root')
)
process.load("DQMServices.Components.MEtoEDMConverter_cff")
process.dummyProducer = cms.EDProducer("ThingWithMergeProducer")
# if read from RAW
#process.firstStep = cms.Sequence(process.muonDTDigis*process.dt1DRecHits*process.dt2DSegments*process.dt4DSegments*process.DTkFactValidation)
process.firstStep = cms.Sequence(process.dummyProducer + process.muonDTDigis*process.dt1DRecHits*process.dt2DSegments*process.dt4DSegments*process.DTkFactValidation*process.MEtoEDMConverter)
#process.firstStep = cms.Sequence(process.dummyProducer + process.dt1DRecHits*process.dt2DSegments*process.dt4DSegments*process.DTkFactValidation*process.MEtoEDMConverter)
process.p = cms.Path(process.firstStep)
process.outpath = cms.EndPath(process.FEVT)
process.DQM.collectorHost = ''
|
[
"[email protected]"
] | |
5d009ec1750156835ab05bd369cef58aeaed239e
|
b4c93bad8ccc9007a7d3e7e1d1d4eb8388f6e988
|
/farmercoupon/migrations/0048_auto_20210322_1046.py
|
45f4e7b616e00e32a923afc76da686935d36cabb
|
[] |
no_license
|
flashdreiv/fis
|
39b60c010d0d989a34c01b39ea88f7fc3be0a87d
|
b93277785d6ad113a90a011f7c43b1e3e9209ec5
|
refs/heads/main
| 2023-04-02T12:46:32.249800
| 2021-03-31T00:27:29
| 2021-03-31T00:27:29
| 343,431,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
# Generated by Django 3.1.7 on 2021-03-22 02:46
from django.db import migrations, models
import multiselectfield.db.fields
class Migration(migrations.Migration):
    """Add farmer.crop (multi-select) and farmer.land_area (default 0).

    Auto-generated by Django 3.1.7; edits here should normally be limited
    to comments.
    """
    dependencies = [
        ('farmercoupon', '0047_auto_20210321_1524'),
    ]
    operations = [
        migrations.AddField(
            model_name='farmer',
            name='crop',
            # NOTE(review): choice labels are scaffold placeholders
            # ("Item title 2.x") — confirm the intended crop names.
            field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[(1, 'Item title 2.1'), (2, 'Item title 2.2'), (3, 'Item title 2.3'), (4, 'Item title 2.4'), (5, 'Item title 2.5')], max_length=9, null=True),
        ),
        migrations.AddField(
            model_name='farmer',
            name='land_area',
            field=models.IntegerField(default=0),
        ),
    ]
|
[
"[email protected]"
] | |
60860eacc8024b7eec8832f1bace9276b752943b
|
9af43f9f52ab8726caacdd594980d5e0bf462c40
|
/flask_transmute/decorators.py
|
29e4c8463f456ffad3e1540e4880e4cebb3c4467
|
[] |
no_license
|
elindell/flask-transmute
|
3b28509fee071e606be0021bfdc63bff85b51a38
|
bd3c103c5eca9a5e4071f71be4a12460acddfd26
|
refs/heads/master
| 2021-01-22T09:16:45.945064
| 2016-04-04T08:49:08
| 2016-04-04T08:49:08
| 67,669,319
| 0
| 0
| null | 2016-09-08T04:48:59
| 2016-09-08T04:48:59
| null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
def updates(f):
    """Mark *f* as a function that updates data (tags it with .updates)."""
    setattr(f, "updates", True)
    return f
def creates(f):
    """Mark *f* as a function that creates data (tags it with .creates)."""
    setattr(f, "creates", True)
    return f
def deletes(f):
    """Mark *f* as a function that deletes data (tags it with .deletes)."""
    setattr(f, "deletes", True)
    return f
def annotate(annotations):
    """Attach *annotations* as ``__annotations__`` to the decorated function.

    Gives Python 2 an equivalent of native parameter annotations, which it
    lacks::

        @annotate({"a": str, "b": int, "return": bool})
        def foo(a, b):
            ...
    """
    def _apply(func):
        func.__annotations__ = annotations
        return func
    return _apply
|
[
"[email protected]"
] | |
94cb36fc55af1eb504fcbf88f2c20c31038bd4dc
|
917b85156ddfb653592b3b0994e7e7e9802a9eed
|
/ejerXML.py
|
c8789ca346bf35fd1f02bff24c1534fdec3609d4
|
[] |
no_license
|
antoniogomezvarela/XML
|
3d2f2e8e1949b4a7f335a0b7c6ea229544d816a4
|
c6dfeed3d782c4a28e56c7992414accf9fdcc660
|
refs/heads/master
| 2021-01-22T03:25:47.441160
| 2015-03-06T07:28:34
| 2015-03-06T07:28:34
| 31,011,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,135
|
py
|
# -*- coding: utf-8 -*-
from lxml import etree
from datetime import date
# Python 2 script: interactive menu over the grants/prizes XML catalogue.
# All user-facing strings are Spanish and must stay unchanged.
tree = etree.parse('becas_premios.xml')
documento = tree.getroot()
# Menu
print "1- Buscar beca o premio por teclado"
print "2- Mostrar becas y enlaces"
print "3- Buscar las becas y premios que su fecha de publicación este entre febrero y abril"
print "4- Contar cuantas becas y premios se han dado."
print "5- Mostrar las id de las becas y añadir cuantos dias ha estado abierta"
opcion= raw_input("Elige una opción: ")
# Option 1: look up a record by the ID typed at the keyboard.
if opcion == '1':
    encontrado = False
    identificacion = raw_input("Introduce una id: ")
    for i in documento:
        if i[0].text==identificacion:
            encontrado = True
            print "ID: ",i[0].text
            print "Titulo: ",i[1].text
            print "Fecha: ",i[2].text
            print "Descripción: ",i[3].text
            print "Estado: ",i[5].text
    if encontrado == False:
        print "Esa ID no existe"
# Option 2: list every record's ID and link.
elif opcion == '2':
    for i in documento:
        print "ID: ",i[0].text,", Enlace: ",i[4].text
# Option 3: records published Feb-Apr. Dates are "YYYY-MM-DD"; the
# zero-padded month makes the string comparison safe.
elif opcion == '3':
    for i in documento:
        fecha1=i[2].text
        fecha2=fecha1.split("-")
        if fecha2[1] >= "02" and fecha2[1] <= "04":
            print "ID: ",i[0].text,", Fecha: ",i[2].text
# Option 4: count records whose title starts with "Becas" (grants)
# or "Premios" (prizes).
elif opcion == '4':
    becas = 0
    premios = 0
    for i in documento:
        titulo = i[1].text
        titulo = titulo.split(" ")
        if titulo[0] == "Becas":
            becas += 1
        elif titulo[0] == "Premios":
            premios += 1
    print "Número de becas concedidas: ",becas
    print "Número de premios concedidos: ",premios
# Option 5: days each application window stayed open
# (final - inicial, both "YYYY-MM-DDT..." strings).
elif opcion == '5':
    # NOTE(review): date_format is defined but never used below.
    date_format = "%Y/%m/%d"
    for i in documento:
        incial = i.findall("plazopresentacion/plazopresentacion_item/incial")
        final = i.findall("plazopresentacion/plazopresentacion_item/final")
        inicial= str(incial[0].text)
        final= str(final[0].text)
        # NOTE(review): `or` lets a record through when only ONE bound is
        # present, and the int() casts below would then fail on "None" —
        # verify `and` isn't the intended operator.
        if inicial != "None" or final != "None":
            inicial = inicial.split("T")
            final = final.split("T")
            inicial = inicial[0].split("-")
            final = final[0].split("-")
            d0 = date(int(inicial[0]),int(inicial[1]),int(inicial[2]))
            d1 = date(int(final[0]),int(final[1]),int(final[2]))
            dias = d1-d0
            print "la beca ",i[0].text," estuvo abierta ",dias.days," dias"
else:
    print "Elige una opción correcta"
|
[
"root@debian"
] |
root@debian
|
a29090ef119e51b024e2fc4af969d65ecaef476a
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_155/1805.py
|
f216188bcb5e778686fc1da1297901988727a426
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 566
|
py
|
def get_min_members(smax, audience):
    """Return the minimum number of friends to invite so everyone stands.

    audience[k] is the number of people with shyness level k; a person
    stands once at least k others are already standing. Invited friends
    have shyness 0 and always stand (Code Jam "Standing Ovation").
    """
    invited = 0
    standing = audience[0]
    for level in range(1, smax + 1):
        # Top up with shyness-0 friends whenever the standing count falls
        # short of the shyness level about to be considered.
        shortfall = level - standing
        if shortfall > 0:
            invited += shortfall
            standing = level
        standing += audience[level]
    return invited
# cases = [(4, "11111"), (1, "09"), (5, "110011"), (0, "1")]
# Python 2 driver: first stdin line is the case count; each case line is
# "smax digits", where digit k is the number of people with shyness k.
t = input()
for i in range(t):
    smax, audience = raw_input().split()
    result = get_min_members(int(smax), map(int, audience))
    print "Case #%d: %d" % (i+1, result)
|
[
"[email protected]"
] | |
e0e32be403a6963887949ef4f1269a652f11e196
|
89e6c3548fbdd06178aae712de1ff19004bc2faa
|
/my_django/contrib/localflavor/sk/forms.py
|
f5428d879572000d4ed3f57df9882da6f007f378
|
[] |
no_license
|
bhgv/ublog_git.hg.repo-django.python-engine
|
a3f3cdcbacc95ec98f022f9719d3b300dd6541d4
|
74cdae100bff5e8ab8fb9c3e8ba95623333c2d43
|
refs/heads/master
| 2020-03-23T01:04:07.431749
| 2018-07-25T12:59:21
| 2018-07-25T12:59:21
| 140,899,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
"""
Slovak-specific form helpers
"""
from __future__ import absolute_import
from my_django.contrib.localflavor.sk.sk_districts import DISTRICT_CHOICES
from my_django.contrib.localflavor.sk.sk_regions import REGION_CHOICES
from my_django.forms.fields import Select, RegexField
from my_django.utils.translation import ugettext_lazy as _
class SKRegionSelect(Select):
    """
    A select widget with a list of Slovak regions as choices.
    """
    def __init__(self, attrs=None):
        super(SKRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class SKDistrictSelect(Select):
    """
    A select widget with a list of Slovak districts as choices.
    """
    def __init__(self, attrs=None):
        super(SKDistrictSelect, self).__init__(attrs, choices=DISTRICT_CHOICES)
class SKPostalCodeField(RegexField):
    """
    A form field that validates its input as Slovak postal code.
    Valid form is XXXXX or XXX XX, where X represents integer.
    """
    default_error_messages = {
        'invalid': _(u'Enter a postal code in the format XXXXX or XXX XX.'),
    }

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        # Accept either five consecutive digits or a "3 + 2" digit split.
        pattern = r'^\d{5}$|^\d{3} \d{2}$'
        super(SKPostalCodeField, self).__init__(
            pattern, max_length, min_length, *args, **kwargs)

    def clean(self, value):
        """
        Validates the input and returns a string that contains only numbers.
        Returns an empty string for empty values.
        """
        cleaned = super(SKPostalCodeField, self).clean(value)
        return cleaned.replace(' ', '')
|
[
"[email protected]"
] | |
e784cfeb07b1b4b44de67e5f78c4e17cfbf1338b
|
1d717c797e93b451f7da7c810a0fb4075b1050d5
|
/src/data/dataset/basic_dataset.py
|
bc875ea6516703ea40caa5028c2b7984ad5dd2fa
|
[] |
no_license
|
jessie0624/nlp-task
|
32338b08051a3ea192db2bf74c9c969bdff1f6ad
|
aaeeed86341356d9fd061664f6f7bccf2ac353d0
|
refs/heads/master
| 2023-01-24T12:06:13.323646
| 2020-12-10T08:38:23
| 2020-12-10T08:38:23
| 292,151,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
import numpy as np
from src.base import BaseDataset
class BasicDataset(BaseDataset):
    """Simple in-memory dataset pairing inputs ``x`` with targets ``y``."""

    def __init__(self, x: list, y: list, callbacks=None):
        super().__init__(callbacks=callbacks)
        self.x = x
        self.y = y
        self.sample()  # Build the candidate index pool up front.

    def get_index_pool(self):
        '''
        The index pool holds the list of indices returned on each pass.
        :return:
        '''
        # Defaults to one entry per element of x; kept two-dimensional for
        # uniformity, i.e. [[0], [1], [2], ...].
        index_pool = np.expand_dims(range(len(self.x)), axis=1).tolist()
        return index_pool

    def sort(self):
        '''
        Reorder the index pool by the length of each item in x (ascending).
        '''
        old_index_pool = self._index_pool
        lengths = [len(item) for item in self.x]
        sort_index = np.argsort(lengths)
        self._index_pool = [old_index_pool[index] for index in sort_index]

    def __getitem__(self, item: int):
        # Fire the per-batch callback hook (inherited) before returning.
        x, y = self.x[item], self.y[item]
        self._handle_callback_on_batch(x, y)
        return x, y
|
[
"[email protected]"
] | |
971dd6b3cb304f9c7d87eacd5e07e92e1786bc2e
|
f8d181f293ce950f1a70bef1d023139d9e70a2c7
|
/tests/contrib/operators/test_gcp_vision_operator_system.py
|
2b75642d6f3a3c93aab282d82e823a4a09d01087
|
[
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
Piboonsak/airflow
|
d242f79561d893111ad73b9e3481b9180adecfd4
|
dce92a54190155898c75c0f3392d42fb28f1884a
|
refs/heads/master
| 2020-04-29T15:16:06.779329
| 2019-03-18T05:16:14
| 2019-03-18T05:16:14
| 176,222,528
| 1
| 0
|
Apache-2.0
| 2019-03-18T06:57:38
| 2019-03-18T06:57:38
| null |
UTF-8
|
Python
| false
| false
| 1,397
|
py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from tests.contrib.utils.base_gcp_system_test_case import DagGcpSystemTestCase, SKIP_TEST_WARNING
from tests.contrib.utils.gcp_authenticator import GCP_AI_KEY
@unittest.skipIf(DagGcpSystemTestCase.skip_check(GCP_AI_KEY), SKIP_TEST_WARNING)
class CloudVisionExampleDagsSystemTest(DagGcpSystemTestCase):
    """System test that runs the ``example_gcp_vision`` DAG end-to-end.

    Skipped entirely when the GCP AI service key is not configured.
    """

    def __init__(self, method_name='runTest'):
        super(CloudVisionExampleDagsSystemTest, self).__init__(
            method_name, dag_id='example_gcp_vision', gcp_key=GCP_AI_KEY
        )

    def test_run_example_dag_function(self):
        self._run_dag()
|
[
"[email protected]"
] | |
cfb58a7a49bde127229470f43e7c101d5f9d7168
|
ba1ddbc6b364dc2fd55f83ea807b50bf45ce3d1a
|
/PageObject/VivaVideo/home.py
|
23b61b58c20490654f07d632cf8e5bfc9c4414a4
|
[] |
no_license
|
zlmone/ATX-UI
|
81c58fa722586fe6fb20cd39e3a85afa6057db93
|
44bfa67ed2274c2eeb36f905d5bd482fd96a6707
|
refs/heads/master
| 2022-05-28T09:03:40.380824
| 2020-05-06T11:39:39
| 2020-05-06T11:39:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,770
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Public.Decorator import *
from Public.Test_data import *
log = Log()
class home_Page(BasePage):
    '''Creation (home) page object for VivaVideo.

    Each action method locates one widget by its Android resource id and
    taps it; the accompanying ``log.i`` messages (kept verbatim, in the
    original Chinese) describe the step being performed.
    '''

    @teststep
    def close_popup(self):
        """Dismiss the home-page family-policy popup if it is showing."""
        log.i('关闭首页家庭政策弹窗')
        try:
            self.d(resourceId="com.quvideo.xiaoying:id/iv_close").click(3)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; the popup may simply not be present.
            log.i('弹窗未弹出或者已消除')

    @teststep
    def close_ad_popup(self, timeout=3):
        """Close the advertisement popup; a no-op when it is absent."""
        log.i('关闭广告弹窗 ')
        self.d(resourceId="com.quvideo.xiaoying:id/tt_insert_dislike_icon_img").click_exists(timeout=timeout)

    @teststep
    def click_template_btn(self):
        """Tap the bottom-bar template ("shoot the same") tab."""
        log.i('点击底部拍同款按钮')
        self.d(resourceId="com.quvideo.xiaoying:id/tv_home_tab", text="拍同款").click()

    @teststep
    def click_home_btn(self):
        """Tap the bottom-bar edit tab."""
        log.i('点击底部剪辑按钮')
        self.d(resourceId="com.quvideo.xiaoying:id/tv_home_tab", text="剪辑").click()

    @teststep
    def click_me_btn(self):
        """Tap the bottom-bar "me" tab."""
        log.i('点击底部我按钮')
        self.d(resourceId="com.quvideo.xiaoying:id/tv_home_tab", text="我").click()

    @teststep
    def click_vip_btn(self):
        """Tap the VIP button."""
        log.i('点击VIP按钮')
        self.d(resourceId="com.quvideo.xiaoying:id/iv_vip_home8_cut").click()

    @teststep
    def click_edit_btn(self):
        """Open the video editor, dismissing the upgrade helper if shown."""
        log.i('点击视频剪辑')
        self.d(resourceId="com.quvideo.xiaoying:id/iv_edit_home8_cut").click()
        try:
            self.d(resourceId="com.quvideo.xiaoying:id/imgbtn_help_exit").implicitly_wait(3).click()
        except Exception:
            # The upgrade page did not appear (or was already dismissed).
            log.i("立刻升级页面已消除")

    @teststep
    def click_mv_btn(self):
        """Tap the album-MV entry."""
        log.i('点击相册MV')
        self.d(resourceId="com.quvideo.xiaoying:id/iv_mv_home8_cut").click()

    @teststep
    def click_draft_btn(self):
        """Open the drafts list."""
        log.i('点击草稿')
        self.d(resourceId="com.quvideo.xiaoying:id/tv_draft_icon_home8_cut", text='草稿').click()

    @teststep
    def click_home_more(self):
        """Tap "view more" in the material center."""
        log.i('点击素材中心查看更多按钮')
        self.d(text="查看更多").click()

    @teststep
    def click_camera_btn(self):
        """Open the camera, auto-granting permission dialogs, then focus."""
        log.i('点击拍摄按钮')
        self.watch_device('取消|允许|始终允许')
        self.d(resourceId="com.quvideo.xiaoying:id/ll_eight4_home8_cut").click()
        time.sleep(5)  # Wait for the camera to finish loading.
        self.d.click(0.5, 0.5)  # Tap to focus and dismiss the filter popup.

    @teststep
    def click_sec_addText(self):
        """Tap the secondary "add subtitles" entry."""
        log.i('点击次要功能位加字幕')
        self.d(resourceId="com.quvideo.xiaoying:id/ll_eight0_home8_cut").click()

    @teststep
    def click_sec_Mixer(self):
        """Tap the secondary picture-in-picture entry."""
        log.i('点击次要功能位画中画')
        self.d(resourceId="com.quvideo.xiaoying:id/ll_eight1_home8_cut").click()

    @teststep
    def click_sec_Mosaic(self):
        """Tap the secondary mosaic entry."""
        log.i('点击次要功能位马赛克')
        # Bug fix: the original line ended with a stray `\` continuation,
        # which fused this statement with the next method's @teststep
        # decorator (parsed as `click() @ teststep`), raising at runtime and
        # leaving click_sec_FAQ undecorated.
        self.d(resourceId="com.quvideo.xiaoying:id/ll_eight2_home8_cut").click()

    @teststep
    def click_sec_FAQ(self):
        """Tap the secondary beginner-tutorial entry."""
        log.i('点击次要功能位新手教程')
        self.d(resourceId="com.quvideo.xiaoying:id/ll_eight3_home8_cut").click()

    @teststep
    def click_sec_Capture(self):
        """Tap the secondary capture entry."""
        log.i('点击次要功能位拍摄')
        self.d(resourceId="com.quvideo.xiaoying:id/ll_eight4_home8_cut").click()

    @teststep
    def click_sec_musicExtraction(self):
        """Tap the secondary audio-extraction entry."""
        log.i('点击次要功能位音频提取')
        self.d(resourceId="com.quvideo.xiaoying:id/ll_eight5_home8_cut").click()
if __name__ == '__main__':
    # Manual smoke test: wire up logging and a null driver, then try to
    # dismiss the ad popup on the home page.
    from Public.Log import Log
    Log().set_logger('udid', './log.log')
    BasePage().set_driver(None)
    home_Page().close_ad_popup()
|
[
"[email protected]"
] | |
fe1cc4e8b6b8201c08c79ccc09f50d705606c468
|
69e7dca194ab7b190e1a72928e28aa3821b47cfb
|
/Concepts/Strings/49.py
|
579955f18e9b68d977d8b50ba8f8ff8b211b3947
|
[] |
no_license
|
Dinesh94Singh/PythonArchivedSolutions
|
a392891b431d47de0d5f606f7342a11b3127df4d
|
80cca595dc688ca67c1ebb45b339e724ec09c374
|
refs/heads/master
| 2023-06-14T14:56:44.470466
| 2021-07-11T06:07:38
| 2021-07-11T06:07:38
| 384,871,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
"""
49. Group Anagrams
Given an array of strings, group anagrams together.
Example:
Input: ["eat", "tea", "tan", "ate", "nat", "bat"],
Output:
[
["ate","eat","tea"],
["nat","tan"],
["bat"]
]
Note:
All inputs will be in lowercase.
The order of your output does not matter.
"""
import collections
def group_anagrams(strs):
    """Group words that are anagrams of each other.

    Args:
        strs: iterable of lowercase strings.

    Returns:
        list[list[str]]: one inner list per anagram class; each inner list
        preserves the input order of its members.
    """
    # Anagrams share the same sorted character tuple, which is hashable and
    # therefore usable directly as a dict key.
    groups = collections.defaultdict(list)
    for word in strs:
        groups[tuple(sorted(word))].append(word)
    # Idiom fix: the original iterated .items() only to discard the key and
    # append values one by one — the values view already is the answer.
    return list(groups.values())
# Example invocation; the result is discarded (smoke test only).
group_anagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
|
[
"[email protected]"
] | |
31475d7e6cd976e2ad2ea6c3ecd3f56b4ae48fbc
|
326a026bcc6bad962159677110d78d3d836532ed
|
/markote/api/notebook.py
|
e05023873ff40b79701ec2540061e8c2d53ca0e2
|
[
"MIT"
] |
permissive
|
Frederick-S/markote
|
f63a5007fd0a70ce4b3ae9d03425ae9f9c8b54f3
|
095dabe3da83b5d8809593758661eb78fa527f49
|
refs/heads/master
| 2023-03-04T16:50:30.541147
| 2022-08-12T01:24:43
| 2022-08-12T01:24:43
| 110,396,888
| 9
| 2
|
MIT
| 2023-03-04T13:11:38
| 2017-11-12T02:04:32
|
Vue
|
UTF-8
|
Python
| false
| false
| 870
|
py
|
from flask import jsonify, request
from markote.api.api_blueprint import api_blueprint
from markote.oauth import oauth
@api_blueprint.route('/notebooks', methods=['GET'])
def get_notebooks():
    """Proxy the Microsoft Graph notebook list (id + displayName only)."""
    client = oauth.microsoft_graph
    graph_response = client.get(
        'me/onenote/notebooks?$select=id,displayName')
    return jsonify(graph_response.json()), graph_response.status_code
@api_blueprint.route('/notebooks/<notebook_id>/sections', methods=['GET'])
def get_sections(notebook_id):
    """Proxy the sections of a notebook, optionally filtered by name.

    The optional ``name`` query parameter becomes an OData ``$filter`` on
    ``displayName``.
    """
    name = request.args.get('name')
    if name:
        query_filter = '$filter=displayName eq \'{0}\''.format(name)
    else:
        query_filter = ''
    client = oauth.microsoft_graph
    graph_response = client.get(
        'me/onenote/notebooks/{0}/sections?$select=id,displayName&{1}'.format(
            notebook_id, query_filter))
    return jsonify(graph_response.json()), graph_response.status_code
|
[
"[email protected]"
] | |
4b9a62611c764cd8d705fcf54fd46f2a5624deae
|
d9e26e516ab3863b6e7d00c4e3cdecf1af7028eb
|
/src/oaklib/io/rollup_report_writer.py
|
e4644c058309aeb0aeae82b0c4cc2fa52f2b5e04
|
[
"Apache-2.0"
] |
permissive
|
INCATools/ontology-access-kit
|
2f08a64b7308e8307d1aaac2a81764e7d98b5928
|
8d2a124f7af66fe2e796f9e0ece55585438796a5
|
refs/heads/main
| 2023-08-30T14:28:57.201198
| 2023-08-29T17:40:19
| 2023-08-29T17:40:19
| 475,072,415
| 67
| 15
|
Apache-2.0
| 2023-09-07T01:06:04
| 2022-03-28T15:50:45
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,444
|
py
|
from typing import Dict, List, TextIO
from airium import Airium
from linkml_runtime.dumpers import json_dumper, yaml_dumper
def format_object(curie, label):
    """Return ``"label [curie]"`` when a label is available, else the curie."""
    if not label:
        return curie
    return f"{label} [{curie}]"
def add_association_group(doc: Airium, associations: List[Dict], subject: str, header_label: str):
    """Render one labelled bullet list of this subject's associations.

    Emits nothing at all when the subject has no matching associations.
    """
    matching = [assoc for assoc in associations if assoc.get("subject") == subject]
    if not matching:
        return
    with doc.div(klass="association-group"):
        doc.div(_t=header_label, klass="association-group-header")
        with doc.ul(klass="association-group-list"):
            for assoc in matching:
                doc.li(_t=format_object(assoc.get("object"), assoc.get("object_label")))
def generate_html(subjects: List[str], groups: List[Dict]) -> str:
    """Render the rollup as a standalone HTML document.

    Produces a table with one column per subject and one row per top-level
    group; each cell lists the sub-group associations for that subject via
    add_association_group.
    """
    doc = Airium()
    doc("<!DOCTYPE html>")
    with doc.html(lang="en"):
        with doc.head():
            doc.meta(charset="utf-8")
            doc.title(_t="Rollup Table")
            # Inline stylesheet so the output file is self-contained.
            doc.style(
                _t="""
            .rollup-table {
                border-collapse: collapse;
                width: 100%;
            }
            .rollup-table tr {
                vertical-align: top;
            }
            .rollup-table td {
                padding: 0.25rem;
                border-top: 1px solid black;
            }
            .primary-group-label {
                font-weight: bold;
            }
            .association-group {
                margin-bottom: 1rem;
            }
            .association-group-header {
                font-style: italic;
            }
            .association-group-list {
                margin: 0;
            }
            """
            )
        with doc.body():
            with doc.table(klass="rollup-table"):
                # Header row: one cell per subject.
                with doc.tr():
                    doc.td(_t="Subject", klass="primary-group-label")
                    for subject in subjects:
                        doc.td(_t=subject)
                # One row per top-level group.
                for group in groups:
                    with doc.tr():
                        label = format_object(
                            group.get("group_object"), group.get("group_object_label")
                        )
                        doc.td(_t=label, klass="primary-group-label")
                        for subject in subjects:
                            with doc.td():
                                for sub_group in group.get("sub_groups", []):
                                    add_association_group(
                                        doc,
                                        sub_group.get("associations", []),
                                        subject,
                                        format_object(
                                            sub_group.get("group_object"),
                                            sub_group.get("group_object_label"),
                                        ),
                                    )
                                # Associations attached directly to the group
                                # (not to any sub-group) land under "Other".
                                add_association_group(
                                    doc, group.get("associations", []), subject, "Other"
                                )
    return str(doc)
def write_report(subjects: List[str], groups: List[Dict], output: TextIO, format: str):
    """Serialize the rollup in the requested format and write it to output.

    Supported formats: "json", "yaml", "html".

    Raises:
        ValueError: if ``format`` is not one of the supported values.
    """
    if format == "json":
        serialized = json_dumper.dumps(groups, inject_type=False)
    elif format == "yaml":
        serialized = yaml_dumper.dumps(groups)
    elif format == "html":
        serialized = generate_html(subjects, groups)
    else:
        raise ValueError(f"Unsupported format: {format}")
    output.write(serialized)
|
[
"[email protected]"
] | |
b46a3f8bb2a7aa7189a03e9bb03385aa2adc1203
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_135/3207.py
|
49316f0601d1e454902936007d3f7d43574994a8
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
# Code Jam "Magic Trick": intersect the chosen row from each of the two
# 4x4 grids; the answer is the single surviving card (or a verdict).
with open("a.in", 'r') as f:
    num_cases = int(f.readline())
    for case_no in range(1, num_cases + 1):
        candidates = set(range(1, 17))
        for _ in range(2):
            chosen_row = int(f.readline())
            for row in range(1, 5):
                row_values = f.readline()
                # Only the chosen row contributes; the rest are consumed.
                if row == chosen_row:
                    candidates &= set(map(int, row_values.split()))
        if not candidates:
            print("Case #%d: Volunteer cheated!" % case_no)
        elif len(candidates) > 1:
            print("Case #%d: Bad magician!" % case_no)
        else:
            print("Case #%d: %d" % (case_no, candidates.pop()))
|
[
"[email protected]"
] | |
b79a9b710f88b92e919b4b75f4e4d0094a5287ed
|
c7b31209cc7b5a015ca34d1174e7978730ce6733
|
/rpplugins/env_probes/environment_capture_stage.py
|
9bbe136a0b8d893af6e96f81148c9d987fbae7be
|
[
"MIT"
] |
permissive
|
gitter-badger/RenderPipeline
|
c244343def6dd33e55e78cd828f0c703b338ce1a
|
4d4bf4164c8dcb188f93e46749ba52de8f61b37f
|
refs/heads/master
| 2021-01-22T00:52:25.396315
| 2016-04-16T13:13:57
| 2016-04-16T13:15:27
| 56,395,593
| 0
| 0
| null | 2016-04-16T17:04:37
| 2016-04-16T17:04:37
| null |
UTF-8
|
Python
| false
| false
| 7,877
|
py
|
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import division
from rplibs.six.moves import range
from rplibs.six import itervalues
from panda3d.core import Camera, PerspectiveLens, Vec4, Vec3, PTAInt, GraphicsOutput
from rpcore.globals import Globals
from rpcore.image import Image
from rpcore.render_stage import RenderStage
class EnvironmentCaptureStage(RenderStage):
    """ This stage renders the scene to a cubemap """

    required_inputs = ["DefaultEnvmap", "AllLightsData", "maxLightIndex"]
    required_pipes = []

    def __init__(self, pipeline):
        RenderStage.__init__(self, pipeline)
        # Edge length (pixels) of each specular cubemap face.
        self.resolution = 128
        # Edge length of each (much smaller) diffuse cubemap face.
        self.diffuse_resolution = 4
        self.regions = []
        self.cameras = []
        self.rig_node = Globals.render.attach_new_node("EnvmapCamRig")
        # Single-element array holding the index of the probe being captured;
        # shared with the shaders via "currentIndex".
        self.pta_index = PTAInt.empty_array(1)
        # NOTE(review): the storage textures appear to be assigned externally
        # before create() is called — confirm against the plugin code.
        self.storage_tex = None
        self.storage_tex_diffuse = None

    def create(self):
        """Creates the capture target plus the store/filter post targets."""
        self.target = self.create_target("CaptureScene")
        # Six faces side by side in a single 6w x h target.
        self.target.size = self.resolution * 6, self.resolution
        self.target.add_depth_attachment(bits=16)
        self.target.add_color_attachment(bits=16, alpha=True)
        self.target.prepare_render(None)

        # Remove all unused display regions
        internal_buffer = self.target.internal_buffer
        internal_buffer.remove_all_display_regions()
        internal_buffer.disable_clears()
        internal_buffer.get_overlay_display_region().disable_clears()

        self._setup_camera_rig()
        self._create_store_targets()
        self._create_filter_targets()

    def _setup_camera_rig(self):
        """ Setups the cameras to render a cubemap """
        directions = (Vec3(1, 0, 0), Vec3(-1, 0, 0), Vec3(0, 1, 0),
                      Vec3(0, -1, 0), Vec3(0, 0, 1), Vec3(0, 0, -1))

        # Prepare the display regions
        for i in range(6):
            # Each face gets a 1/6-wide horizontal slice of the buffer.
            region = self.target.internal_buffer.make_display_region(
                i / 6, i / 6 + 1 / 6, 0, 1)
            region.set_sort(25 + i)
            region.set_active(True)
            region.disable_clears()

            # Set the correct clears
            region.set_clear_depth_active(True)
            region.set_clear_depth(1.0)
            region.set_clear_color_active(True)
            region.set_clear_color(Vec4(0))

            lens = PerspectiveLens()
            lens.set_fov(90)
            lens.set_near_far(0.001, 1.0)
            camera = Camera("EnvmapCam-" + str(i), lens)
            camera_np = self.rig_node.attach_new_node(camera)
            camera_np.look_at(camera_np, directions[i])
            region.set_camera(camera_np)
            self.regions.append(region)
            self.cameras.append(camera_np)

        # Roll adjustments so each face has the expected cubemap orientation.
        self.cameras[0].set_r(90)
        self.cameras[1].set_r(-90)
        self.cameras[3].set_r(180)
        self.cameras[5].set_r(180)

        # Register cameras
        for camera_np in self.cameras:
            self._pipeline.tag_mgr.register_envmap_camera(camera_np.node())

    def _create_store_targets(self):
        """ Creates the targets which copy the result texture into the actual storage """
        self.target_store = self.create_target("StoreCubemap")
        self.target_store.size = self.resolution * 6, self.resolution
        self.target_store.prepare_buffer()
        self.target_store.set_shader_input("SourceTex", self.target.color_tex)
        self.target_store.set_shader_input("DestTex", self.storage_tex)
        self.target_store.set_shader_input("currentIndex", self.pta_index)

        # Intermediate cube texture; the diffuse filter pass reads from it.
        self.temporary_diffuse_map = Image.create_cube("DiffuseTemp", self.resolution, "RGBA16")
        self.target_store_diff = self.create_target("StoreCubemapDiffuse")
        self.target_store_diff.size = self.resolution * 6, self.resolution
        self.target_store_diff.prepare_buffer()
        self.target_store_diff.set_shader_input("SourceTex", self.target.color_tex)
        self.target_store_diff.set_shader_input("DestTex", self.temporary_diffuse_map)
        self.target_store_diff.set_shader_input("currentIndex", self.pta_index)

    def _create_filter_targets(self):
        """ Generates the targets which filter the specular cubemap """
        self.filter_targets = []
        mip = 0
        size = self.resolution
        # One filter pass per mip level, halving the size each time.
        while size > 1:
            size = size // 2
            mip += 1
            target = self.create_target("FilterCubemap:{0}-{1}x{1}".format(mip, size))
            target.size = size * 6, size
            target.prepare_buffer()
            target.set_shader_input("currentIndex", self.pta_index)
            target.set_shader_input("currentMip", mip)
            target.set_shader_input("SourceTex", self.storage_tex)
            target.set_shader_input("DestTex", self.storage_tex, False, True, -1, mip, 0)
            self.filter_targets.append(target)

        # Target to filter the diffuse cubemap
        self.filter_diffuse_target = self.create_target("FilterCubemapDiffuse")
        self.filter_diffuse_target.size = self.diffuse_resolution * 6, self.diffuse_resolution
        self.filter_diffuse_target.prepare_buffer()
        self.filter_diffuse_target.set_shader_input("SourceTex", self.temporary_diffuse_map)
        self.filter_diffuse_target.set_shader_input("DestTex", self.storage_tex_diffuse)
        self.filter_diffuse_target.set_shader_input("currentIndex", self.pta_index)

    def set_probe(self, probe):
        """Moves the camera rig to the probe and records its storage index."""
        self.rig_node.set_mat(probe.matrix)
        self.pta_index[0] = probe.index

    def update(self):
        """Enables only the targets scheduled by the task scheduler this frame."""
        # First, disable all targets
        for target in itervalues(self._targets):
            target.active = False

        # Check for updated faces
        for i in range(6):
            if self._pipeline.task_scheduler.is_scheduled("envprobes_capture_envmap_face" + str(i)):
                self.regions[i].set_active(True)

        # Check for filtering
        if self._pipeline.task_scheduler.is_scheduled("envprobes_filter_and_store_envmap"):
            self.target_store.active = True
            self.target_store_diff.active = True
            self.filter_diffuse_target.active = True

            for target in self.filter_targets:
                target.active = True

    def set_shader_input(self, *args):
        # The envmap cameras render the main scene graph, so shader inputs
        # are applied to Globals.render rather than to this stage's targets.
        Globals.render.set_shader_input(*args)

    def reload_shaders(self):
        """(Re)loads the store and filter shaders for all post targets."""
        self.target_store.shader = self.load_plugin_shader(
            "store_cubemap.frag.glsl")
        self.target_store_diff.shader = self.load_plugin_shader(
            "store_cubemap_diffuse.frag.glsl")
        self.filter_diffuse_target.shader = self.load_plugin_shader(
            "filter_cubemap_diffuse.frag.glsl")
        # One pre-generated shader per specular mip level.
        for i, target in enumerate(self.filter_targets):
            target.shader = self.load_plugin_shader("mips/{}.autogen.glsl".format(i))
|
[
"[email protected]"
] | |
9c69e890954b39c53456d3274149e26adb8cba6e
|
2cf4c28f533065153b23c3b4084bf905467f4e23
|
/utils/tensor_viewer/plugins/sandwich.py
|
4395a597082b8f236ed00847d43cfbb277d7c9a2
|
[] |
no_license
|
WilliamRo/tframe
|
94e75b4d7fd482ab5edeff2db966f4316390e32b
|
2ac00b2a05fd65529adb7edf7123b3eea6e5e6f2
|
refs/heads/master
| 2023-09-01T22:02:02.372416
| 2023-08-24T08:10:26
| 2023-08-24T08:10:26
| 92,593,033
| 17
| 7
| null | 2022-07-23T01:35:10
| 2017-05-27T10:55:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
import re
import numpy as np
import matplotlib
from matplotlib.ticker import FuncFormatter
from tframe.utils.tensor_viewer.plugin import Plugin, VariableWithView
def _recursive_modify(v_dict, level=0):
if len(v_dict) == 0: return
assert isinstance(v_dict, dict)
if isinstance(list(v_dict.values())[0], dict):
for e_key, e_dict in v_dict.items():
print('>> Modifying dict {} ...'.format(e_key))
_recursive_modify(e_dict, level=level + 1)
return
# Here the values in v_dict must be lists
for key in list(v_dict.keys()):
if not re.fullmatch(r'dL/dS\[\d+\]', key): continue
triangle_list = v_dict[key]
new_list = []
for triangle in triangle_list:
assert isinstance(triangle, np.ndarray) and len(triangle.shape) == 2
bottom = np.sum(triangle, axis=0, keepdims=True)
new_list.append(np.concatenate(
[triangle, np.zeros_like(bottom), bottom], axis=0))
v_dict[key] = new_list
def modifier(v_dict):
    """Dict-modifier hook handed to the tensor-viewer Plugin."""
    print('>> Modifying by sandwich ...')
    _recursive_modify(v_dict)
# Module-level plugin instance picked up by the tensor viewer.
plugin = Plugin(dict_modifier=modifier)
|
[
"[email protected]"
] | |
395b9d6b3eeb3dda9279993faf701f3d4c1cf382
|
5aa80aab7a75d76b0aa838bf8f74a276a12c876e
|
/src/ifmap/SConscript
|
91667c2cbadf3ccc91e2f3828066f422d4455f95
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
tungstenfabric/tf-controller
|
83b6d58afadb5697b540b5345711a5b2af90d201
|
f825fde287f4eb2089aba2225ca73eeab3888040
|
refs/heads/master
| 2023-08-28T02:56:27.329584
| 2023-08-20T12:15:38
| 2023-08-20T12:31:34
| 231,070,970
| 55
| 29
|
Apache-2.0
| 2023-07-23T01:38:17
| 2019-12-31T10:24:38
|
C++
|
UTF-8
|
Python
| false
| false
| 4,531
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
# -*- mode: python; -*-
Import('BuildEnv')
env = BuildEnv.Clone()
env.CppDisableExceptions()
env.Append(CPPPATH = env['TOP'])
env.Append(CPPPATH = [env['TOP'] + '/ifmap'])
env.Append(CPPPATH = [env['TOP'] + '/config-client-mgr'])
env.Append(CPPPATH = [env['TOP'] + '/base/sandesh'])
env.Append(CPPPATH = [env['TOP'] + '/database'])
env.Prepend(LIBS=['sandesh', 'http', 'http_parser', 'curl', 'io', 'base'])
except_env = BuildEnv.Clone()
except_env.Append(CPPPATH = [env['TOP'] + '/ifmap'])
except_env.Append(CPPPATH = [env['TOP'] + '/config-client-mgr'])
except_env.Append(CPPPATH = [env['TOP'] + '/base/sandesh'])
except_env.CppEnableExceptions()
except_env.Append(CPPPATH = env['TOP'])
SandeshGenFiles = env.SandeshGenCpp('ifmap_server_show.sandesh')
SandeshGenFiles += env.SandeshGenOnlyCpp('ifmap_server_show_internal.sandesh')
SandeshGenFiles += env.SandeshGenCpp('ifmap_log.sandesh')
SandeshGenSrcs = env.ExtractCpp(SandeshGenFiles)
# Compile every generated Sandesh source into an object file; the resulting
# list is linked into the libraries declared below.
sandesh_objs = []
for src in SandeshGenSrcs:
    objname = src.replace('.cpp', '.o')
    obj = except_env.Object(objname, src)
    sandesh_objs.append(obj)
ifmap_server = except_env.Object('ifmap_server.o', 'ifmap_server.cc')
ifmap_server_show = except_env.Object('ifmap_server_show.o', 'ifmap_server_show.cc')
ifmap_xmpp = except_env.Object('ifmap_xmpp.o', 'ifmap_xmpp.cc')
ifmap_xmpp_client_show = except_env.Object('ifmap_xmpp_client_show.o', 'ifmap_xmpp_client_show.cc')
AgentSandeshGenFiles = env.SandeshGenCpp('ifmap_agent.sandesh')
AgentSandeshGenSrcs = env.ExtractCpp(AgentSandeshGenFiles)
libifmap_common = env.Library('ifmap_common',
['ifmap_dependency_tracker.cc',
'ifmap_table.cc',
'ifmap_link.cc',
'ifmap_link_table.cc',
'ifmap_node.cc',
'ifmap_object.cc',
'ifmap_log.cc'] + sandesh_objs)
# control-node
libifmap = env.Library('ifmap_server',
['ifmap_client.cc',
'ifmap_config_listener.cc',
'ifmap_encoder.cc',
'ifmap_exporter.cc',
'ifmap_factory.cc',
'ifmap_graph_walker.cc',
'ifmap_node_proxy.cc',
ifmap_server_show,
ifmap_server,
'ifmap_server_parser.cc',
'ifmap_server_table.cc',
'ifmap_update.cc',
'ifmap_update_queue.cc',
'ifmap_update_sender.cc',
'ifmap_util.cc',
'ifmap_uuid_mapper.cc',
ifmap_xmpp,
ifmap_xmpp_client_show,
] + sandesh_objs)
# agent-module
libifmap_agent = env.Library('ifmap_agent',
[ 'ifmap_agent_parser.cc',
'ifmap_agent_table.cc',
'ifmap_agent_sandesh.cc',
] + AgentSandeshGenSrcs)
test_suite = env.SConscript('client/SConscript', exports='BuildEnv',
duplicate = 0)
test_suite += env.SConscript('test/SConscript', exports='BuildEnv',
duplicate = 0)
def code_coverage(target, source, env):
    """SCons action: wipe stale coverage data, run every test binary, then
    generate an lcov/genhtml report under the target directory.

    NOTE(review): the action returns None, so SCons treats it as successful
    regardless of the individual tests' exit codes.
    """
    import shutil
    shutil.rmtree(target[0].path, ignore_errors = True)
    # lcov --base-directory $ROOT -- directory . --zerocounters -q
    import os
    os.system('lcov --base-directory . --directory ' + Dir('.').path +
              ' --zerocounters -q')

    # execute tests
    import subprocess
    ShEnv = {env['ENV_SHLIB_PATH']: 'build/lib'}
    for test in test_suite:
        cmd = test[0].path
        # Each test's stdout goes to a sibling .log file.
        logfile = open(cmd + '.log', 'w')
        subprocess.call([cmd], stdout=logfile, env=ShEnv)

    # lcov --base-directory $ROOT -- directory . -c -o ifmap_test.info
    os.system('lcov --base-directory . --directory ' + Dir('.').path +
              ' -c -o ifmap_test.info')

    # genhtml -o ifmap/test_coverage ifmap_test.info
    os.system('genhtml -o ' + target[0].path +
              ' -t "test coverage" --num-spaces 4 ifmap_test.info')
# Only wire up the coverage pseudo-target for coverage builds.
if env['OPT'] == 'coverage':
    test_coverage = env.Command(Dir('test_coverage'), '', code_coverage)
    env.AlwaysBuild(test_coverage)
    env.Alias('src/ifmap:test_coverage', test_coverage)
|
[
"[email protected]"
] | ||
13ab29e5ceffbff9de60963df7ec385ba55cad77
|
ee6fc02e8392ff780a4f0d1a5789776e4d0b6a29
|
/code/abc/150/abc150_b.py
|
c748c5aa7445dd342d4b813baf31ee0e24d77bef
|
[] |
no_license
|
mollinaca/ac
|
e99bb5d5c07159b3ef98cd7067424fa2751c0256
|
2f40dd4333c2b39573b75b45b06ad52cf36d75c3
|
refs/heads/master
| 2020-12-22T11:02:13.269855
| 2020-09-18T01:02:29
| 2020-09-18T01:02:29
| 236,757,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ABC150 B: count occurrences of the substring "ABC" in s (|s| == n).
n = int(input())
s = str(input())
total = 0
for start in range(n - 2):
    if s[start:start + 3] == "ABC":
        total += 1
print(total)
|
[
"[email protected]"
] | |
d0b9760362c2c896a11394121621237ba007551c
|
64afcac06e3a64215d7d7152c4fa5662164a41e6
|
/src/jk_sql/DBColDef.py
|
e36c558919fedb53a78002e8dc3f70873f63d78a
|
[
"Apache-2.0"
] |
permissive
|
jkpubsrc/python-module-jk-sql
|
7b4f12783b8384540404fa60c469c911955202a6
|
cc716f4042af4cbc503056bd3f71cde9acd12ce2
|
refs/heads/master
| 2022-09-15T17:20:24.458796
| 2017-10-20T10:01:34
| 2017-10-20T10:01:34
| 107,655,550
| 0
| 1
| null | 2022-09-03T19:29:01
| 2017-10-20T08:58:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,376
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import sqlite3
from .EnumDBColType import EnumDBColType
from .EnumDBIndexType import EnumDBIndexType
#
# This class represents a definition of a column. Objects of this type are used to either define a column or get information about a table column.
#
class DBColDef(object):
    """Definition of a single table column: its name, data type, nullability
    and the kind of index (if any) placed on it.

    Objects of this type are used to either define a column or get
    information about an existing table column.
    """

    def __init__(self, fieldName, fieldType, bIsNullable, indexType):
        assert isinstance(fieldName, str)
        assert isinstance(fieldType, EnumDBColType)
        assert isinstance(bIsNullable, bool)
        assert isinstance(indexType, EnumDBIndexType)

        # A primary key is implicitly NOT NULL and needs no extra index.
        if fieldType == EnumDBColType.PK:
            bIsNullable = False
            indexType = EnumDBIndexType.NONE

        self.__name = fieldName
        self.__type = fieldType
        self.__bIsNullable = bIsNullable
        self.__indexType = indexType

    @property
    def index(self):
        """The index type placed on this column (EnumDBIndexType)."""
        return self.__indexType

    @property
    def nullable(self):
        """True if the column may hold NULL values."""
        return self.__bIsNullable

    @property
    def unique(self):
        """True if the column carries a unique index."""
        return self.__indexType == EnumDBIndexType.UNIQUE_INDEX

    @property
    def type(self):
        """The column data type (EnumDBColType)."""
        return self.__type

    @property
    def name(self):
        """The column name."""
        return self.__name

    def isEqualWithoutIndex(self, other):
        """Compare name, type and nullability, ignoring the index type."""
        return (self.__name == other.name) and (self.__type == other.type) \
            and (self.__bIsNullable == other.nullable)

    def __ne__(self, other):
        return (self.__name != other.name) or (self.__type != other.type) \
            or (self.__bIsNullable != other.nullable) or (self.__indexType != other.index)

    def __eq__(self, other):
        # NOTE(review): __eq__ without __hash__ leaves instances unhashable;
        # kept as-is in case callers rely on that.
        return (self.__name == other.name) and (self.__type == other.type) \
            and (self.__bIsNullable == other.nullable) and (self.__indexType == other.index)

    def __str__(self):
        # Bug fix: convert the enum explicitly before concatenating, matching
        # toJSON(); `self.__type + ": "` raises TypeError unless the enum
        # happens to subclass str.
        return str(self.__type) + ": " + self.__name

    def __repr__(self):
        return str(self.__type) + ": " + self.__name

    def __copy__(self):
        return DBColDef(self.__name, self.__type, self.__bIsNullable, self.__indexType)

    def __deepcopy__(self, memo):
        return DBColDef(self.__name, self.__type, self.__bIsNullable, self.__indexType)

    @staticmethod
    def loadFromJSON(jsonDef):
        """Reconstruct a DBColDef from the dict produced by toJSON()."""
        t = jsonDef["type"]
        i = jsonDef["index"]
        return DBColDef(jsonDef["name"], EnumDBColType.parse(t), jsonDef["nullable"], EnumDBIndexType.parse(i))

    def toJSON(self):
        """Serialize to a JSON-compatible dict (enums stored as strings)."""
        return {
            "name": self.__name,
            "type": str(self.__type),
            "nullable": self.__bIsNullable,
            "index": str(self.__indexType),
        }
[
"[email protected]"
] | |
901d73e1e1b9fbab700e456ee163cba1d0d65fe4
|
9fcc6ed9d6ddff6d183a891066f6e2be5c3875e8
|
/pandasdmx/source/sgr.py
|
cb73a9a7c0c1aad9de3316676facf7c3269555fc
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"Python-2.0"
] |
permissive
|
daoluan/pandaSDMX
|
ea289db699d2516cf563194214d1e70adb61dca7
|
2efcb5a429a5306efd89bed4cd55946d1ad5067b
|
refs/heads/master
| 2020-07-12T21:37:20.617115
| 2019-08-28T11:09:59
| 2019-08-28T11:09:59
| 204,912,582
| 0
| 0
|
Apache-2.0
| 2019-08-28T19:08:08
| 2019-08-28T11:08:08
| null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
from . import Source as BaseSource
class Source(BaseSource):
    _id = 'SGR'

    def handle_response(self, response, content):
        """SGR responses lack a content-type header; default it to XML."""
        headers = response.headers
        if headers.get('content-type') is None:
            headers['content-type'] = 'application/xml'
        return response, content

    def modify_request_args(self, kwargs):
        """SGR is a data source but not a data provider.

        Default the ``provider`` argument to ``'all'`` so that data
        republished by SGR from all of its upstream providers is retrieved.
        """
        kwargs.setdefault('provider', 'all')
|
[
"[email protected]"
] | |
cf85859497e9262ab0792ec4e552abbecf6d8798
|
68b7e05830d2480e848b0d1ff49f455e3c2e3a3c
|
/manage.py
|
70ae5959854c8281d4a31549726dba3ecf87c16d
|
[] |
no_license
|
Zauberzunge/Umfragen
|
24414567ad8dfeb89a5b7267841a08bf6d035625
|
3e57da7e87d2aebc596878800fd4fe8008f38944
|
refs/heads/master
| 2023-01-28T20:02:19.044334
| 2020-12-07T21:06:47
| 2020-12-07T21:06:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
from django.core.management.commands.runserver import Command as runserver
runserver.default_port = "8002"
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoProject.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while keeping the original cause.
        message = (
            "Could not import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"="
] |
=
|
646600322f93ff2c0453d17bf7823470b5dc6892
|
14421a12c4e80395567e676394d369fd9619bd32
|
/Scripts/PythonMidLvl/84/84test.py
|
06a05233ce41b852f56020436f8510d38948fc20
|
[] |
no_license
|
jawor92/Python-Udemy-Mobilo
|
7b331e8197233c3116e43e0b3c1110b9b878762e
|
8098508835121a1536c2753bc4eedbf17163c93d
|
refs/heads/master
| 2020-12-09T21:39:09.366604
| 2020-01-12T19:31:09
| 2020-01-12T19:31:09
| 233,423,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 6 22:09:49 2020
@author: Mateusz.Jaworski
"""
class MailToSantaClaus:
    """A letter to Santa Claus holding a snapshot of wished-for presents."""

    def __init__(self, presents):
        # Keep our own copy so later mutation of the caller's list does not
        # change the content of the mail.
        self.presents = presents.copy()

    def show_presents(self):
        """Print the presents stored in this mail."""
        print(self.presents)
mail = MailToSantaClaus(['Teddy Bear', 'Teddy Bear House'])
mail.show_presents()
|
[
"[email protected]"
] | |
dfd5f1ab44402a21ebfea238e5b70d78f4c08847
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-mrsp.0/mrsp_ut=3.5_rd=0.8_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=79/params.py
|
bbe63e8c0e1fb04e03a09bb6ca4fadafda488f37
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
{'cpus': 4,
'duration': 30,
'final_util': '3.530310',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.8',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 79,
'utils': 'uni-medium-3'}
|
[
"[email protected]"
] | |
6c2bda0345755e152e1819fa282be7e05a97e988
|
15e85b4d9527e7a87aded5b3c99ad9c785bca915
|
/data-storage-manager-sdk/python/simcore_dsm_sdk/configuration.py
|
422f971c74adb284286a59de28d37d9be9f11594
|
[
"MIT"
] |
permissive
|
mguidon/aiohttp-dsm
|
4161f9977d3dffbb727aa26cce4e9fb347aa4e21
|
612e4c7f6f73df7d6752269965c428fda0276191
|
refs/heads/master
| 2020-03-30T09:03:49.791406
| 2018-10-02T07:05:35
| 2018-10-02T07:05:35
| 151,058,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,125
|
py
|
# coding: utf-8
"""
dsm-api
dsm api # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class TypeWithDefault(type):
    """Metaclass that hands out shallow copies of one shared default instance.

    The first instantiation lazily creates the default; every call returns a
    shallow copy of it, so callers can mutate their copy freely.
    """

    def __init__(cls, name, bases, dct):
        super(TypeWithDefault, cls).__init__(name, bases, dct)
        cls._default = None

    def __call__(cls):
        if cls._default is None:
            # Lazily build the shared default on first use.
            cls._default = super(TypeWithDefault, cls).__call__()
        return copy.copy(cls._default)

    def set_default(cls, default):
        """Replace the shared default with a copy of *default*."""
        cls._default = copy.copy(default)
class Configuration(six.with_metaclass(TypeWithDefault, object)):
    """NOTE: This class is auto generated by OpenAPI Generator

    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Holds client-wide settings: base URL, auth credentials, logging,
    SSL/TLS and connection-pool options. Via the TypeWithDefault
    metaclass, ``Configuration()`` returns a copy of one shared default
    instance rather than a fresh object.
    """

    def __init__(self):
        """Constructor"""
        # Default Base url
        self.host = "http://{host}:{port}/{version}"
        # Temp file folder for downloading files
        self.temp_folder_path = None
        # Authentication Settings
        # dict to store API key(s)
        self.api_key = {}
        # dict to store API prefix (e.g. Bearer)
        self.api_key_prefix = {}
        # Username for HTTP basic authentication
        self.username = ""
        # Password for HTTP basic authentication
        self.password = ""
        # Logging Settings
        self.logger = {}
        self.logger["package_logger"] = logging.getLogger("simcore_dsm_sdk")
        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
        # Log format
        # NOTE: the assignments to logger_format, logger_file and debug below
        # route through the property setters further down, which update the
        # formatter, file handlers and log levels as side effects.
        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
        # Log stream handler
        self.logger_stream_handler = None
        # Log file handler
        self.logger_file_handler = None
        # Debug file location
        self.logger_file = None
        # Debug switch
        self.debug = False
        # SSL/TLS verification
        # Set this to false to skip verifying SSL certificate when calling API
        # from https server.
        self.verify_ssl = True
        # Set this to customize the certificate file to verify the peer.
        self.ssl_ca_cert = None
        # client certificate file
        self.cert_file = None
        # client key file
        self.key_file = None
        # Set this to True/False to enable/disable SSL hostname verification.
        self.assert_hostname = None
        # urllib3 connection pool's maximum number of connections saved
        # per pool. urllib3 uses 1 connection as default value, but this is
        # not the best value when you are making a lot of possibly parallel
        # requests to the same host, which is often the case here.
        # cpu_count * 5 is used as default value to increase performance.
        self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
        # Proxy URL
        self.proxy = None
        # Safe chars for path_param
        self.safe_chars_for_path_param = ''

    # The backing attribute is name-mangled to _Configuration__logger_file.
    @property
    def logger_file(self):
        """The logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        return self.__logger_file

    @logger_file.setter
    def logger_file(self, value):
        """The logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        self.__logger_file = value
        if self.__logger_file:
            # If set logging file,
            # then add file handler and remove stream handler.
            self.logger_file_handler = logging.FileHandler(self.__logger_file)
            self.logger_file_handler.setFormatter(self.logger_formatter)
            for _, logger in six.iteritems(self.logger):
                logger.addHandler(self.logger_file_handler)

    @property
    def debug(self):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        return self.__debug

    @debug.setter
    def debug(self, value):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        self.__debug = value
        if self.__debug:
            # if debug status is True, turn on debug logging
            for _, logger in six.iteritems(self.logger):
                logger.setLevel(logging.DEBUG)
            # turn on httplib debug
            httplib.HTTPConnection.debuglevel = 1
        else:
            # if debug status is False, turn off debug logging,
            # setting log level to default `logging.WARNING`
            for _, logger in six.iteritems(self.logger):
                logger.setLevel(logging.WARNING)
            # turn off httplib debug
            httplib.HTTPConnection.debuglevel = 0

    @property
    def logger_format(self):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        return self.__logger_format

    @logger_format.setter
    def logger_format(self, value):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        self.__logger_format = value
        self.logger_formatter = logging.Formatter(self.__logger_format)

    def get_api_key_with_prefix(self, identifier):
        """Gets API key (with prefix if set).

        :param identifier: The identifier of apiKey.
        :return: The token for api key authentication.
        """
        # Returns None implicitly when no key is configured for `identifier`.
        if (self.api_key.get(identifier) and
                self.api_key_prefix.get(identifier)):
            return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier]  # noqa: E501
        elif self.api_key.get(identifier):
            return self.api_key[identifier]

    def get_basic_auth_token(self):
        """Gets HTTP basic authentication header (string).

        :return: The token for basic HTTP authentication.
        """
        return urllib3.util.make_headers(
            basic_auth=self.username + ':' + self.password
        ).get('authorization')

    def auth_settings(self):
        """Gets Auth Settings dict for api client.

        :return: The Auth Settings information dict.
        """
        # This API defines no auth schemes, hence the empty dict.
        return {
        }

    def to_debug_report(self):
        """Gets the essential information for debugging.

        :return: The report for debugging.
        """
        return "Python SDK Debug Report:\n"\
               "OS: {env}\n"\
               "Python Version: {pyversion}\n"\
               "Version of the API: 2.0.0\n"\
               "SDK Package Version: 1.0.0".\
               format(env=sys.platform, pyversion=sys.version)
|
[
"[email protected]"
] | |
14b7d15f64f419181184d6af5c739890b8d7acaf
|
12a72da6848ae461b995ec2fc6c4e1827be82803
|
/common/monitor_bak.py
|
69ab77e717e2eebdea993ff2d36e9666824bb3cb
|
[] |
no_license
|
lim1942/coin_helper
|
f3ed40c07a049a00f052dfa3e59cee7eefe969cf
|
d34ce363371fd964d8c46d5dd04ca7c5eb7d35b4
|
refs/heads/main
| 2023-04-30T10:46:03.231440
| 2021-05-25T12:15:49
| 2021-05-25T12:15:49
| 366,247,314
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,442
|
py
|
import json
import redis
import requests
import traceback
from threading import Thread,Lock
from datetime import datetime,timedelta
from coin_helper.settings import REDIS_URL
class RedisTool(object):
    """Thin wrapper around a redis connection built from REDIS_URL."""

    def __init__(self):
        self.R = redis.Redis.from_url(REDIS_URL, decode_responses=True)
        self.P = self.R.pipeline(transaction=False)

    def set(self, k, v, ex):
        """Store *v* under key *k* with an expiry of *ex* seconds."""
        self.R.set(k, v, ex=ex)

    def get(self, k):
        """Return the value stored under key *k* (None when absent)."""
        return self.R.get(k)
class Monitor:
    """Base price monitor: records values in redis and sends rate-limited
    DingTalk alerts via a webhook."""

    # Shared across all monitor instances.
    redis_obj = RedisTool()

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.notify_lock = Lock()
        # Per-key timestamp of the last alert actually sent.
        self.last_notify_time = {}
        # "Long ago" sentinel so the first alert for any key always fires.
        self.long_before_time = datetime.now() - timedelta(days=1)

    def record(self, k, v, ex=10):
        """Best-effort write of value *v* under key *k* with *ex* seconds TTL."""
        try:
            return self.redis_obj.set(k, v, ex)
        except Exception:
            # Narrowed from a bare ``except``: redis failures remain
            # best-effort, but KeyboardInterrupt/SystemExit are no longer
            # swallowed.
            traceback.print_exc()

    def compare(self, k, v, k2):
        # Hook for subclasses; the base monitor performs no comparison.
        pass

    def notify(self, k, message):
        """Send *message* for key *k* asynchronously in a worker thread."""
        Thread(target=self._notify, args=(k, message)).start()

    def _notify(self, k, message):
        """Post the alert to DingTalk, at most once per key every 6 hours."""
        notify_time = datetime.now()
        with self.notify_lock:
            if notify_time - timedelta(hours=6) >= self.last_notify_time.get(k, self.long_before_time):
                webhook='https://oapi.dingtalk.com/robot/send?access_token=494a793fe8aa1146b93baeef9aba96cbfa725e2ce6230c0eaa37bb682e06eea8'
                header = {
                    "Content-Type": "application/json",
                    "Charset": "UTF-8"}
                data ={
                    "msgtype": "text",
                    "text": {
                        "content": f"触发价差信号 {message}"
                    },
                    "at": {
                        "atMobiles":[
                            "13750872274"
                        ],
                        "isAtAll": False
                    }}
                ret = requests.post(url=webhook,data=json.dumps(data),headers=header).text
                self.last_notify_time[k] = notify_time
                return ret
class OkexMonitor(Monitor):
    """Okex-specific monitor that compares prices across related markets."""

    def __init__(self, **kwargs):
        super(OkexMonitor, self).__init__(**kwargs)
        # Alert when the relative price difference exceeds 5%.
        self.variance_threshold = 0.05

    def compare(self, k, v, k2):
        """Compare price *v* of key *k* against the recorded price of *k2*
        and notify when the relative difference exceeds the threshold."""
        try:
            v = float(v)
            v2 = float(self.redis_obj.get(k2))
            variance = abs(v - v2)
            variance_rate = variance/v
            if variance_rate > self.variance_threshold:
                message = f"【{k}:{v}】与【{k2}:{v2}】差异率大于{self.variance_threshold}, 差值{round(variance,6)} 差率{round(variance_rate,6)}"
                self.notify(k, message)
        except Exception:
            # Narrowed from a bare ``except``: a missing counterpart key
            # (get() -> None -> TypeError) or an unparsable value is skipped
            # silently, preserving the original best-effort behaviour.
            pass

    def okex_record(self, item):
        """Record the latest price of an instrument."""
        self.record(item['instrument_id'], item['price'])

    def okex_compare_1(self, item):
        """Pairwise-compare the three Okex markets for one instrument:
        USDT perpetual swap, coin-margined perpetual swap and spot."""
        try:
            instrument_id = item['instrument_id']
            if instrument_id.endswith('USDT-SWAP'):
                self.compare(instrument_id,item['price'],item['instrument_id'].split('-')[0]+'-USDT')
                self.compare(instrument_id,item['price'],item['instrument_id'].split('-')[0]+'-USD-SWAP')
            # Compare the coin-margined perpetual swap against spot.
            elif instrument_id.endswith('USD-SWAP'):
                self.compare(instrument_id,item['price'],item['instrument_id'].split('-')[0]+'-USDT')
        except Exception:
            # Narrowed from a bare ``except``; failures are logged, not fatal.
            traceback.print_exc()
|
[
"[email protected]"
] | |
05b60a337fe7a12315b91c9f03f05cbc27accb90
|
5e48579f65ab45469469a5cf0cbef82bf2323585
|
/CovIdMX withREact/Covid19AcuteMx_Support_App/account_app/forms.py
|
015094a334d599f574668d60ee456db90449a046
|
[] |
no_license
|
jayquake/React-Applications
|
9e5786e4402dfe9f4e33d4daef657adb40bae1c8
|
b9f27872846e7e8b7da94f77c2120755909db572
|
refs/heads/master
| 2022-12-30T04:45:11.591814
| 2020-10-11T13:29:05
| 2020-10-11T13:29:05
| 284,446,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,164
|
py
|
from django import forms
from django.db import transaction
from .models import User, Subject, DoctorProfile, PatientProfile, EMTProfile
from django.contrib.auth.forms import UserCreationForm
from . import models
class PatientRegisterForm(UserCreationForm):
    """Sign-up form for patients; also creates the linked PatientProfile."""

    interests = forms.ModelMultipleChoiceField(
        queryset=Subject.objects.all(),
        widget=forms.CheckboxSelectMultiple,
        required=False
    )

    class Meta(UserCreationForm.Meta):
        model = User

    @transaction.atomic
    def save(self, commit=True):
        """Create the user, flag it as a patient and build its profile.

        ``commit`` is accepted for signature compatibility with
        ``UserCreationForm.save()`` (callers such as the admin pass it);
        the user and profile are always persisted here because the
        profile row and the M2M interests require a saved user.
        """
        user = super().save(commit=False)
        user.is_patient = True
        user.save()
        patient = PatientProfile.objects.create(user=user)
        patient.interests.add(*self.cleaned_data.get('interests'))
        return user
class DoctorRegisterForm(UserCreationForm):
    """Sign-up form for doctors; marks the created user with ``is_doctor``."""
    class Meta(UserCreationForm.Meta):
        model = User
    def save(self, commit=True):
        # Flag the new user as a doctor before (optionally) persisting it.
        user = super().save(commit=False)
        user.is_doctor = True
        if commit:
            user.save()
        return user
class PatientUpdateFrom(forms.ModelForm):
    """Edit form for a patient's core User fields (username, email).

    NOTE(review): the class name looks like a typo for ``PatientUpdateForm``;
    it is kept as-is because renaming would break existing imports.
    """
    class Meta:
        model = User
        fields = ['username', 'email']
class PatientProfileUpdateForm(forms.ModelForm):
    """Edit form for the patient's profile picture."""
    class Meta:
        model = PatientProfile
        fields = ['image']
class DoctorUpdateFrom(forms.ModelForm):
    """Edit form for a doctor's core User fields.

    NOTE(review): name looks like a typo for ``DoctorUpdateForm``; kept
    as-is to avoid breaking imports.
    """
    class Meta:
        model = User
        fields = ['first_name', 'last_name', 'email']
class DoctorProfileUpdateForm(forms.ModelForm):
    """Edit form for a doctor's profile (image, about text, resume)."""
    class Meta:
        model = models.DoctorProfile
        # NOTE(review): the attributes below are declared inside Meta, where
        # Django ignores them — form fields must be class attributes of the
        # form itself. They currently have no effect; confirm the intent
        # before moving or removing them.
        email = forms.EmailField()
        first_name = forms.CharField(max_length=50)
        last_name = forms.CharField(max_length=50)
        about_me = forms.Textarea()
        resume = forms.FileInput
        job_title = forms.ChoiceField
        fields = ['image', 'about_me', 'resume']
# class patientRegisterForm(UserCreationForm):
# USER_SCHOOL_CHOICES = ((1, 'High School'),
# (2, 'Some College'),
# (3, 'Associates Degree'),
# (4, 'Bachelors Degree'),
# (5, 'Masters Degree'),
# (6, 'Other'),
# )
# email = forms.EmailField()
# first_name = forms.CharField(max_length=50)
# last_name = forms.CharField(max_length=50)
# academics = forms.Select(choices=USER_SCHOOL_CHOICES)
#
# class Meta:
# model = User
# fields = ['first_name', 'last_name', 'username', 'email', 'password1', 'password2']
#
#
# class DoctorRegisterForm(UserCreationForm):
# USER_Grade_Taught_CHOICES = ((1, 'Kindergarten'),
# (2, 'first grade '),
# (3, 'second grade '),
# (4, 'third grade'),
# (5, 'Fourth Grade'),
# (6, 'Fifth Grade'),
# (7, 'Sixth Grade'),
# (8, 'Seventh Grade'),
# (9, 'Eighth Grade'),
# (10, 'Ninth Grade'),
# (11, ' Grade'),
# )
# email = forms.EmailField()
# first_name = forms.CharField(max_length=50)
# last_name = forms.CharField(max_length=50)
# highest_education_level = forms.Select()
# grade_taught = forms.SelectMultiple(USER_Grade_Taught_CHOICES)
#
# class Meta:
# model = User
# form_class = DoctorRegisterForm
# template_name = 'registration/signup_form.html'
# fields = ['first_name', 'last_name', 'username', 'email', 'password1', 'password2',]
#
#
# def get_context_data(self, **kwargs):
# kwargs['user_type'] = 'Doctor'
# return super().get_context_data(**kwargs)
#
#
#
#
#
#
# email = forms.EmailField()
# first_name = forms.CharField(max_length=50)
# last_name = forms.CharField(max_length=50)
# about_me = forms.Textarea(max_length=150)
# resume = forms.FileInput
# job_title = forms.ChoiceField
# languages = forms.LanguageField(max_length=8, blank=True)
# region = forms.RegionField(blank=True)
|
[
"[email protected]"
] | |
1a7048886021c154c279d665d513e857da759255
|
95e9ec4b3b0d86063da53a0e62e138cf794cce3a
|
/webroot/dqb/dqb/base/myjson.py
|
e6843b72d13a46c5fb705787579293028f384d2f
|
[] |
no_license
|
wjl626nice/1902
|
c3d350d91925a01628c9402cbceb32ebf812e43c
|
5a1a6dd59cdd903563389fa7c73a283e8657d731
|
refs/heads/master
| 2023-01-05T23:51:47.667675
| 2019-08-19T06:42:09
| 2019-08-19T06:42:09
| 180,686,044
| 4
| 1
| null | 2023-01-04T07:35:24
| 2019-04-11T00:46:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,822
|
py
|
from django.shortcuts import HttpResponse
from rest_framework.renderers import JSONRenderer
from base.errcode import err_number
class JSONResponse(HttpResponse):
    """
    An HttpResponse that wraps its payload in a JSON envelope.

    The body has the shape ``{"code": ..., "msg": ..., "total": ...,
    ["count": ...,] "data": [...]}``, where ``msg`` is looked up from
    ``err_number`` by the stringified error code. A non-zero ``code``
    yields an error envelope with an empty data list.
    """

    def __init__(self, code, data='', total=1, count=-1, **kwargs):
        kwargs['content_type'] = 'application/json'
        try:
            content = JSONRenderer().render(data)
            if code:
                # Error: non-zero code, empty data list.
                content = ('{"code":' + str(code)
                           + ',"msg":"' + err_number[str(code)]
                           + '","data":[]}')
            elif count < 0:
                # Success without the optional "count" field.
                content = ('{"code":' + str(code)
                           + ',"msg":"' + err_number[str(code)]
                           + '","total":' + str(total)
                           + ',"data":' + str(content, encoding="utf-8")
                           + '}')
            else:
                # Success including the optional "count" field.
                content = ('{"code":' + str(code)
                           + ',"msg":"' + err_number[str(code)]
                           + '","total":' + str(total)
                           + ',"count":' + str(count)
                           + ',"data":' + str(content, encoding="utf-8")
                           + '}')
        except Exception:
            # Narrowed from a bare ``except``: the fallback (e.g. unknown
            # code -> KeyError in err_number) stays best-effort, but
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            content = '{"code":' + '-1' + ',"msg":"返回有误","data":[]}'
        super(JSONResponse, self).__init__(content, **kwargs)
|
[
"[email protected]"
] | |
f64e6334a50348abd20c1e2b1141f25c1a15d653
|
38bd99c72ca2521489ce1eb02b7604095b02b585
|
/src/1680-ConcatenationOfConsecutiveBinaryNumbers.py
|
67fc18efbe6b891b864fd59abb68a2db2a44bdad
|
[
"MIT"
] |
permissive
|
Jiezhi/myleetcode
|
eadbd7d9f1f0ea6a0ee15c2da9040dcfbd28b522
|
4dd1e54d8d08f7e6590bc76abd08ecaacaf775e5
|
refs/heads/master
| 2023-03-16T15:52:21.833622
| 2023-03-09T14:33:03
| 2023-03-09T14:33:03
| 139,965,948
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
#!/usr/bin/env python3
"""
CREATED AT: 2022-09-23
URL: https://leetcode.com/problems/concatenation-of-consecutive-binary-numbers/
GITHUB: https://github.com/Jiezhi/myleetcode
FileName: 1680-ConcatenationOfConsecutiveBinaryNumbers
Difficulty: Medium
Desc:
Tag:
See: https://leetcode.cn/problems/concatenation-of-consecutive-binary-numbers/solution/lian-jie-lian-xu-er-jin-zhi-shu-zi-by-ze-t40j/
"""
class Solution:
    def concatenatedBinary(self, n: int) -> int:
        """Return, modulo 1e9+7, the decimal value of the binary string
        formed by concatenating the binary representations of 1..n.

        1 <= n <= 10^5
        """
        MOD = 10 ** 9 + 7
        result = 0
        for value in range(1, n + 1):
            # Make room for exactly bit_length(value) bits, then append
            # them; the freshly shifted-in low bits are zero, so OR-ing
            # equals adding.
            result = ((result << value.bit_length()) | value) % MOD
        return result
def test():
    """Sanity checks for Solution.concatenatedBinary (LeetCode examples)."""
    assert Solution().concatenatedBinary(n=1) == 1
    assert Solution().concatenatedBinary(n=3) == 27
    assert Solution().concatenatedBinary(n=12) == 505379714
if __name__ == '__main__':
test()
|
[
"[email protected]"
] | |
9a6be77d3f1ab6b5515bb83d0b6a6eee5e09b43b
|
eda7fbf7bbc0614e6fc448d2f6e3fd1918dadcbe
|
/new-api-tests/applications/create-surface-caps-from-centerlines/create_surface_caps.py
|
e61f1afbfba81befc17a2e58529183112bb6877e
|
[] |
no_license
|
SimVascular/SimVascular-Tests
|
e97c136ad3bf3a7275d40c0323abca7817eb2eca
|
55018e1edcd070bce77ae5af4caf2105353d3697
|
refs/heads/master
| 2023-02-11T02:19:06.755815
| 2023-02-02T18:26:31
| 2023-02-02T18:26:31
| 42,211,398
| 2
| 10
| null | 2023-02-02T18:26:32
| 2015-09-10T00:06:14
|
Python
|
UTF-8
|
Python
| false
| false
| 3,496
|
py
|
#!/usr/bin/env python
"""This script is used to create an SV model from a closed segmentation surface.
The
"""
import argparse
import os
import sys
from centerlines import Centerlines
from surface import Surface
sys.path.insert(1, '../../graphics/')
import graphics as gr
def parse_args():
    '''Parse and return the command-line arguments.

    When the script is invoked with no arguments at all, print the full
    help text and exit. This check must run before ``parse_args()`` is
    called: argparse would otherwise exit first with a terse error about
    the required ``--surface-file`` option, making the help path
    unreachable (as it was in the original code).
    '''
    parser = argparse.ArgumentParser()

    parser.add_argument("--clip-distance", type=float, default=0.0,
        help="The distance from the end of a centerline branch to clip a surface.")

    parser.add_argument("--clip-width-scale", type=float, default=1.0,
        help="The width multiplied by the centerline branch end radius to define the width of the box used to clip a surface.")

    parser.add_argument("--surface-file", required=True, help="Input surface (.vtp or .vtk) file.")

    parser.add_argument("--mesh-scale", type=float, default=1.0,
        help="The factor used to scale the fe volume meshing edge size. A larger scale creates a coarser mesh. The initial edge size is determined from the largest surface triangle.")

    parser.add_argument("--remesh-scale", type=float, default=1.0,
        help="The factor used to scale the surface remeshing edge size. A larger scale creates a coarser suface mesh. The initial edge size is determined from the largest surface triangle.")

    # No arguments at all: show usage instead of a bare argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    return parser.parse_args()
def main():
    """Interactively build a capped surface model from a segmentation surface.

    Reads the surface named on the command line, renders it in a graphics
    window, and installs keyboard handlers for computing centerlines and
    creating the clipped/capped model.
    """
    # Get command-line arguments.
    args = parse_args()
    ## Create renderer and graphics window.
    win_width = 500
    win_height = 500
    renderer, renderer_window = gr.init_graphics(win_width, win_height)
    ## Read in the segmentation surface.
    surface_file_name = args.surface_file
    surface = Surface(gr, renderer_window, renderer)
    surface.read(surface_file_name)
    gr_geom = gr.add_geometry(renderer, surface.geometry, color=[0.8, 0.8, 1.0])
    surface.vtk_actor = gr_geom
    #gr_geom.GetProperty().SetOpacity(0.5)
    ## Create a Centerlines object used to clip the surface.
    # The clip/remesh parameters are forwarded from the command line.
    centerlines = Centerlines()
    centerlines.graphics = gr
    centerlines.surface = surface
    centerlines.window = renderer_window
    centerlines.renderer = renderer
    centerlines.clip_distance = args.clip_distance
    centerlines.clip_width_scale = args.clip_width_scale
    centerlines.remesh_scale = args.remesh_scale
    centerlines.mesh_scale = args.mesh_scale
    print("---------- Alphanumeric Keys ----------")
    print("a - Compute model automatically for a three vessel surface with flat ends.")
    print("c - Compute centerlines.")
    print("m - Create a model from the surface and centerlines.")
    print("q - Quit")
    print("s - Select a centerline source point.")
    print("t - Select a centerline target point.")
    print("u - Undo the selection of a centerline source or target point.")
    ## Create a mouse interactor for selecting centerline points.
    # Maps key presses to handlers; 's'/'t' additionally pick surface points.
    picking_keys = ['s', 't']
    event_table = {
        'a': (surface.create_model_automatically, centerlines),
        'c': (surface.compute_centerlines, surface),
        'm': (centerlines.create_model, surface),
        's': surface.add_centerlines_source_node,
        't': surface.add_centerlines_target_node
    }
    interactor = gr.init_picking(renderer_window, renderer, surface.geometry, picking_keys, event_table)
    ## Display window.
    # Blocks until the interactive window is closed.
    interactor.Start()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.