| repo_name (string, len 5–100) | path (string, len 4–231) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, len 0–8.16k) | middle (string, len 3–512) | suffix (string, len 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
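The columns above are the schema of what appears to be a fill-in-the-middle (FIM) code dataset: each row names a repository and file, and carries that file's contents split into a `prefix`, a short masked `middle` (3 to 512 characters), and a `suffix`. Below is a minimal sketch of reassembling one row, assuming rows arrive as plain mappings with these fields; the repo and path are copied from the first row, while the span values and the `reassemble` helper are illustrative placeholders, not part of the dataset.

```python
# Minimal sketch: put one FIM row back together into full file contents.
# Assumes each row is a plain dict keyed by the columns listed above; the
# span values below are placeholders, and reassemble() is an illustrative
# helper, not something shipped with the dataset.
record = {
    "repo_name": "AndrewPashkin/python-tempo",  # taken from the first row below
    "path": "src/tempo/django/forms.py",
    "language": "Python",
    "license": "bsd-3-clause",
    "size": 859,
    "score": 0.0,
    "prefix": "# ...file contents before the masked span...\n",
    "middle": "# ...the masked span (3 to 512 characters)...\n",
    "suffix": "# ...file contents after the masked span...\n",
}

def reassemble(row):
    """Concatenate the three spans to recover the original file text."""
    return row["prefix"] + row["middle"] + row["suffix"]

assert reassemble(record).startswith(record["prefix"])
```

In the usual FIM setup the model is prompted with `prefix` and `suffix` and asked to generate `middle`; concatenating the three spans recovers the original file, which is why the splits in the rows below often land mid-identifier.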
| AndrewPashkin/python-tempo | src/tempo/django/forms.py | Python | bsd-3-clause | 859 | 0 |
"""Provides Django-Admin form field."""
# coding=utf-8
from django.utils.translation import ugettext_lazy as _
from django.forms.fields import Field, ValidationError
from tempo.django.widgets import RecurrentEventSetWidget
from tempo.recurrenteventset import RecurrentEventSet
class RecurrentEventSetField(Field):
"""Form field, for usage in admin forms.
Represents RecurrentEventSet."""
# pylint: disable=no-init
widget = RecurrentEventSetWidget
def clean(self, value):
"""Cleans and validates RecurrentEventSet expression."""
# pylint: disable=no-self-use
if value is None:
return None
if not RecurrentEventSet.validate_json(value):
raise ValidationError(_('Invalid input.'),
code='invalid')
return RecurrentEventSet.from_json(value)
| projectatomic/atomic-reactor | tests/conftest.py | Python | bsd-3-clause | 2,306 | 0.000867 |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals, absolute_import
import json
import pytest
import requests
import requests.exceptions
from tests.constants import LOCALHOST_REGISTRY_HTTP, DOCKER0_REGISTRY_HTTP, MOCK, TEST_IMAGE
from tests.util import uuid_value
from osbs.utils import ImageName
from atomic_reactor.core import ContainerTasker
from atomic_reactor.constants import CONTAINER_DOCKERPY_BUILD_METHOD
from atomic_reactor.inner import DockerBuildWorkflow
from tests.constants import MOCK_SOURCE
if MOCK:
from tests.docker_mock import mock_docker
@pytest.fixture()
def temp_image_name():
return ImageName(repo=("atomic-reactor-tests-%s" % uuid_value()))
@pytest.fixture()
def is_registry_running():
"""
is docker registry running (at {docker0,lo}:5000)?
"""
try:
lo_response = requests.get(LOCALHOST_REGISTRY_HTTP)
except requests.exceptions.ConnectionError:
return False
if not lo_response.ok:
return False
try:
lo_response = requests.get(DOCKER0_REGISTRY_HTTP) # leap of faith
except requests.exceptions.ConnectionError:
return False
if not lo_response.ok:
return False
return True
@pytest.fixture(scope="module")
def docker_tasker():
if MOCK:
mock_docker()
ct = ContainerTasker(retry_times=0)
ct.build_method = CONTAINER_DOCKERPY_BUILD_METHOD
return ct
@pytest.fixture(params=[True, False])
def reactor_config_map(request):
return request.param
@pytest.fixture(params=[True, False])
def inspect_only(request):
return request.param
@pytest.fixture
def user_params(monkeypatch):
"""
Setting default image_tag in the env var USER_PARAMS. Any tests requiring
to create an instance of :class:`DockerBuildWorkflow` requires this fixture.
"""
monkeypatch.setenv('USER_PARAMS', json.dumps({'image_tag': TEST_IMAGE}))
@pytest.fixture
def workflow(user_params):
return DockerBuildWorkflow(source=MOCK_SOURCE)
@pytest.mark.optionalhook
def pytest_html_results_table_row(report, cells):
if report.passed or report.skipped:
del cells[:]
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sympy/geometry/tests/test_geometry.py | Python | agpl-3.0 | 24,875 | 0.006151 |
from sympy import (Abs, C, Dummy, Max, Min, Rational, Float, S, Symbol, cos, oo,
pi, simplify, sqrt, symbols)
from sympy.geometry import (Circle, Curve, Ellipse, GeometryError, Line, Point,
Polygon, Ray, RegularPolygon, Segment, Triangle,
are_similar, convex_hull, intersection)
from sympy.utilities.pytest import raises, XFAIL
x = Symbol('x', real=True)
y = Symbol('y', real=True)
t = Symbol('t', real=True)
x1 = Symbol('x1', real=True)
x2 = Symbol('x2', real=True)
y1 = Symbol('y1', real=True)
y2 = Symbol('y2', real=True)
half = Rational(1,2)
def feq(a, b):
"""Test if two floating point values are 'equal'."""
t = Float("1.0E-10")
return -t < a-b < t
def test_curve():
s = Symbol('s')
z = Symbol('z')
# this curve is independent of the indicated parameter
C = Curve([2*s, s**2], (z, 0, 2))
assert C.parameter == z
assert C.functions == (2*s, s**2)
assert C.arbitrary_point() == Point(2*s, s**2)
assert C.arbitrary_point(z) == Point(2*s, s**2)
# this is how it is normally used
C = Curve([2*s, s**2], (s, 0, 2))
assert C.parameter == s
assert C.functions == (2*s, s**2)
t = Symbol('t')
assert C.arbitrary_point() != Point(2*t, t**2) # the t returned as assumptions
t = Symbol('t', real=True) # now t has the same assumptions so the test passes
assert C.arbitrary_point() == Point(2*t, t**2)
assert C.arbitrary_point(z) == Point(2*z, z**2)
assert C.arbitrary_point(C.parameter) == Point(2*s, s**2)
raises(ValueError, 'Curve((s, s + t), (s, 1, 2)).arbitrary_point()')
raises(ValueError, 'Curve((s, s + t), (t, 1, 2)).arbitrary_point(s)')
def test_point():
p1 = Point(x1, x2)
p2 = Point(y1, y2)
p3 = Point(0, 0)
p4 = Point(1, 1)
assert len(p1) == 1
assert p1 in p1
assert p1 not in p2
assert p2[1] == y2
assert (p3+p4) == p4
assert (p2-p1) == Point(y1-x1, y2-x2)
assert p4*5 == Point(5, 5)
assert -p2 == Point(-y1, -y2)
assert Point.midpoint(p3, p4) == Point(half, half)
assert Point.midpoint(p1, p4) == Point(half + half*x1, half + half*x2)
assert Point.midpoint(p2, p2) == p2
assert p2.midpoint(p2) == p2
assert Point.distance(p3, p4) == sqrt(2)
assert Point.distance(p1, p1) == 0
assert Point.distance(p3, p2) == sqrt(p2.x**2 + p2.y**2)
p1_1 = Point(x1, x1)
p1_2 = Point(y2, y2)
p1_3 = Point(x1 + 1, x1)
assert Point.is_collinear(p3)
assert Point.is_collinear(p3, p4)
assert Point.is_collinear(p3, p4, p1_1, p1_2)
assert Point.is_collinear(p3, p4, p1_1, p1_3) == False
x_pos = Symbol('x', real=True, positive=True)
p2_1 = Point(x_pos, 0)
p2_2 = Point(0, x_pos)
p2_3 = Point(-x_pos, 0)
p2_4 = Point(0, -x_pos)
p2_5 = Point(x_pos, 5)
assert Point.is_concyclic(p2_1)
assert Point.is_concyclic(p2_1, p2_2)
assert Point.is_concyclic(p2_1, p2_2, p2_3, p2_4)
assert Point.is_concyclic(p2_1, p2_2, p2_3, p2_5) == False
def test_line():
p1 = Point(0, 0)
p2 = Point(1, 1)
p3 = Point(x1, x1)
p4 = Point(y1, y1)
p5 = Point(x1, 1 + x1)
p6 = Point(1, 0)
p7 = Point(0, 1)
p8 = Point(2, 0)
p9 = Point(2, 1)
l1 = Line(p1, p2)
l2 = Line(p3, p4)
l3 = Line(p3, p5)
l4 = Line(p1, p6)
l5 = Line(p1, p7)
l6 = Line(p8, p9)
l7 = Line(p2, p9)
# Basic stuff
assert Line((1, 1), slope=1) == Line((1, 1), (2, 2))
assert Line((1, 1), slope=oo) == Line((1, 1), (1, 2))
assert Line((1, 1), slope=-oo) == Line((1, 1), (1, 2))
raises(ValueError, 'Line((1, 1), 1)')
assert Line(p1, p2) == Line(p2, p1)
assert l1 == l2
assert l1 != l3
assert l1.slope == 1
assert l3.slope == oo
assert l4.slope == 0
assert l4.coefficients == (0, 1, 0)
assert l4.equation(x=x, y=y) == y
assert l5.slope == oo
assert l5.coefficients == (1, 0, 0)
assert l5.equation() == x
assert l6.equation() == x - 2
assert l7.equation() == y - 1
assert p1 in l1 # is p1 on the line l1?
assert p1 not in l3
assert simplify(l1.equation()) in (x-y, y-x)
assert simplify(l3.equation()) in (x-x1, x1-x)
assert l2.arbitrary_point() in l2
for ind in xrange(0, 5):
assert l3.random_point() in l3
# Orthogonality
p1_1 = Point(-x1, x1)
l1_1 = Line(p1, p1_1)
assert l1.perpendicular_line(p1) == l1_1
assert Line.is_perpendicular(l1, l1_1)
assert Line.is_perpendicular(l1 , l2) == False
# Parallelity
p2_1 = Point(-2*x1, 0)
l2_1 = Line(p3, p5)
assert l2.parallel_line(p1_1) == Line(p2_1, p1_1)
assert l2_1.parallel_line(p1) == Line(p1, Point(0, 2))
assert Line.is_parallel(l1, l2)
assert Line.is_parallel(l2, l3) == False
assert Line.is_parallel(l2, l2.parallel_line(p1_1))
assert Line.is_parallel(l2_1, l2_1.parallel_line(p1))
# Intersection
assert intersection(l1, p1) == [p1]
assert intersection(l1, p5) == []
assert intersection(l1, l2) in [[l1], [l2]]
assert intersection(l1, l1.parallel_line(p5)) == []
# Concurrency
l3_1 = Line(Point(5, x1), Point(-Rational(3,5), x1))
assert Line.is_concurrent(l1, l3)
assert Line.is_concurrent(l1, l3, l3_1)
assert Line.is_concurrent(l1, l1_1, l3) == False
# Projection
assert l2.projection(p4) == p4
assert l1.projection(p1_1) == p1
assert l3.projection(p2) == Point(x1, 1)
# Finding angles
l1_1 = Line(p1, Point(5, 0))
assert feq(Line.angle_between(l1, l1_1).evalf(), pi.evalf()/4)
# Testing Rays and Segments (very similar to Lines)
assert Ray((1, 1), angle=pi/4) == Ray((1, 1), (2, 2))
assert Ray((1, 1), angle=pi/2) == Ray((1, 1), (1, 2))
assert Ray((1, 1), angle=-pi/2) == Ray((1, 1), (1, 0))
assert Ray((1, 1), angle=-3*pi/2) == Ray((1, 1), (1, 2))
assert Ray((1, 1), angle=5*pi/2) == Ray((1, 1), (1, 2))
assert Ray((1, 1), angle=5.0*pi/2) == Ray((1, 1), (1, 2))
assert Ray((1, 1), angle=pi) == Ray((1, 1), (0, 1))
assert Ray((1, 1), angle=3.0*pi) == Ray((1, 1), (0, 1))
assert Ray((1, 1), angle=4.0*pi) == Ray((1, 1), (2, 1))
assert Ray((1, 1), angle=0) == Ray((1, 1), (2, 1))
# XXX don't know why this fails without str
assert str(Ray((1, 1), angle=4.2*pi)) == str(Ray(Point(1, 1), Point(2, 1 + C.tan(0.2*pi))))
assert Ray((1, 1), angle=5) == Ray((1, 1), (2, 1 + C.tan(5)))
raises(ValueError, 'Ray((1, 1), 1)')
r1 = Ray(p1, Point(-1, 5))
r2 = Ray(p1, Point(-1, 1))
r3 = Ray(p3, p5)
assert l1.projection(r1) == Ray(p1, p2)
assert l1.projection(r2) == p1
assert r3 != r1
t = Symbol('t', real=True)
assert Ray((1, 1), angle=pi/4).arbitrary_point() == Point(1/(1 - t), 1/(1 - t))
s1 = Segment(p1, p2)
s2 = Segment(p1, p1_1)
assert s1.midpoint == Point(Rational(1,2), Rational(1,2))
assert s2.length == sqrt( 2*(x1**2) )
assert s1.perpendicular_bisector() == Line(Point(0, 1), Point(1, 0))
assert Segment((1, 1), (2, 3)).arbitrary_point() == Point(1 + t, 1 + 2*t)
# Segment contains
a, b = symbols('a,b')
s = Segment((0, a), (0, b))
assert Point(0, (a + b)/2) in s
s = Segment((a, 0), (b, 0))
assert Point((a + b)/2, 0) in s
assert (Point(2*a, 0) in s) is False # XXX should be None?
# Testing distance from a Segment to an object
s1 = Segment(Point(0, 0), Point(1, 1))
s2 = Segment(Point(half, half), Point(1, 0))
pt1 = Point(0, 0)
pt2 = Point(Rational(3)/2, Rational(3)/2)
assert s1.distance(pt1) == 0
assert s2.distance(pt1) == 2**(half)/2
assert s2.distance(pt2) == 2**(half)
# Special cases of projection and intersection
r1 = Ray(Point(1, 1), Point(2, 2))
r2 = Ray(Point(2, 2), Point(0, 0))
r3 = Ray(Point(1, 1), Point(-1, -1))
r4 = Ray(Point(0, 4), Point(-1, -5))
assert intersection(r1, r2) == [Segment(Point(1, 1), Point(2, 2))]
assert intersection(r1, r3) == [Point(1, 1)]
assert r1.projection(r3) == Point(1, 1)
assert r1.projection(r4) == Segment(Point(1, 1), Point(2, 2))
r5 = Ray(Point(0, 0), Point(0, 1))
r6 = Ray(Point(0, 0), Point(0, 2)
| CoderBotOrg/coderbotsrv | server/lib/cryptography/hazmat/backends/openssl/backend.py | Python | gpl-3.0 | 38,174 | 0 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import warnings
from contextlib import contextmanager
import six
from cryptography import utils
from cryptography.exceptions import (
InternalError, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import (
CMACBackend, CipherBackend, DSABackend, EllipticCurveBackend, HMACBackend,
HashBackend, PBKDF2HMACBackend, PEMSerializationBackend,
PKCS8SerializationBackend, RSABackend,
TraditionalOpenSSLSerializationBackend, X509Backend
)
from cryptography.hazmat.backends.openssl.ciphers import (
_AESCTRCipherContext, _CipherContext
)
from cryptography.hazmat.backends.openssl.cmac import _CMACContext
from cryptography.hazmat.backends.openssl.dsa import (
_DSAParameters, _DSAPrivateKey, _DSAPublicKey
)
from cryptography.hazmat.backends.openssl.ec import (
_EllipticCurvePrivateKey, _EllipticCurvePublicKey
)
from cryptography.hazmat.backends.openssl.hashes import _HashContext
from cryptography.hazmat.backends.openssl.hmac import _HMACContext
from cryptography.hazmat.backends.openssl.rsa import (
_RSAPrivateKey, _RSAPublicKey
)
from cryptography.hazmat.backends.openssl.x509 import _Certificate
from cryptography.hazmat.bindings.openssl.binding import Binding
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
from cryptography.hazmat.primitives.asymmetric.padding import (
MGF1, OAEP, PKCS1v15, PSS
)
from cryptography.hazmat.primitives.ciphers.algorithms import (
AES, ARC4, Blowfish, CAST5, Camellia, IDEA, SEED, TripleDES
)
from cryptography.hazmat.primitives.ciphers.modes import (
CBC, CFB, CFB8, CTR, ECB, GCM, OFB
)
_MemoryBIO = collections.namedtuple("_MemoryBIO", ["bio", "char_ptr"])
_OpenSSLError = collections.namedtuple("_OpenSSLError",
["code", "lib", "func", "reason"])
@utils.register_interface(CipherBackend)
@utils.register_interface(CMACBackend)
@utils.register_interface(DSABackend)
@utils.register_interface(EllipticCurveBackend)
@utils.register_interface(HashBackend)
@utils.register_interface(HMACBackend)
@utils.register_interface(PBKDF2HMACBackend)
@utils.register_interface(PKCS8SerializationBackend)
@utils.register_interface(RSABackend)
@utils.register_interface(TraditionalOpenSSLSerializationBackend)
@utils.register_interface(PEMSerializationBackend)
@utils.register_interface(X509Backend)
class Backend(object):
"""
OpenSSL API binding interfaces.
"""
name = "openssl"
def __init__(self):
self._binding = Binding()
self._ffi = self._binding.ffi
self._lib = self._binding.lib
self._binding.init_static_locks()
# adds all ciphers/digests for EVP
self._lib.OpenSSL_add_all_algorithms()
# registers available SSL/TLS ciphers and digests
self._lib.SSL_library_init()
# loads error strings for libcrypto and libssl functions
self._lib.SSL_load_error_strings()
self._cipher_registry = {}
self._register_default_ciphers()
self.activate_osrandom_engine()
def activate_builtin_random(self):
# Obtain a new structural reference.
e = self._lib.ENGINE_get_default_RAND()
if e != self._ffi.NULL:
self._lib.ENGINE_unregister_RAND(e)
# Reset the RNG to use the new engine.
self._lib.RAND_cleanup()
# decrement the structural reference from get_default_RAND
res = self._lib.ENGINE_finish(e)
assert res == 1
def activate_osrandom_engine(self):
# Unregister and free the current engine.
self.activate_builtin_random()
# Fetches an engine by id and returns it. This creates a structural
# reference.
e = self._lib.ENGINE_by_id(self._lib.Cryptography_osrandom_engine_id)
assert e != self._ffi.NULL
# Initialize the engine for use. This adds a functional reference.
res = self._lib.ENGINE_init(e)
assert res == 1
# Set the engine as the default RAND provider.
res = self._lib.ENGINE_set_default_RAND(e)
assert res == 1
# Decrement the structural ref incremented by ENGINE_by_id.
res = self._lib.ENGINE_free(e)
assert res == 1
# Decrement the functional ref incremented by ENGINE_init.
res = self._lib.ENGINE_finish(e)
assert res == 1
# Reset the RNG to use the new engine.
self._lib.RAND_cleanup()
def openssl_version_text(self):
"""
Friendly string name of the loaded OpenSSL library. This is not
necessarily the same version as it was compiled against.
Example: OpenSSL 1.0.1e 11 Feb 2013
"""
return self._ffi.string(
self._lib.SSLeay_version(self._lib.SSLEAY_VERSION)
).decode("ascii")
def create_hmac_ctx(self, key, algorithm):
return _HMACContext(self, key, algorithm)
def hash_supported(self, algorithm):
digest = self._lib.EVP_get_digestbyname(algorithm.name.encode("ascii"))
return digest != self._ffi.NULL
def hmac_supported(self, algorithm):
return self.hash_supported(algorithm)
def create_hash_ctx(self, algorithm):
return _HashContext(self, algorithm)
def cipher_supported(self, cipher, mode):
if self._evp_cipher_supported(cipher, mode):
return True
elif isinstance(mode, CTR) and isinstance(cipher, AES):
return True
else:
return False
def _evp_cipher_supported(self, cipher, mode):
try:
adapter = self._cipher_registry[type(cipher), type(mode)]
except KeyError:
return False
evp_cipher = adapter(self, cipher, mode)
return self._ffi.NULL != evp_cipher
def register_cipher_adapter(self, cipher_cls, mode_cls, adapter):
if (cipher_cls, mode_cls) in self._cipher_registry:
raise ValueError("Duplicate registration for: {0} {1}.".format(
cipher_cls, mode_cls)
)
self._cipher_registry[cipher_cls, mode_cls] = adapter
def _register_default_ciphers(self):
for mode_cls in [CBC, CTR, ECB, OFB, CFB, CFB8]:
self.register_cipher_adapter(
AES,
mode_cls,
GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}")
)
for mode_cls in [CBC, CTR, ECB, OFB, CFB]:
self.register_cipher_adapter(
Camellia,
mode_cls,
GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}")
)
for mode_cls in [CBC, CFB, CFB8, OFB]:
self.register_cipher_adapter(
TripleDES,
mode_cls,
GetCipherByName("des-ede3-{mode.name}")
)
self.register_cipher_adapter(
TripleDES,
ECB,
GetCipherByName("des-ede3")
)
for mode_cls in [CBC, CFB, OFB, ECB]:
self.register_cipher_adapter(
Blowfish,
mode_cls,
GetCipherByName("bf-{mode.name}")
)
for mode_cls in [CBC, CFB, OFB, ECB]:
self.register_cipher_adapter(
SEED,
mode_cls,
GetCipherByName("seed-{mode.name}")
)
for cipher_cls, mode_cls in itertools.product(
[CAST5, IDEA],
[CBC, OFB, CFB, ECB],
):
self.register_cipher_adapter(
cipher_cls,
mode_cls,
GetCipherByName("{cipher.name}-{mode.name}")
)
self.register_cipher_adapter(
ARC4,
type(None),
GetCipherByName("rc4")
)
self.register_cipher_adapter(
AES,
| pipian/libcueify | tests/swig/check_full_toc.py | Python | mit | 8,463 | 0.002363 |
# check_full_toc.py - Unit tests for SWIG-based libcueify full TOC APIs
#
# Copyright (c) 2011 Ian Jacobi <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# KLUDGE to allow tests to work.
import sys
sys.path.insert(0, '../../build/swig/python')
import cueify
import struct
import unittest
# Create a binary track descriptor from a full TOC.
def TRACK_DESCRIPTOR(session, adr, ctrl, track,
abs_min, abs_sec, abs_frm, min, sec, frm):
return [session, (((adr & 0xF) << 4) | (ctrl & 0xF)), 0, track,
abs_min, abs_sec, abs_frm, 0, min, sec, frm]
serialized_mock_full_toc = [(((13 + 2 * 3) * 11 + 2) >> 8),
(((13 + 2 * 3) * 11 + 2) & 0xFF), 1, 2]
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 0xA0, 0, 0, 0, 1, cueify.SESSION_MODE_1, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 0xA1, 0, 0, 0, 12, 0, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 0xA2, 0, 0, 0, 51, 44, 26))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 1, 0, 0, 0, 0, 2, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 2, 0, 0, 0, 4, 47, 70))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 3, 0, 0, 0, 7, 42, 57))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 4, 0, 0, 0, 13, 47, 28))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 5, 0, 0, 0, 18, 28, 50))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 6, 0, 0, 0, 21, 56, 70))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 7, 0, 0, 0, 24, 56, 74))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 8, 0, 0, 0, 30, 10, 55))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 9, 0, 0, 0, 34, 17, 20))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 10, 0, 0, 0, 39, 18, 66))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 11, 0, 0, 0, 43, 16, 40))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 12, 0, 0, 0, 47, 27, 61))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(2, 1, 6, 0xA0, 0, 0, 0, 13, cueify.SESSION_MODE_2, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(2, 1, 6, 0xA1, 0, 0, 0, 13, 0, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(2, 1, 6, 0xA2, 0, 0, 0, 57, 35, 13))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(2, 1, 6, 13, 1, 2, 3, 54, 16, 26))
class TestFullTOCFunctions(unittest.TestCase):
def test_serialization(self):
# Test both deserialization and serialization (since, unlike
# in the C code, the Python library does not support directly
# specifying the mock TOC).
full_toc = cueify.FullTOC()
self.assertTrue(
full_toc.deserialize(
struct.pack(
"B" * len(serialized_mock_full_toc),
*serialized_mock_full_toc)))
s = full_toc.serialize()
self.assertEqual(full_toc.errorCode, cueify.OK)
self.assertEqual(len(s), len(serialized_mock_full_toc))
self.assertEqual(
s,
struct.pack(
"B" * len(serialized_mock_full_toc),
*serialized_mock_full_toc))
def test_getters(self):
full_toc = cueify.FullTOC()
self.assertTrue(
full_toc.deserialize(
struct.pack(
"B" * len(serialized_mock_full_toc),
*serialized_mock_full_toc)))
self.assertEqual(full_toc.firstSession, 1)
self.assertEqual(full_toc.lastSession, 2)
self.assertEqual(len(full_toc.tracks), 13)
self.assertEqual(full_toc.tracks[0].session, 1)
self.assertEqual(full_toc.tracks[12].session, 2)
self.assertEqual(full_toc.tracks[0].controlFlags, 4)
self.assertEqual(full_toc.tracks[12].controlFlags, 6)
self.assertEqual(full_toc.tracks[0].subQChannelFormat, 1)
self.assertEqual(full_toc.tracks[12].subQChannelFormat, 1)
self.assertEqual(len(full_toc.sessions), 2)
self.assertEqual(len(full_toc.sessions[0].pseudotracks), 3)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_FIRST_TRACK_PSEUDOTRACK].controlFlags, 4)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_LAST_TRACK_PSEUDOTRACK].controlFlags, 4)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].controlFlags, 4)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].controlFlags, 6)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_FIRST_TRACK_PSEUDOTRACK].subQChannelFormat, 1)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_LAST_TRACK_PSEUDOTRACK].subQChannelFormat, 1)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].subQChannelFormat, 1)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].subQChannelFormat, 1)
self.assertEqual(full_toc.tracks[0].pointAddress.min, 0)
self.assertEqual(full_toc.tracks[0].pointAddress.sec, 0)
self.assertEqual(full_toc.tracks[0].pointAddress.frm, 0)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].pointAddress.min, 0)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].pointAddress.sec, 0)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].pointAddress.frm, 0)
self.assertEqual(full_toc.tracks[12].pointAddress.min, 1)
self.assertEqual(full_toc.tracks[12].pointAddress.sec, 2)
self.assertEqual(full_toc.tracks[12].pointAddress.frm, 3)
self.assertEqual(full_toc.tracks[0].address.min, 0)
self.assertEqual(full_toc.tracks[0].address.sec, 2)
self.assertEqual(full_toc.tracks[0].address.frm, 0)
self.assertEqual(full_toc.tracks[12].address.min, 54)
self.assertEqual(full_toc.tracks[12].address.sec, 16)
self.assertEqual(full_toc.tracks[12].address.frm, 26)
self.assertEqual(full_toc.sessions[0].firstTrack, 1)
self.assertEqual(full_toc.sessions[1].firstTrack, 13)
self.assertEqual(full_toc.sessions[0].lastTrack, 12)
self.assertEqual(full_toc.sessions[1].lastTrack, 13)
self.assertEqual(full_toc.firstTrack, 1)
self.assertEqual(full_toc.lastTrack, 13)
self.assertEqual(full_toc.sessions[0].type, cueify.SESSION_MODE_1)
self.assertEqual(full_toc.sessions[1].type, cueify.SESSION_MODE_2)
self.assertEqual(full_toc.sessions[1].leadoutAddress.min, 57)
self.assertEqual(full_toc.sessions[1].leadoutAddress.sec, 35)
self.assertEqual(full_toc.sessions[1].leadoutAddress.frm, 13)
self.assertEqual(full_toc.discLength.min, 57)
self.assertEqual(full_toc.discLength.sec, 35)
self.assertEqual(full_toc.discLength.frm, 13)
self.assertEqual(full_toc.tracks[11].length.min, 4)
self.assertEqual(full_toc.tracks[11].length.sec, 16)
sel
| rsalmaso/wagtail | wagtail/core/migrations/0017_change_edit_page_permission_description.py | Python | bsd-3-clause | 771 | 0 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0016_change_page_url_path_to_text_field"),
]
operations = [
migrations.AlterField(
model_name="grouppagepe
|
rmission",
name="permission_type",
field=models.CharField(
choices=[
("add", "Add/edit pages you own"),
("edit", "Edit any page"),
("publish", "Publish any page"),
("lock", "Lock/unlock any page"),
],
max_length=20,
verbose_name="Permission type",
),
preserve_default=True,
),
]
| chris-schmitz/circuit-python-acrylic-nightlight | code/code.py | Python | mit | 1,177 | 0.002549 |
from digitalio import DigitalInOut, Direction, Pull
import board
import time
import neopixel
led = DigitalInOut(board.D13)
led.direction = Direction.OUTPUT
pixelPin = board.D2
pixelNumber = 8
strip = neopixel.NeoPixel(pixelPin, pixelNumber, brightness=1, auto_write=False)
switch = DigitalInOut(board.D1)
switch.direction = Direction.INPUT
switch.pull = Pull.UP
def wheel(pos):
if (pos < 0) or (pos > 255):
return (0, 0, 0)
if (pos < 85):
return (int(pos * 3), int(255 - (pos * 3)), 0)
elif (pos < 170):
pos -= 85
return (int(255 - pos * 3), 0, int(pos * 3))
else:
pos -= 170
return (0, int(pos * 3), int(255 - pos * 3))
def rainbow_cycle(wait):
for outer in range(255):
for inner in range(len(strip)):
index = int((inner * 256 / len(strip)) + outer)
strip[inner] = wheel(index & 255)
strip.write()
time.sleep(wait)
while True:
if switch.value:
led.value = False
strip.fill((0, 0, 0))
strip.write()
else:
led.value = True
# strip.fill((255, 0, 0))
rainbow_cycle(0.001)
# time.sleep(0.01)
| ToonTownInfiniteRepo/ToontownInfinite | toontown/minigame/TargetGameGlobals.py | Python | mit | 7,031 | 0.001991 |
from pandac.PandaModules import *
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
ENDLESS_GAME = config.GetBool('endless-ring-game', 0)
NUM_RING_GROUPS = 16
MAX_TOONXZ = 15.0
MAX_LAT = 5
MAX_FIELD_SPAN = 135
CollisionRadius = 1.5
CollideMask = ToontownGlobals.CatchGameBitmask
TARGET_RADIUS = MAX_TOONXZ / 3.0 * 0.9
targetColors = ((TTLocalizer.ColorRed, VBase4(1.0, 0.4, 0.2, 1.0)),
(TTLocalizer.ColorGreen, VBase4(0.0, 0.9, 0.2, 1.0)),
(TTLocalizer.ColorOrange, VBase4(1.0, 0.5, 0.25, 1.0)),
(TTLocalizer.ColorPurple, VBase4(1.0, 0.0, 1.0, 1.0)),
(TTLocalizer.ColorWhite, VBase4(1.0, 1.0, 1.0, 1.0)),
(TTLocalizer.ColorBlack, VBase4(0.0, 0.0, 0.0, 1.0)),
(TTLocalizer.ColorYellow, VBase4(1.0, 1.0, 0.2, 1.0)))
ENVIRON_LENGTH = 300
ENVIRON_WIDTH = 150.0
ringColorSelection = [(0, 1, 2), 3, 4, 5, 6]
colorRed = {}
colorRed['Red'] = 1.0
colorRed['Green'] = 0.0
colorRed['Blue'] = 0.0
colorRed['Alpha'] = 0.5
colorBlue = {}
colorBlue['Red'] = 0.0
colorBlue['Green'] = 0.0
colorBlue['Blue'] = 1.0
colorBlue['Alpha'] = 0.5
colorGreen = {}
colorGreen['Red'] = 0.0
colorGreen['Green'] = 1.0
colorGreen['Blue'] = 0.0
colorGreen['Alpha'] = 0.5
colorYellow = {}
colorYellow['Red'] = 1.0
colorYellow['Green'] = 1.0
colorYellow['Blue'] = 0.0
colorYellow['Alpha'] = 0.5
colorPurple = {}
colorPurple['Red'] = 0.75
colorPurple['Green'] = 0.0
colorPurple['Blue'] = 1.0
colorPurple['Alpha'] = 0.5
colorOrange = {}
colorOrange['Red'] = 1.0
colorOrange['Green'] = 0.6
colorOrange['Blue'] = 0.0
colorOrange['Alpha'] = 0.5
colorBlack = {}
colorBlack['Red'] = 0.0
colorBlack['Green'] = 0.0
colorBlack['Blue'] = 0.0
colorBlack['Alpha'] = 1.0
colorWhite = {}
colorWhite['Red'] = 1.0
colorWhite['Green'] = 1.0
colorWhite['Blue'] = 1.0
colorWhite['Alpha'] = 1.0
difficultyPatterns = {
    ToontownGlobals.ToontownCentral: [[8, 4, 2, 0], [10, 16, 21, 28], [31, 15, 7, 3.5],
        [colorRed, colorGreen, colorBlue, colorYellow], [2, 2, 2, 1], 10, 2],
    ToontownGlobals.DonaldsDock: [[7, 4, 2, 0], [11, 17, 23, 32], [29, 13, 6.5, 3.2],
        [colorRed, colorGreen, colorBlue, colorYellow], [2, 2, 2, 1], 9, 2],
    ToontownGlobals.DaisyGardens: [[6, 4, 2, 0], [11, 18, 25, 34], [29, 13, 6.5, 3.1],
        [colorRed, colorGreen, colorBlue, colorYellow], [2, 2, 2, 1], 8, 2],
    ToontownGlobals.MinniesMelodyland: [[6, 4, 2, 0], [12, 19, 27, 37], [28, 12, 6, 3.0],
        [colorGreen, colorBlue, colorYellow, colorPurple], [2, 2, 2, 1], 8, 2],
    ToontownGlobals.TheBrrrgh: [[5, 4, 2, 0], [12, 20, 29, 40], [25, 12, 5.5, 2.5],
        [colorGreen, colorBlue, colorYellow, colorPurple], [2, 2, 2, 1], 7, 2],
    ToontownGlobals.DonaldsDreamland: [[4, 3, 1, 0], [12, 21, 31, 42], [20, 10, 4.5, 2.0],
        [colorBlue, colorYellow, colorPurple, colorOrange], [2, 2, 2, 1], 7, 2]}
| SkyZH/CloudOJWatcher | ojrunnerlinux.py | Python | gpl-3.0 | 1,643 | 0.009738 |
import lorun
import os
import codecs
import random
import subprocess
import config
import sys
RESULT_MAP = [
2, 10, 5, 4, 3, 6, 11, 7, 12
]
class Runner:
def __init__(self):
return
def compile(self, judger, srcPath, outPath):
cmd = config.langCompile[judger.lang] % {'root': sys.path[0], 'src': srcPath, 'target': outPath}
p = subprocess.Popen(cmd, shell = True,
stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.STDOUT)
retval = p.wait()
return (retval, p.stdout.read())
def judge(self, judger, srcPath, outPath, inFile, ansFile, memlimit, timelimit):
cmd = config.langRun[judger.lang] % {'src': srcPath, 'target': outPath}
fout_path = "".join([sys.path[0], "/", "%s/%d.out" % (config.dataPath["tempPath"], random.randint(0, 65536))])
if os.path.exists(fout_path):
os.remove(fout_path)
fin = open(inFile, 'rU')
fout = open(fout_path, 'w')
runcfg = {
'args': cmd.split(" "),
'fd_in': fin.fileno(),
'fd_out': fout.fileno(),
'timelimit': int(timelimit),
'memorylimit': int(memlimit)
}
rst = lorun.run(runcfg)
fin.close()
fout.close()
if rst['result'] == 0:
fans = open(ansFile, 'rU')
fout = open(fout_path, 'rU')
crst = lorun.check(fans.fileno(), fout.fileno())
fout.close()
fans.close()
return (RESULT_MAP[crst], int(rst['memoryused']), int(rst['timeused']))
return (RESULT_MAP[rst['result']], 0, 0)
| masroore/pynewscred | pynewscred/req_parser.py | Python | bsd-3-clause | 7,407 | 0.00027 |
__author__ = 'Dr. Masroor Ehsan'
__email__ = '[email protected]'
__copyright__ = 'Copyright 2013, Dr. Masroor Ehsan'
__license__ = 'BSD'
__version__ = '0.1.1'
from datetime import datetime
try:
from lxml import etree
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5
import xml.etree.ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
# normal ElementTree install
import elementtree.ElementTree as etree
__all__ = ['parse_single_article', 'parse_story_set', 'parse_article_set']
def _find_and_set(key, rootNode, dict_obj, cb=None):
node = rootNode.find(key)
if node is not None:
dict_obj[key] = cb(node.text) if cb is not None else node.text
def _parse_datetime(input):
return datetime.strptime(input, "%Y-%m-%d %H:%M:%S")
def _parse_category_set(rootNode, tagName='category'):
categories = []
categoriesNode = rootNode.find('categories_set')
for categoryNode in categoriesNode.findall(tagName):
category = {}
_find_and_set('name', categoryNode, category)
_find_and_set('dashed_name', categoryNode, category)
if len(category) > 0:
categories.append(category)
return categories
def parse_category_set(content):
rootNode = etree.fromstring(content)
return _parse_category_set(rootNode)
def parse_single_article(content):
rootNode = etree.fromstring(content)
return _parse_single_article(rootNode)
def _parse_single_topic(rootNode):
topic = {}
_find_and_set('name', rootNode, topic)
_find_and_set('topic_group', rootNode, topic)
_find_and_set('topic_subclassification', rootNode, topic)
_find_and_set('score', rootNode, topic, float)
_find_and_set('image_url', rootNode, topic)
_find_and_set('link', rootNode, topic)
_find_and_set('guid', rootNode, topic)
_find_and_set('topic_classification', rootNode, topic)
_find_and_set('description', rootNode, topic)
return topic if len(topic) > 0 else None
def _parse_topic_set(rootNode):
topicSetNode = rootNode.find('topic_set')
topic_set = []
if topicSetNode is not None:
for node in topicSetNode.findall('topic'):
topic = _parse_single_topic(node)
if topic is not None:
topic_set.append(topic)
return topic_set if len(topic_set) > 0 else None
def _parse_thumbnail(rootNode, dict_obj):
thumbNode = rootNode.find('thumbnail')
if thumbNode is not None:
thumb = {}
_find_and_set('original_image', thumbNode, thumb)
_find_and_set('link', thumbNode, thumb)
if len(thumb) > 0:
dict_obj['thumbnail'] = thumb
def _parse_single_article(rootNode):
article = {}
_find_and_set('description', rootNode, article)
_find_and_set('title', rootNode, article)
_find_and_set('created_at', rootNode, article, _parse_datetime)
_find_and_set('published_at', rootNode, article, _parse_datetime)
_find_and_set('score', rootNode, article, float)
_find_and_set('link', rootNode, article)
_find_and_set('guid', rootNode, article)
catNode = rootNode.find('category')
article['category'] = {
'name': catNode.find('name').text,
'dashed_name': catNode.find('dashed_name').text}
authorSetNode = rootNode.find('author_set')
if authorSetNode is not None:
article['author_set'] = []
for authorNode in authorSetNode.findall('author'):
author = {
'guid': authorNode.find('guid').text,
'first_name': authorNode.find('first_name').text,
'last_name': authorNode.find('last_name').text,
}
article['author_set'].append(author)
topic_set = _parse_topic_set(rootNode)
if topic_set:
article['topic_set'] = topic_set
srcNode = rootNode.find('source')
source_dict = {}
_find_and_set('website', srcNode, source_dict)
_find_and_set('name', srcNode, source_dict)
_find_and_set('circulation', srcNode, source_dict, int)
_find_and_set('country', srcNode, source_dict)
_find_and_set('company_type', srcNode, source_dict)
_find_and_set('founded', srcNode, source_dict)
_find_and_set('staff_authors', srcNode, source_dict, int)
_find_and_set('frequency', srcNode, source_dict)
_find_and_set('owner', srcNode, source_dict)
_find_and_set('guid', srcNode, source_dict)
_find_and_set('is_blog', srcNode, source_dict, bool)
_find_and_set('thumbnail', srcNode, source_dict)
_find_and_set('description', srcNode, source_dict)
mediaNode = srcNode.find('media_type')
media_dict = {}
_find_and_set('name', mediaNode, media_dict)
_find_and_set('dashed_name', mediaNode, media_dict)
if len(media_dict) > 0:
source_dict['media_type'] = media_dict
if len(source_dict) > 0:
article['source'] = source_dict
return article
def _parse_author_set(rootNode):
authorSetNode = rootNode.find('author_set')
authors = []
if authorSetNode is not None:
for node in authorSetNode.findall('author'):
author = {}
_find_and_set('guid', node, author)
_find_and_set('name', node, author)
if len(author) > 0:
authors.append(author)
return authors if len(authors) > 0 else None
def _parse_story_set_article(rootNode):
article = {}
_find_and_set('description', rootNode, article)
_find_and_set('title', rootNode, article)
_find_and_set('published_at', rootNode, article, _parse_datetime)
_find_and_set('link', rootNode, article)
_find_and_set('guid', rootNode, article)
categories = _parse_category_set(rootNode, tagName='categories')
if categories is not None:
article['categories_set'] = categories
sourceNode = rootNode.find('source')
if sourceNode is not None:
source_dict = {}
_find_and_set('name', sourceNode, source_dict)
_find_and_set('guid', sourceNode, source_dict)
if len(source_dict) > 0:
article['source'] = source_dict
author_set = _parse_author_set(rootNode)
if author_set is not None:
article['author_set'] = author_set
return article
def _parse_story_node(rootNode):
story = {}
_find_and_set('num_articles', rootNode, story, int)
_find_and_set('guid', rootNode, story)
articles = []
for articleNode in rootNode.find('article_set').findall('article'):
article = _parse_story_set_article(articleNode)
if article is not None:
articles.append(article)
if len(articles) > 0:
story['article_set'] = articles
return story
def parse_story_set(content):
rootNode = etree.fromstring(content)
story_set = []
for storyNode in rootNode.findall('story'):
story_set.append(_parse_story_node(storyNode))
return story_set
def parse_article_set(content):
rootNode = etree.fromstring(content)
#<article_set num_found="197218">
article_set = []
for storyNode in rootNode.findall('article'):
article_set.append(_parse_single_article(storyNode))
return article_set
| nelhage/taktician | python/tak/ptn/ptn.py | Python | mit | 2,895 | 0.018307 |
import tak
from . import tps
import attr
import re
@attr.s
class PTN(object):
tags = attr.ib()
moves = attr.ib()
@classmethod
def parse(cls, text):
head, tail = text.split("\n\n", 1)
tags_ = re.findall(r'^\[(\w+) "([^"]+)"\]$', head, re.M)
tags = dict(tags_)
tail = re.sub(r'{[^}]+}', ' ', tail)
moves = []
tokens = re.split(r'\s+', tail)
for t in tokens:
if t == '--':
continue
if re.search(r'\A(0|R|F|1|1/2)-(0|R|F|1|1/2)\Z', t):
continue
if re.match(r'\A\d+\.\Z', t):
continue
if t == '':
continue
t = re.sub(r"['!?]+$", '', t)
m = parse_move(t)
moves.append(m)
return cls(tags = tags, moves = moves)
def initial_position(self):
if 'TPS' in self.tags:
return tps.parse_tps(self.tags['TPS'])
return tak.Position.from_config(
tak.Config(size = int(self.tags['Size'])))
slide_map = {
'-': tak.MoveType.SLIDE_DOWN,
'+': tak.MoveType.SLIDE_UP,
'<': tak.MoveType.SLIDE_LEFT,
'>': tak.MoveType.SLIDE_RIGHT,
}
slide_rmap = dict((v, k) for (k, v) in slide_map.items())
place_map = {
'': tak.MoveType.PLACE_FLAT,
'S': tak.MoveType.PLACE_STANDING,
'C': tak.MoveType.PLACE_CAPSTONE,
'F': tak.MoveType.PLACE_FLAT,
}
place_rmap = {
tak.MoveType.PLACE_FLAT: '',
tak.MoveType.PLACE_STANDING: 'S',
tak.MoveType.PLACE_CAPSTONE: 'C',
}
def parse_move(move):
m = re.search(r'\A([CFS]?)([1-8]?)([a-h])([1-8])([<>+-]?)([1-8]*)[CFS]?\Z', move)
if not m:
raise BadMove(move, "malformed move")
stone, pickup, file, rank, dir, drops = m.groups()
x = ord(file) - ord('a')
y = ord(rank) - ord('1')
if pickup and not dir:
raise BadMove(move, "pick up but no direction")
typ = None
if dir:
typ = slide_map[dir]
else:
typ = place_map[stone]
slides = None
if drops:
slides = tuple(ord(c) - ord('0') for c in drops)
if (drops or pickup) and not dir:
raise BadMove(move, "pickup/drop without a direction")
if dir and not pickup and not slides:
pickup = '1'
if pickup and not slides:
slides = (int(pickup),)
if pickup and int(pickup) != sum(slides):
raise BadMove(move, "inconsistent pickup and drop: {0} v {1}".format(pickup, drops))
return tak.Move(x, y, typ, slides)
def format_move(move):
bits = []
bits.append(place_rmap.get(move.type, ''))
if move.type.is_slide():
pickup = sum(move.slides)
if pickup != 1:
bits.append(pickup)
bits.append(chr(move.x + ord('a')))
bits.append(chr(move.y + ord('1')))
if move.type.is_slide():
bits.append(slide_rmap[move.type])
if len(move.slides) > 1:
bits += [chr(d + ord('0')) for d in move.slides]
return ''.join(map(str, bits))
class BadMove(Exception):
def __init__(self, move, error):
self.move = move
self.error = error
super().__init__("{0}: {1}".format(error, move))
| rob-metalinkage/django-gazetteer | gazetteer/fixtures/mapstory_tm_world_config.py | Python | cc0-1.0 | 2,139 | 0.02618 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# do this when > 1.6!!!
# from django.db import migrations, models
from gazetteer.models import GazSource,GazSourceConfig,LocationTypeField,CodeFieldConfig,NameFieldConfig
from skosxl.models import Concept, Scheme, MapRelation
from gazetteer.settings import TARGET_NAMESPACE_FT
def load_base_ft():
(sch,created) = Scheme.objects.get_or_create(uri=TARGET_NAMESPACE_FT[:-1], defaults = { 'pref_label' :"Gaz Feature types" })
try:
(ft,created) = Concept.objects.get_or_create(term="ADMIN", defaults = { 'pref_label' :"Populated Place", 'definit
|
ion':"Populated place"} , scheme = sch)
except:
pass
# now set up cross references from NGA feature types namespace
# now set up harvest config
def load_ft_mappings():
pass
def load_config() :
try:
GazSourceConfig.objects.filter(name="TM_WorldBoundaries").delete()
except:
pass
config=GazSourceConfig.objects.create(lat_field="lat", name="TM_WorldBoundaries", long_field="lon")
NameFieldConfig.objects.create(config=config,language="en", as_default=True, languageNamespace="", field="name", languageField="")
LocationTypeField.objects.create(field='"ADMIN"',namespace=TARGET_NAMESPACE_FT, config=config)
CodeFieldConfig.objects.create(config=config,field="iso3",namespace="http://mapstory.org/id/countries/iso3")
CodeFieldConfig.objects.create(config=config,field="iso2",namespace="http://mapstory.org/id/countries/iso2")
CodeFieldConfig.objects.create(config=config,field="un",namespace="http://mapstory.org/id/countries/un")
CodeFieldConfig.objects.create(config=config,field="fips",namespace="http://mapstory.org/id/countries/fips")
(s,created) = GazSource.objects.get_or_create(source="tm_world_borders", config=config, source_type="mapstory")
print (s,created)
"""
class Migration(migrations.Migration):
initial = True
dependencies = [
#('yourappname', '0001_initial'),
]
operations = [
migrations.RunPython(load_ft_mappings),
migrations.RunPython(load_config),
]
"""
| asedunov/intellij-community | python/testData/breadcrumbs/dictKey.py | Python | apache-2.0 | 25 | 0.04 |
a = {"abc": "d<caret>ef
|
"}
|
|
| akeeton/MTGO-scry-bug-test | MTGO-scry-bug-test.sikuli/MTGO-scry-bug-test.py | Python | mit | 5,599 | 0.008573 |
# Make your image, region, and location changes then change the from-import
# to match.
from configurables_akeeton_desktop import *
import hashlib
import java.awt.Toolkit
import json
import os
import shutil
import time
Settings.ActionLogs = True
Settings.InfoLogs = True
Settings.DebugLogs = True
Settings.LogTime = True
Settings.AutoWaitTimeout = AUTO_WAIT_TIMEOUT_SECONDS
TEMP_DIR_PREFIX = time.strftime("MTGO-scry-bug_%Y-%m-%d_%H-%M-%S", time.gmtime())
TEMP_PATH = tempfile.mkdtemp(prefix=TEMP_DIR_PREFIX)
attempts = 0
def main():
global attempts
attempts += 1
ATTEMPT_NUM_PATH = get_attempt_number_path(attempts)
HITS_PATH = os.path.join(ATTEMPT_NUM_PATH, HITS_DIR)
MISSES_PATH = os.path.join(ATTEMPT_NUM_PATH, MISSES_DIR)
print "TEMP_PATH:", TEMP_PATH
print "ATTEMPT_NUM_PATH", ATTEMPT_NUM_PATH
print "HITS_PATH:", HITS_PATH
print "MISSES_PATH:", MISSES_PATH
os.mkdir(ATTEMPT_NUM_PATH)
os.mkdir(HITS_PATH)
os.mkdir(MISSES_PATH)
iterations = 0
hits = 0
card_hash_to_times_card_sent_to_bottom = ['card_hash_to_times_card_sent_to_bottom', ZeroValueDict()]
card_hash_to_times_card_sent_to_bottom_and_drawn = ['card_hash_to_times_card_sent_to_bottom_and_drawn', ZeroValueDict()]
card_hash_to_times_card_drawn = ['card_hash_to_times_card_drawn', ZeroValueDict()]
card_hash_to_capture = ['card_hash_to_capture', {}]
while True:
REGION_PLAY.wait("play.png")
time.sleep(0.5)
REGION_PLAY.click(LOCATION_PLAY)
time.sleep(1.0)
REGION_MULLIGAN_KEEP.wait("mulligan_keep.png")
for i in range(0, 7):
REGION_MULLIGAN_KEEP.wait("mulligan_highlighted_keep.png")
time.sleep(2.0) # I swear if I have to keep incrementing this value...
REGION_MULLIGAN_KEEP.click(LOCATION_MULLIGAN)
time.sleep(1.0)
REGION_TEMPORARY_ZONE.wait("temporary_zone.png")
time.sleep(0.5)
click(LOCATION_TEMPORARY_ZONE_CARD)
time.sleep(0.5)
REGION_PUT_ON_THE_BOTTOM_OF_YOUR_LIBRARY.click(LOCATION_PUT_ON_THE_BOTTOM_OF_YOUR_LIBRARY)
time.sleep(0.1)
REGION_CHAT_PUT_A_CARD_ON_THE_BOTTOM_OF_THE_LIBRARY.wait("chat_put_a_card_on_the_bottom_of_the_library.png")
time.sleep(0.1)
card_sent_to_bottom_capture = capture(REGION_CARD_PREVIEW_CAPTURE)
hover(LOCATION_FIRST_CARD_IN_HAND) # Update the preview with the drawn card.
time.sleep(0.5)
card_drawn_capture = capture(REGION_CARD_PREVIEW_CAPTURE)
copy_path = ""
card_sent_to_bottom_hash = hash_file(card_sent_to_bottom_capture)
card_drawn_hash = hash_file(card_drawn_capture)
card_hash_to_times_card_sent_to_bottom[1][card_sent_to_bottom_hash] += 1
card_hash_to_times_card_drawn[1][card_drawn_hash] += 1
if card_sent_to_bottom_hash == card_drawn_hash:
hits += 1
card_hash_to_times_card_sent_to_bottom_and_drawn[1][card_sent_to_bottom_hash] += 1
copy_path = HITS_PATH
else:
copy_path = MISSES_PATH
iterations += 1
print "{0}/{1}".format(hits, iterations)
card_sent_to_bottom_capture_dest_path = os.path.join(copy_path, str(iterations) + "_bottom.png")
card_drawn_capture_dest_path = os.path.join(copy_path, str(iterations) + "_drawn.png")
shutil.move(card_sent_to_bottom_capture, card_sent_to_bottom_capture_dest_path)
shutil.move(card_drawn_capture, card_drawn_capture_dest_path)
card_hash_to_capture[1][card_sent_to_bottom_hash] = card_sent_to_bottom_capture_dest_path
card_hash_to_capture[1][card_drawn_hash] = card_drawn_capture_dest_path
with open(os.path.join(ATTEMPT_NUM_PATH, 'stats.json'), 'w') as stats_file:
json.dump(card_hash_to_times_card_sent_to_bottom_and_drawn, stats_file, sort_keys=True, indent=4)
stats_file.write('\n')
json.dump(card_hash_to_times_card_sent_to_bottom, stats_file, sort_keys=True, indent=4)
stats_file.write('\n')
json.dump(card_hash_to_times_card_drawn, stats_file, sort_keys=True, indent=4)
stats_file.write('\n')
json.dump(card_hash_to_capture, stats_file, sort_keys=True, indent=4)
stats_file.write('\n')
stats_file.write('{0}/{1}'.format(hits, iterations))
click(LOCATION_X_CLOSE)
REGION_CONCEDE_MATCH_BUTTON.wait("concede_match.png")
time.sleep(0.1)
type('\n')
class ZeroValueDict(dict):
def __missing__(self, key):
return 0
def hash_file(file_path):
hasher = hashlib.md5()
with open(file_path, 'rb') as opened_file:
buf = opened_file.read()
hasher.update(buf)
return hasher.hexdigest()
def get_attempt_number_path(attempts):
return os.path.join(TEMP_PATH, 'attempt_{0}'.format(attempts))
if __name__ == '__main__':
while True:
try:
main()
except FindFailed as e:
for i in range(0, TIMES_TO_BEEP_ON_FIND_FAIlED):
java.awt.Toolkit.getDefaultToolkit().beep()
time.sleep(1.0)
print e
with open(os.path.join(get_attempt_number_path(attempts), 'error.log'), 'w') as errorlog:
errorlog.write(str(e))
raise e # Replace this with a way to reset MTGO to a starting state so we can try again.
| nanoscopy/afm-calibrator | nanoscopy/audio.py | Python | mit | 2,692 | 0.015602 |
import pyaudio
import struct
from threading import Thread, Condition
import time
from logging import thread
import socket
CHUNK = 2**12
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
class AudioReader(Thread):
def __init__(self, raw = False, remote = False, host = 'localhost', port = 9999):
Thread.__init__(self)
self.active = False
self.listeners = []
self.condition = Condition()
self.quit = False
self.raw = raw
self.remote = remote
self.host = host
self.port = port
def pause(self):
self.active = False
def play(self):
self.active = True
self.condition.acquire()
self.condition.notify()
self.condition.release()
def stop(self):
if not self.active:
self.play()
self.active = False
self.quit = True
def readData(self):
self.condition.acquire()
self.condition.wait()
self.condition.release()
self.stream = pyaudio.PyAudio().open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
while self.active:
data = self.stream.read(CHUNK)
if not self.raw:
count = len(data) / 2
fmt = "%dh" % (count)
shorts = struct.unpack(fmt, data)
else:
shorts = data
for l in self.listeners:
l(shorts)
self.stream.close()
def readRemoteData(self):
self.condition.acquire()
self.condition.wait()
self.condition.release()
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((self.host, self.port))
buf = []
while self.active:
data = self.socket.recv((CHUNK*2-len(buf))*2)
if not self.raw:
count = len(data) / 2
fmt = "%dh" % (count)
shorts = struct.unpack(fmt, data)
buf.extend(shorts)
if len(buf)>=CHUNK*2:
for l in self.listeners:
l(buf)
buf=[]
else:
for l in self.listeners:
l(data)
self.socket.close()
def run(self):
while not self.quit:
if not self.remote:
self.readData()
else:
self.readRemoteData()
| tweepy/tweepy | tweepy/mixins.py | Python | mit | 949 | 0.001054 |
# Tweepy
# Copyright 2009-2022 Joshua Roesslein
# See LICENSE for details.
from collections.abc import Mapping
class EqualityComparableID:
__slots__ = ()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.id == other.id
return NotImplemented
class HashableID(EqualityComparableID):
__slots__ = ()
def __hash__(self):
return self.id
class DataMapping(Mapping):
__slots__ = ()
def __contains__(self, item):
return item in self.data
def __getattr__(self, name):
try:
return self.data[name]
except KeyError:
raise AttributeError from None
def __getitem__(self, key):
try:
return getattr(self, key)
except AttributeError:
raise KeyError from None
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
| bvillasen/phyGPU | tools/tools.py | Python | gpl-3.0 | 1,660 | 0.037349 |
import sys, os
def timeSplit( ETR ):
h = int(ETR/3600)
m = int(ETR - 3600*h)/60
s = int(ETR - 3600*h - 60*m)
return h, m, s
def printProgress( current, total, deltaIter, deltaTime ):
terminalString = "\rProgress: "
if total==0: total+=1
percent = 100.*current/total
nDots = int(percent/5)
dotsString = "[" + nDots*"." + (20-nDots)*" " + "]"
percentString = "{0:.0f}%".format(percent)
ETR = deltaTime*(total - current)/float(deltaIter)
hours = int(ETR/3600)
minutes = int(ETR - 3600*hours)/60
seconds = int(ETR - 3600*hours - 60*minutes)
ETRstring = " ETR= {0}:{1:02}:{2:02} ".format(hours, minutes, seconds)
if deltaTime < 0.0001: ETRstring = " ETR= "
terminalString += dotsString + percentString + ETRstring
sys.stdout.write(terminalString)
sys.stdout.flush()
def printProgressTime( current, total, deltaTime ):
terminalString = "\rProgress: "
if total==0: total+=1
percent = 100.*current/total
nDots = int(percent/5)
dotsString = "[" + nDots*"." + (20-nDots)*" " + "]"
percentString = "{0:.0f}%".format(percent)
if current != 0:
ETR = (deltaTime*(total - current))/float(current)
#print ETR
hours = int(ETR/3600)
minutes = int(ETR - 3600*hours)/60
seconds = int(ETR - 3600*hours - 60*minutes)
ETRstring = " ETR= {0}:{1:02}:{2:02} ".format(hours, minutes, seconds)
else: ETRstring = " ETR= "
if deltaTime < 0.0001: ETRstring = " ETR= "
terminalString += dotsString + percentString + ETRstring
sys.stdout.write(terminalString)
sys.stdout.flush()
def ensureDirectory( dirName ):
if not os.path.exists(dirName):
os.makedirs(dirName)
| warwick-one-metre/opsd | warwick/observatory/operations/dome/astrohaven/__init__.py | Python | gpl-3.0 | 4,416 | 0.001812 |
#
# This file is part of opsd.
#
# opsd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# opsd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with opsd. If not, see <http://www.gnu.org/licenses/>.
"""Interface to allow the dome controller to operate an Astrohaven dome via domed"""
from warwick.observatory.dome import (
CommandStatus as DomeCommandStatus,
DomeShutterStatus,
DomeHeartbeatStatus)
from warwick.observatory.operations.constants import DomeStatus
from warwick.observatory.common import daemons, validation
CONFIG_SCHEMA = {
'type': 'object',
'additionalProperties': ['module'],
'required': [
'daemon', 'movement_timeout', 'heartbeat_timeout'
],
'properties': {
'daemon': {
'type': 'string',
'daemon_name': True
},
'movement_timeout': {
'type': 'number',
'minimum': 0
},
'heartbeat_timeout': {
'type': 'number',
'minimum': 0
}
}
}
def validate_config(config_json):
return validation.validation_errors(config_json, CONFIG_SCHEMA, {
'daemon_name': validation.daemon_name_validator,
})
class DomeInterface:
"""Interface to allow the dome controller to operate an Astrohaven dome via domed"""
def __init__(self, dome_config_json):
self._daemon = getattr(daemons, dome_config_json['daemon'])
# Communications timeout when opening or closing the dome (takes up to ~80 seconds for the onemetre dome)
self._movement_timeout = dome_config_json['movement_timeout']
# Timeout period (seconds) for the dome controller
# The dome heartbeat is pinged once per LOOP_DELAY when the dome is under
# automatic control and is fully open or fully closed. This timeout should
# be large enough to account for the time it takes to open and close the dome
self._heartbeat_timeout = dome_config_json['heartbeat_timeout']
def query_status(self):
with self._daemon.connect() as dome:
status = dome.status()
if status['heartbeat_status'] in [DomeHeartbeatStatus.TrippedClosing,
DomeHeartbeatStatus.TrippedIdle]:
return DomeStatus.Timeout
if status['shutter_a'] == DomeShutterStatus.Closed and \
status['shutter_b'] == DomeShutterStatus.Closed:
return DomeStatus.Closed
if status['shutter_a'] in [DomeShutterStatus.Opening, DomeShutterStatus.Closing] or \
status['shutter_b'] in [DomeShutterStatus.Opening, DomeShutterStatus.Closing]:
return DomeStatus.Moving
return DomeStatus.Open
def ping_heartbeat(self):
print('dome: sending heartbeat ping')
with self._daemon.connect() as dome:
ret = dome.set_heartbeat_timer(self._heartbeat_timeout)
return ret == DomeCommandStatus.Succeeded
def disable_heartbeat(self):
print('dome: disabling heartbeat')
with self._daemon.connect() as dome:
ret = dome.set_heartbeat_timer(self._heartbeat_timeout)
return ret == DomeCommandStatus.Succeeded
def close(self):
print('dome: sending heartbeat ping before closing')
with self._daemon.connect() as dome:
dome.set_heartbeat_timer(self._heartbeat_timeout)
print('dome: closing')
with self._daemon.connect(timeout=self._movement_timeout) as dome:
ret = dome.close_shutters('ba')
return ret == DomeCommandStatus.Succeeded
def open(self):
print('dome: sending heartbeat ping before opening')
with self._daemon.connect() as dome:
dome.set_heartbeat_timer(self._heartbeat_timeout)
print('dome: opening')
with self._daemon.connect(timeout=self._movement_timeout) as dome:
ret = dome.open_shutters('ab')
return ret == DomeCommandStatus.Succeeded
| rslnautic/practica-verificacion | src/tweet_crawler.py | Python | apache-2.0 | 1,614 | 0.004337 |
"""Twitter crawler script"""
import tweepy
from database import MongoDB
class Twitter(object): # pylint: disable=too-few-public-methods
"""Class Twitter"""
def __init__(self):
self.consumer_key = "40GvlhlFPNbVGkZnPncPH8DgB"
self.consumer_secret = "G595ceskX8iVH34rsuLSqpFROL0brp8ezzZR2dGvTKvcpPsKPw"
self.access_token = "397905190-LXMFC0clhtDxx5cITBWVFqVUKNQBKuqM06Ls4k5n"
self.access_token_secret = "nPzoHy5UwzOPUZVZO3JhBFRL3WgdM0jJKignxIzQ6nAS1"
self.auth = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)
self.auth.set_access_token(self.access_token, self.access_token_secret)
self.api = tweepy.API(self.auth)
# Method to print our tweets
def print_tweets(self, count=1):
tweets = self._user_timeline(count)
for tweet in tweets:
print tweet.encode('utf-8')
# Method to save our tweets
def save_tweets(self, count=1):
database = MongoDB("verificacion")
coll = database.collection("tweets")
tweets = self._user_timeline(count)
for tweet in tweets:
coll.insert({"tweet": tweet})
# Returns the *count* numbers of tweets of your timeline and save it into a database
def _user_timeline(self, count=200):
tweets = []
public_tweets = self.api.user_timeline(id=self.auth.get_username(), count=count)
for tweet in public_tweets:
text = tweet.text
tweets.append(text)
return tweets
if __name__ == '__main__':
twepp = Twitter()
twepp.print_tweets(10)
twepp.save_tweets(10)
| IanLewis/kay | kay/management/preparse.py | Python | bsd-3-clause | 3,612 | 0.013566 |
# -*- coding: utf-8 -*-
"""
Kay preparse management command.
:Copyright: (c) 2009 Accense Technology, Inc.
Takashi Matsuo <[email protected]>,
All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from os import listdir, path, mkdir
from werkzeug.utils import import_string
import kay
import kay.app
from kay.utils import local
from kay.utils.jinja2utils.compiler import compile_dir
from kay.management.utils import print_status
IGNORE_FILENAMES = {
  'kay': ('debug', 'app_template'),
  'app': ('kay',),  # one-element tuple, not a plain string
}
def find_template_dir(target_path, ignore_filenames):
ret = []
for filename in listdir(target_path):
target_fullpath = path.join(target_path, filename)
if path.isdir(target_fullpath):
if filename.startswith(".") or filename in ignore_filenames:
continue
if filename == "templates":
ret.append(target_fullpath)
else:
ret = ret + find_template_dir(target_fullpath, ignore_filenames)
else:
continue
return ret
def do_preparse_bundle():
"""
Pre compile all the jinja2 templates in Kay itself.
"""
print_status("Compiling bundled templates...")
app = kay.app.get_application()
env = app.app.jinja2_env
for dir in find_template_dir(kay.KAY_DIR, ('debug','app_template')):
dest = prepare_destdir(dir)
print_status("Now compiling templates in %s to %s." % (dir, dest))
compile_dir(env, dir, dest)
print_status("Finished compiling bundled templates...")
def do_preparse_apps():
"""
Pre compile all the jinja2 templates in your applications.
"""
from kay.conf import LazySettings
print_status("Compiling templates...")
application = kay.app.get_application()
applications = [application]
settings_treated = []
for key, settings_name in \
application.app.app_settings.PER_DOMAIN_SETTINGS.iteritems():
if not settings_name in settings_treated:
applications.append(kay.app.get_application(
settings=LazySettings(settings_module=settings_name)))
settings_treated.append(settings_name)
for app in applications:
compile_app_templates(app.app) # pass KayApp instance
for key, submount_app in app.mounts.iteritems():
if isinstance(submount_app, kay.app.KayApp):
compile_app_templates(submount_app)
print_status("Finished compiling templates...")
def prepare_destdir(dir):
def replace_dirname(orig):
if 'templates' in orig:
return orig.replace('templates', 'templates_compiled')
else:
return orig+'_compiled'
dest = replace_dirname(dir)
if path.isdir(dest):
for d, subdirs, files in os.walk(dest):
for f in files:
compiled_filename = "%s/%s" % (d, f)
orig_filename = compiled_filename.replace(dest, dir)
if not path.isfile(orig_filename):
os.unlink(compiled_filename)
print_status("%s does not exist. So, '%s' is removed." % (
orig_filename, compiled_filename))
else:
mkdir(dest)
return dest
def compile_app_templates(app):
env = app.jinja2_env
target_dirs = [dir for dir in app.app_settings.TEMPLATE_DIRS\
if os.path.isdir(dir)]
for app in app.app_settings.INSTALLED_APPS:
if app.startswith("kay."):
continue
mod = import_string(app)
    target_dirs.extend(find_template_dir(os.path.dirname(mod.__file__),
                                         ('kay',)))
for dir in target_dirs:
dest = prepare_destdir(dir)
print_status("Now compiling templates in %s to %s." % (dir, dest))
compile_dir(env, dir, dest)
|
HeyIamJames/Data_Structures
|
stack.py
|
Python
|
gpl-2.0
| 248
| 0.008065
|
from linked_list import LinkedList
class Stack(object):
def __init__(self, iterable=None):
self._list = LinkedList(iterable)
    def push(self, val):
self._list.insert(val)
def pop(self):
return self._list.pop()
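# Usage sketch (assumes LinkedList.insert prepends and pop removes the head,
# giving LIFO order):
#   s = Stack([1, 2])
#   s.push(3)
#   s.pop()  # -> 3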
|
apmckinlay/csuneido
|
vs2019scintilla/scripts/Dependencies.py
|
Python
|
gpl-2.0
| 5,533
| 0.023857
|
#!/usr/bin/env python
# Dependencies.py - discover, read, and write dependencies file for make.
# The format is like the output from "g++ -MM" which produces a
# list of header (.h) files used by source files (.cxx).
# As a module, provides
# FindPathToHeader(header, includePath) -> path
# FindHeadersInFile(filePath) -> [headers]
# FindHeadersInFileRecursive(filePath, includePath, renames) -> [paths]
# FindDependencies(sourceGlobs, includePath, objExt, startDirectory, renames) -> [dependencies]
# ExtractDependencies(input) -> [dependencies]
# TextFromDependencies(dependencies)
# WriteDependencies(output, dependencies)
# UpdateDependencies(filepath, dependencies)
# PathStem(p) -> stem
# InsertSynonym(dependencies, current, additional) -> [dependencies]
# If run as a script reads from stdin and writes to stdout.
# Only tested with ASCII file names.
# Copyright 2019 by Neil Hodgson <[email protected]>
# The License.txt file describes the conditions under which this software may be distributed.
# Requires Python 2.7 or later
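# Usage sketch as a module (the globs and paths below are illustrative):
#   deps = FindDependencies(["src/*.cxx"], ["include"], ".o", "src/")
#   UpdateDependencies("deps.mak", deps, "# Generated by Dependencies.py")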
import codecs, glob, os, sys
if __name__ == "__main__":
import FileGenerator
else:
from . import FileGenerator
continuationLineEnd = " \\"
def FindPathToHeader(header, includePath):
for incDir in includePath:
relPath = os.path.join(incDir, header)
if os.path.exists(relPath):
return relPath
return ""
fhifCache = {} # Remember the includes in each file. ~5x speed up.
def FindHeadersInFile(filePath):
if filePath not in fhifCache:
headers = []
with codecs.open(filePath, "r", "utf-8") as f:
for line in f:
if line.strip().startswith("#include"):
parts = line.split()
if len(parts) > 1:
header = parts[1]
if header[0] != '<': # No system headers
headers.append(header.strip('"'))
fhifCache[filePath] = headers
return fhifCache[filePath]
def FindHeadersInFileRecursive(filePath, includePath, renames):
headerPaths = []
for header in FindHeadersInFile(filePath):
if header in renames:
header = renames[header]
relPath = FindPathToHeader(header, includePath)
if relPath and relPath not in headerPaths:
headerPaths.append(relPath)
subHeaders = FindHeadersInFileRecursive(relPath, includePath, renames)
headerPaths.extend(sh for sh in subHeaders if sh not in headerPaths)
return headerPaths
def RemoveStart(relPath, start):
if relPath.startswith(start):
return relPath[len(start):]
return relPath
def ciKey(f):
return f.lower()
def FindDependencies(sourceGlobs, includePath, objExt, startDirectory, renames={}):
deps = []
for sourceGlob in sourceGlobs:
sourceFiles = glob.glob(sourceGlob)
# Sorting the files minimizes deltas as order returned by OS may be arbitrary
sourceFiles.sort(key=ciKey)
for sourceName in sourceFiles:
objName = os.path.splitext(os.path.basename(sourceName))[0]+objExt
headerPaths = FindHeadersInFileRecursive(sourceName, includePath, renames)
depsForSource = [sourceName] + headerPaths
depsToAppend = [RemoveStart(fn.replace("\\", "/"), startDirectory) for
fn in depsForSource]
deps.append([objName, depsToAppend])
return deps
def PathStem(p):
""" Return the stem of a filename: "CallTip.o" -> "CallTip" """
return os.path.splitext(os.path.basename(p))[0]
def InsertSynonym(dependencies, current, additional):
""" Insert a copy of one object file with dependencies under a different name.
    Used when one source file is used to create two object files with different
preprocessor definitions. """
result = []
for dep in dependencies:
result.append(dep)
if (dep[0] == current):
            depAdd = [additional, dep[1]]
result.append(depAdd)
return result
def ExtractDependencies(input):
""" Create a list of dependencies from input list of lines
Each element contains the name of the object and a list of
files that it depends on.
Dependencies that contain "/usr/" are removed as they are system headers. """
deps = []
for line in input:
headersLine = line.startswith(" ") or line.startswith("\t")
line = line.strip()
isContinued = line.endswith("\\")
line = line.rstrip("\\ ")
fileNames = line.strip().split(" ")
if not headersLine:
# its a source file line, there may be headers too
sourceLine = fileNames[0].rstrip(":")
fileNames = fileNames[1:]
deps.append([sourceLine, []])
deps[-1][1].extend(header for header in fileNames if "/usr/" not in header)
return deps
def TextFromDependencies(dependencies):
""" Convert a list of dependencies to text. """
text = ""
indentHeaders = "\t"
joinHeaders = continuationLineEnd + os.linesep + indentHeaders
for dep in dependencies:
object, headers = dep
text += object + ":"
for header in headers:
text += joinHeaders
text += header
if headers:
text += os.linesep
return text
def UpdateDependencies(filepath, dependencies, comment=""):
""" Write a dependencies file if different from dependencies. """
FileGenerator.UpdateFile(os.path.abspath(filepath), comment.rstrip() + os.linesep +
TextFromDependencies(dependencies))
def WriteDependencies(output, dependencies):
""" Write a list of dependencies out to a stream. """
output.write(TextFromDependencies(dependencies))
if __name__ == "__main__":
""" Act as a filter that reformats input dependencies to one per line. """
inputLines = sys.stdin.readlines()
deps = ExtractDependencies(inputLines)
WriteDependencies(sys.stdout, deps)
|
c0710204/edx-platform
|
common/lib/xmodule/xmodule/imageannotation_module.py
|
Python
|
agpl-3.0
| 7,154
| 0.002935
|
"""
Module for Image annotations using annotator.
"""
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
from xmodule.annotator_mixin import get_instructions, html_to_text
from xmodule.annotator_token import retrieve_token
from xblock.fragment import Fragment
import textwrap
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class AnnotatableFields(object):
""" Fields for `ImageModule` and `ImageDescriptor`. """
data = String(help=_("XML data for the annotation"),
scope=Scope.content,
default=textwrap.dedent("""\
<annotatable>
<instructions>
<p>
Add the instructions to the assignment here.
</p>
</instructions>
<p>
Lorem ipsum dolor sit amet, at amet animal petentium nec. Id augue nemore postulant mea. Ex eam dicant noluisse expetenda, alia admodum abhorreant qui et. An ceteros expetenda mea, tale natum ipsum quo no, ut pro paulo alienum noluisse.
</p>
<json>
navigatorSizeRatio: 0.25,
wrapHorizontal: false,
showNavigator: true,
navigatorPosition: "BOTTOM_LEFT",
showNavigationControl: true,
tileSources: [{"profile": "http://library.stanford.edu/iiif/image-api/1.1/compliance.html#level2", "scale_factors": [1, 2, 4, 8, 16, 32, 64], "tile_height": 1024, "height": 3466, "width": 113793, "tile_width": 1024, "qualities": ["native", "bitonal", "grey", "color"], "formats": ["jpg", "png", "gif"], "@context": "http://library.stanford.edu/iiif/image-api/1.1/context.json", "@id": "http://54.187.32.48/loris/suzhou_orig.jp2"}],
</json>
</annotatable>
"""))
display_name = String(
display_name=_("Display Name"),
help=_("Display name for this module"),
scope=Scope.settings,
default=_('Image Annotation'),
)
instructor_tags = String(
display_name=_("Tags for Assignments"),
help=_("Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue"),
scope=Scope.settings,
default='professor:green,teachingAssistant:blue',
)
annotation_storage_url = String(
help=_("Location of Annotation backend"),
scope=Scope.settings,
default="http://your_annotation_storage.com",
display_name=_("Url for Annotation Storage")
)
annotation_token_secret = String(
help=_("Secret string for annotation storage"),
scope=Scope.settings,
default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
display_name=_("Secret Token String for Annotation")
)
default_tab = String(
display_name=_("Default Annotations Tab"),
help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."),
scope=Scope.settings,
default="myNotes",
)
# currently only supports one instructor, will build functionality for multiple later
instructor_email = String(
display_name=_("Email for 'Instructor' Annotations"),
help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."),
scope=Scope.settings,
default="",
)
annotation_mode = String(
display_name=_("Mode for Annotation Tool"),
help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"),
scope=Scope.settings,
default="everyone",
)
class ImageAnnotationModule(AnnotatableFields, XModule):
'''Image Annotation Module'''
js = {
'coffee': [
resource_string(__name__, 'js/src/javascript_loader.coffee'),
resource_string(__name__, 'js/src/html/display.coffee'),
resource_string(__name__, 'js/src/annotatable/display.coffee'),
],
'js': [
resource_string(__name__, 'js/src/collapsible.js'),
]
}
css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
icon_class = 'imageannotation'
def __init__(self, *args, **kwargs):
super(ImageAnnotationModule, self).__init__(*args, **kwargs)
xmltree = etree.fromstring(self.data)
self.instructions = self._extract_instructions(xmltree)
self.openseadragonjson = html_to_text(etree.tostring(xmltree.find('json'), encoding='unicode'))
self.user_email = ""
self.is_course_staff = False
if self.runtime.get_user_role() in ['instructor', 'staff']:
self.is_course_staff = True
if self.runtime.get_real_user is not None:
try:
self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
except Exception: # pylint: disable=broad-except
self.user_email = _("No email address found.")
def _extract_instructions(self, xmltree):
""" Removes <instructions> from the xmltree and return
|
s them as a string, otherwise None. """
return get_instructions(xmltree)
def student_view(self, context):
""" Renders parameters
|
to template. """
context = {
'display_name': self.display_name_with_default,
'instructions_html': self.instructions,
'token': retrieve_token(self.user_email, self.annotation_token_secret),
'tag': self.instructor_tags,
'openseadragonjson': self.openseadragonjson,
'annotation_storage': self.annotation_storage_url,
'default_tab': self.default_tab,
'instructor_email': self.instructor_email,
'annotation_mode': self.annotation_mode,
'is_course_staff': self.is_course_staff,
}
fragment = Fragment(self.system.render_template('imageannotation.html', context))
# TinyMCE already exists in Studio so we should not load the files again
# get_real_user always returns "None" in Studio since its runtimes contains no anonymous ids
if self.runtime.get_real_user is not None:
fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/tinymce.full.min.js")
fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js")
return fragment
class ImageAnnotationDescriptor(AnnotatableFields, RawDescriptor): # pylint: disable=abstract-method
''' Image annotation descriptor '''
module_class = ImageAnnotationModule
mako_template = "widgets/raw-edit.html"
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(ImageAnnotationDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([
ImageAnnotationDescriptor.annotation_storage_url,
ImageAnnotationDescriptor.annotation_token_secret,
])
return non_editable_fields
|
Senbjorn/mipt_lab_2016
|
lab_19/task_H.py
|
Python
|
gpl-3.0
| 1,281
| 0.042155
|
#task_H
def dijkstra(start, graph):
n = len(graph)
D = [None] * n
D[start] = 0
index = 0
    Q = [start]
while index < len(Q):
v = Q[index]
index += 1
for u in graph[v]:
            if D[u] is None or D[v] + min(graph[v][u]) < D[u]:
                D[u] = D[v] + min(graph[v][u])
Q.append(u)
return D
def reverse(graph):
    n = len(graph)
    graph_reversed = {x: {} for x, y in zip(range(n), range(n))}
    for i in range(n):
        for v in graph[i]:
            for w in graph[i][v]:
                add(graph_reversed, v, i, w)
    return graph_reversed  # was missing: path() relies on the reversed graph
def add(graph, a, b, w):
if b in graph[a]:
        graph[a][b].append(w)
else:
graph[a][b] = [w]
def min_vertex(x, D, graph):
    A = {u: min(graph[x][u]) + D[u] for u in graph[x] if D[u] is not None}
    L = list(A.items())
min_i = L[0][0]
min_v = L[0][1]
for v in A:
if A[v] < min_v:
min_v = A[v]
min_i = v
return min_i
def path(graph, D, s, f):
graph = reverse(graph)
x = f
P = [f]
while x != s:
x = min_vertex(x, D, graph)
P.append(x)
return P[-1::-1]
n, m, s, f = tuple(map(int, input().split()))
graph = {x: {} for x, y in zip(range(n), range(n))}
for i in range(m):
a, b, w = tuple(map(int, input().split()))
add(graph, a, b, w)
add(graph, b, a, w)
D = dijkstra(s, graph)
print(*path(graph, D, s, f))
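# Input sketch (n m s f, then m lines "a b w" for undirected weighted edges):
#   4 4 0 3
#   0 1 1
#   1 3 1
#   0 2 5
#   2 3 1
# expected output: a shortest s -> f path, here "0 1 3"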
|
meta-it/misc-addons
|
attachment_large_object/tests/__init__.py
|
Python
|
lgpl-3.0
| 78
| 0
|
from . import test_attachment
fast_suite = [test_attachment,
              ]
|
dakiri/splunk-app-twitter
|
twitter2/django/twitter2/views.py
|
Python
|
apache-2.0
| 944
| 0.004237
|
from .forms import SetupForm
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from splunkdj.decorators.render import render_to
from splunkdj.setup import create_setup_view_context
@login_required
def home(request):
# Redirect to the default view, which happens to be a non-framework view
return redirect('/en-us/app/twitter2/twitter_general')
@render_to('twitter2:setup.html')
@login_required
def setup(request):
result = create_setup_view_context(
request,
SetupForm,
reverse('twitter2:home'))
# HACK: Workaround DVPL-4647 (Splunk 6.1 and below):
# Refresh current app's state so that non-framework views
# observe when the app becomes configured.
service = request.service
app_name = service.namespace['app']
service.apps[app_name].post('_reload')
return result
|
illume/numpy3k
|
numpy/testing/decorators.py
|
Python
|
bsd-3-clause
| 9,735
| 0.003287
|
"""Decorators for labeling test objects
Decorators that merely return a modified version of the original
function object are straightforward. Decorators that return a new
function object need to use
nose.tools.make_decorator(original_function)(decorator) in returning
the decorator, in order to preserve metadata such as function name,
setup and teardown functions and so on - see nose.tools for more
information.
"""
import warnings
import sys
def slow(t):
"""Labels a test as 'slow'.
The exact definition of a slow test is obviously both subjective and
hardware-dependent, but in general any individual test that requires more
    than a second or two should be labeled as slow (the whole suite consists of
thousands of tests, so even a second is significant)."""
t.slow = True
return t
def setastest(tf=True):
''' Signals to nose that this function is or is not a test
Parameters
----------
tf : bool
        If True, specifies this is a test; if False, it is not a test
e.g
>>> from numpy.testing.decorators import setastest
>>> @setastest(False)
... def func_with_test_in_name(arg1, arg2): pass
...
>>>
This decorator cannot use the nose namespace, because it can be
called from a non-test module. See also istest and nottest in
nose.tools
'''
def set_test(t):
t.__test__ = tf
return t
return set_test
def skipif(skip_condition, msg=None):
''' Make function raise SkipTest exception if skip_condition is true
Parameters
----------
skip_condition : bool or callable.
Flag to determine whether to skip test. If the condition is a
callable, it is used at runtime to dynamically make the decision. This
is useful for tests that may require costly imports, to delay the cost
until the test suite is actually executed.
msg : string
Message to give on raising a SkipTest exception
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised when the skip_condition was True, and the function
to be called normally otherwise.
Notes
-----
You will see from the code that we had to further decorate the
decorator with the nose.tools.make_decorator function in order to
transmit function name, and various other metadata.
'''
def skip_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
# Allow for both boolean or callable skip conditions.
if callable(skip_condition):
skip_val = lambda : skip_condition()
else:
skip_val = lambda : skip_condition
def get_msg(func,msg=None):
"""Skip message with information about function being skipped."""
if msg is None:
out = 'Test skipped due to test condition'
else:
out = '\n'+msg
return "Skipping test: %s%s" % (func.__name__,out)
# We need to define *two* skippers because Python doesn't allow both
# return with value and yield inside the same function.
def skipper_func(*args, **kwargs):
"""Skipper for normal test functions."""
if skip_val():
raise nose.SkipTest(get_msg(f,msg))
else:
return f(*args, **kwargs)
def skipper_gen(*args, **kwargs):
"""Skipper for test generators."""
if skip_val():
raise nose.SkipTest(get_msg(f,msg))
else:
for x in f(*args, **kwargs):
yield x
# Choose the right skipper to use when building the actual decorator.
if nose.util.isgenerator(f):
skipper = skipper_gen
else:
skipper = skipper_func
return nose.tools.make_decorator(f)(skipper)
return skip_decorator
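# Example use of skipif (a sketch; the condition and message are illustrative):
#   @skipif(sys.platform == 'win32', "POSIX-only test")
#   def test_something():
#       ...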
def knownfailureif(fail_condition, msg=None):
''' Make function raise KnownFailureTest exception if fail_condition is true
Parameters
----------
fail_condition : bool or callable.
Flag to determine whether to mark test as known failure (True)
or not (False). If the condition is a callable, it is used at
runtime to dynamically make the decision. This is useful for
tests that may require costly imports, to delay the cost
until the test suite is actually executed.
msg : string
Message to give on raising a KnownFailureTest exception
Returns
-------
decorator : function
        Decorator, which, when applied to a function, causes KnownFailureTest
        to be raised when the fail_condition was True, and the function
        to be called normally otherwise.
Notes
-----
You will see from the code that we had to further decorate the
decorator with the nose.tools.make_decorator function in order to
transmit function name, and various other metadata.
'''
if msg is None:
msg = 'Test skipped due to known failure'
# Allow for both boolean or callable known failure conditions.
if callable(fail_condition):
        fail_val = lambda : fail_condition()
else:
fail_val = lambda : fail_condition
def knownfail_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
from noseclasses import KnownFailureTest
def knownfailer(*args, **kwargs):
if fail_val():
                raise KnownFailureTest(msg)
else:
return f(*args, **kwargs)
        return nose.tools.make_decorator(f)(knownfailer)
return knownfail_decorator
# The following two classes are copied from python 2.6 warnings module (context
# manager)
class WarningMessage(object):
"""Holds the result of a single showwarning() call."""
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line")
def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
if category:
self._category_name = category.__name__
else:
self._category_name = None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class WarningManager:
def __init__(self, record=False, module=None):
self._record = record
if module is None:
self._module = sys.modules['warnings']
else:
self._module = module
self._entered = False
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
log = []
def showwarning(*args, **kwargs):
log.append(WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return log
else:
return None
    def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
def deprecated(conditional=True):
"""This decorator can be used to filter Deprecation Warning, to avoid
printing them during the test suite run, while checking that the test
actually raises a DeprecationWarning.
Parameters
----------
conditional : bool or callable.
Flag to determine whether to mark test as deprecated or not. If the
condition is a
|
aronsky/home-assistant
|
tests/components/advantage_air/test_sensor.py
|
Python
|
apache-2.0
| 4,781
| 0.000627
|
"""Test the Advantage Air Sensor Platform."""
from datetime import timedelta
from json import loads
from homeassistant.components.advantage_air.const import DOMAIN as ADVANTAGE_AIR_DOMAIN
from homeassistant.components.advantage_air.sensor import (
ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
ADVANTAGE_AIR_SET_COUNTDOWN_VALUE,
)
from homeassistant.config_entries import RELOAD_AFTER_UPDATE_DELAY
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.helpers import entity_registry as er
from homeassistant.util import dt
from tests.common import async_fire_time_changed
from tests.components.advantage_air import (
TEST_SET_RESPONSE,
TEST_SET_URL,
TEST_SYSTEM_DATA,
TEST_SYSTEM_URL,
add_mock_config,
)
async def test_sensor_platform(hass, aioclient_mock):
"""Test sensor platform."""
aioclient_mock.get(
TEST_SYSTEM_URL,
text=TEST_SYSTEM_DATA,
)
aioclient_mock.get(
TEST_SET_URL,
text=TEST_SET_RESPONSE,
)
await add_mock_config(hass)
registry = er.async_get(hass)
assert len(aioclient_mock.mock_calls) == 1
# Test First TimeToOn Sensor
entity_id = "sensor.ac_one_time_to_on"
state = hass.states.get(entity_id)
assert state
assert int(state.state) == 0
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-timetoOn"
value = 20
await hass.services.async_call(
ADVANTAGE_AIR_DOMAIN,
ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
{ATTR_ENTITY_ID: [entity_id], ADVANTAGE_AIR_SET_COUNTDOWN_VALUE: value},
blocking=True,
)
assert len(aioclient_mock.mock_calls) == 3
assert aioclient_mock.mock_calls[-2][0] == "GET"
assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
assert data["ac1"]["info"]["countDownToOn"] == value
assert aioclient_mock.mock_calls[-1][0] == "GET"
assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
# Test First TimeToOff Sensor
entity_id = "sensor.ac_one_time_to_off"
state = hass.states.get(entity_id)
assert state
assert int(state.state) == 10
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-timetoOff"
value = 0
await hass.services.async_call(
ADVANTAGE_AIR_DOMAIN,
ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
{ATTR_ENTITY_ID: [entity_id], ADVANTAGE_AIR_SET_COUNTDOWN_VALUE: value},
blocking=True,
)
assert len(aioclient_mock.mock_calls) == 5
assert aioclient_mock.mock_calls[-2][0] == "GET"
assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
assert data["ac1"]["info"]["countDownToOff"] == value
assert aioclient_mock.mock_calls[-1][0] == "GET"
assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
# Test First Zone Vent Sensor
entity_id = "sensor.zone_open_with_sensor_vent"
state = hass.states.get(entity_id)
assert state
assert int(state.state) == 100
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-z01-vent"
# Test Second Zone Vent Sensor
entity_id = "sensor.zone_closed_with_sensor_vent"
state = hass.states.get(entity_id)
assert state
assert int(state.state) == 0
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-z02-vent"
# Test First Zone Signal Sensor
entity_id = "sensor.zone_open_with_sensor_signal"
state = hass.states.get(entity_id)
assert state
assert int(state.state) == 40
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-z01-signal"
# Test Second Zone Signal Sensor
entity_id = "sensor.zone_closed_with_sensor_signal"
state = hass.states.get(entity_id)
assert state
assert int(state.state) == 10
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-z02-signal"
# Test First Zone Temp Sensor (disabled by default)
entity_id = "sensor.zone_open_with_sensor_temperature"
assert not hass.states.get(entity_id)
registry.async_update_entity(entity_id=entity_id, disabled_by=None)
await hass.async_block_till_done()
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(seconds=RELOAD_AFTER_UPDATE_DELAY + 1),
)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state
assert int(state.state) == 25
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-z01-temp"
|
MostlyOpen/odoo_addons
|
myo_survey/wizard/__init__.py
|
Python
|
agpl-3.0
| 936
| 0
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import survey_update_wizard
|
approvals/ApprovalTests.Python
|
approvaltests/reporters/generic_diff_reporter.py
|
Python
|
apache-2.0
| 2,752
| 0.00109
|
import subprocess
from typing import List, Optional
from approvaltests import ensure_file_exists
from approvaltests.command import Command
from approvaltests.core.reporter import Reporter
from approvaltests.utils import to_json
PROGRAM_FILES = "{ProgramFiles}"
class GenericDiffReporterConfig:
def __init__(self, name: str, path: str, extra_args: Optional[List[str]] = None):
self.name = name
self.path = path
self.extra_args = extra_args or []
def serialize(self):
result = [self.name, self.path]
if self.extra_args:
result.append(self.extra_args)
return result
def create_config(config) -> GenericDiffReporterConfig:
return GenericDiffReporterConfig(*config)
class GenericDiffReporter(Reporter):
"""
A reporter that launches
an external diff tool given by config.
"""
@staticmethod
def create(diff_tool_path: str) -> "GenericDiffReporter":
return GenericDiffReporter(create_config(["custom", diff_tool_path]))
def __init__(self, config: GenericDiffReporterConfig) -> None:
        self.name = config.name
self.path = self.expand_program_files(config.path)
self.extra_args = config.extra_args
def __str__(self) -> str:
if self.extra_args:
config = {
"name": self.name,
"path": self.path,
"arguments": self.extra_args,
}
else:
config = {"name": self.name, "path": self.path}
return to_json(config)
@staticmethod
def run_command(command_array):
subprocess.Popen(command_array)
def get_command(self, received: str, approved: str) -> List[str]:
return [self.path] + self.extra_args + [received, approved]
def report(self, received_path: str, approved_path: str) -> bool:
if not self.is_working():
return False
ensure_file_exists(approved_path)
command_array = self.get_command(received_path, approved_path)
self.run_command(command_array)
return True
def is_working(self) -> bool:
found = Command(self.path).locate()
if not found:
return False
else:
self.path = found
return True
@staticmethod
def expand_program_files(path: str) -> str:
if PROGRAM_FILES not in path:
return path
for candidate in [
r"C:/Program Files",
r"C:/Program Files (x86)",
r"C:/ProgramW6432",
]:
possible = path.replace(PROGRAM_FILES, candidate)
if Command.executable(possible):
return possible
return path.replace(PROGRAM_FILES, "C:/Program Files")
|
jeblair/GitPython
|
git/objects/commit.py
|
Python
|
bsd-3-clause
| 20,799
| 0.002164
|
# commit.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from gitdb import IStream
from git.util import (
hex_to_bin,
Actor,
Iterable,
Stats,
finalize_process
)
from git.diff import Diffable
from .tree import Tree
from . import base
from .util import (
Traversable,
Serializable,
parse_date,
altz_to_utctz_str,
parse_actor_and_date,
from_timestamp,
)
from git.compat import text_type
from time import (
time,
daylight,
altzone,
timezone,
localtime
)
import os
from io import BytesIO
import logging
log = logging.getLogger('git.objects.commit')
log.addHandler(logging.NullHandler())
__all__ = ('Commit', )
class Commit(base.Object, Iterable, Diffable, Traversable, Serializable):
"""Wraps a git Commit object.
This class will act lazily on some of its attributes and will query the
value on demand only if it involves calling the git binary."""
# ENVIRONMENT VARIABLES
# read when creating new commits
env_author_date = "GIT_AUTHOR_DATE"
env_committer_date = "GIT_COMMITTER_DATE"
# CONFIGURATION KEYS
conf_encoding = 'i18n.commitencoding'
# INVARIANTS
default_encoding = "UTF-8"
# object configuration
type = "commit"
__slots__ = ("tree",
"author", "authored_date", "author_tz_offset",
"committer", "committed_date", "committer_tz_offset",
"message", "parents", "encoding", "gpgsig")
_id_attribute_ = "hexsha"
def __init__(self, repo, binsha, tree=None, author=None, authored_date=None, author_tz_offset=None,
committer=None, committed_date=None, committer_tz_offset=None,
message=None, parents=None, encoding=None, gpgsig=None):
"""Instantiate a new Commit. All keyword arguments taking None as default will
be implicitly set on first query.
:param binsha: 20 byte sha1
:param parents: tuple( Commit, ... )
is a tuple of commit ids or actual Commits
:param tree: Tree
Tree object
:param author: Actor
is the author string ( will be implicitly converted into an Actor object )
:param authored_date: int_seconds_since_epoch
is the authored DateTime - use time.gmtime() to convert it into a
different format
:param author_tz_offset: int_seconds_west_of_utc
is the timezone that the authored_date is in
:param committer: Actor
is the committer string
:param committed_date: int_seconds_since_epoch
is the committed DateTime - use time.gmtime() to convert it into a
different format
:param committer_tz_offset: int_seconds_west_of_utc
is the timezone that the committed_date is in
:param message: string
is the commit message
:param encoding: string
encoding of the message, defaults to UTF-8
:param parents:
List or tuple of Commit objects which are our parent(s) in the commit
dependency graph
:return: git.Commit
:note:
Timezone information is in the same format and in the same sign
as what time.altzone returns. The sign is inverted compared to git's
UTC timezone."""
super(Commit, self).__init__(repo, binsha)
if tree is not None:
assert isinstance(tree, Tree), "Tree needs to be a Tree instance, was %s" % type(tree)
if tree is not None:
self.tree = tree
if author is not None:
self.author = author
if authored_date is not None:
self.authored_date = authored_date
if author_tz_offset is not None:
self.author_tz_offset = author_tz_offset
if committer is not None:
self.committer = committer
if committed_date is not None:
self.committed_date = committed_date
if committer_tz_offset is not None:
self.committer_tz_offset = committer_tz_offset
if message is not None:
self.message = message
if parents is not None:
self.parents = parents
if encoding is not None:
self.encoding = encoding
if gpgsig is not None:
self.gpgsig = gpgsig
@classmethod
def _get_intermediate_items(cls, commit):
return commit.parents
def _set_cache_(self, attr):
if attr in Commit.__slots__:
            # read the data in a chunk, it's faster - then provide a file wrapper
binsha, typename, self.size, stream = self.repo.odb.stream(self.binsha) # @UnusedVariable
self._deserialize(BytesIO(stream.read()))
else:
super(Commit, self)._set_cache_(attr)
# END handle attrs
@property
def authored_datetime(self):
return from_timestamp(self.authored_date, self.author_tz_offset)
@property
def committed_datetime(self):
return from_timestamp(self.committed_date, self.committer_tz_offset)
@property
def summary(self):
""":return: First line of the commit message"""
return self.message.split('\n', 1)[0]
def count(self, paths='', **kwargs):
"""Count the number of commits reachable from this commit
:param paths:
is an optional path or a list of paths restricting the return value
to commits actually containing the paths
:param kwargs:
Additional options to be passed to git-rev-list. They must not alter
the output style of the command, or parsing will yield incorrect results
:return: int defining the number of reachable commits"""
# yes, it makes a difference whether empty paths are given or not in our case
# as the empty paths version will ignore merge commits for some reason.
if paths:
return len(self.repo.git.rev_list(self.hexsha, '--', paths, **kwargs).splitlines())
else:
return len(self.repo.git.rev_list(self.hexsha, **kwargs).splitlines())
@property
def name_rev(self):
"""
:return:
String describing the commits hex sha based on the closest Reference.
Mostly useful for UI purposes"""
return self.repo.git.name_rev(self)
@classmethod
    def iter_items(cls, repo, rev, paths='', **kwargs):
"""Find all commits matching the given criteria.
:param repo: is the Repo
:param rev: revision specifier, see git-rev-parse for viable options
:param paths:
is an optional path or list of paths, if set only Commits that include the path
or paths will be considered
:param kwargs:
            optional keyword arguments to git rev-list where
``max_count`` is the maximum number of commits to fetch
``skip`` is the number of commits to skip
``since`` all commits since i.e. '1970-01-01'
:return: iterator yielding Commit items"""
if 'pretty' in kwargs:
raise ValueError("--pretty cannot be used as parsing expects single sha's only")
# END handle pretty
# use -- in any case, to prevent possibility of ambiguous arguments
# see https://github.com/gitpython-developers/GitPython/issues/264
args = ['--']
if paths:
args.extend((paths, ))
# END if paths
proc = repo.git.rev_list(rev, args, as_process=True, **kwargs)
return cls._iter_from_process_or_stream(repo, proc)
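    # Example (sketch; repo, rev and path are illustrative):
    #   list(Commit.iter_items(repo, 'master', paths='doc/', max_count=10))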
def iter_parents(self, paths='', **kwargs):
"""Iterate _all_ parents of this commit.
:param paths:
Optional path or list of paths limiting the Commits to those that
contain at least one of the paths
:param kwargs: All arguments allowed by git-rev-list
:return: Iterator yielding Commit objects which are parents of self """
# skip ourselves
|
NickyChan/NBASchedule
|
main.py
|
Python
|
gpl-3.0
| 7,776
| 0.010973
|
# -*- coding: utf-8 -*-
# Chinese text can't be aligned with Python's built-in padding functions;
# spaces must be added/removed manually based on the width of the Chinese string
# Python 2.7.12 & matplotlib 2.0.0
import re
from urllib2 import *
import matplotlib.pyplot as plt
#Get a set of records from nba.hupu.com due to given team
def getDataSet(team):
statUserAgent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36'
statHeaders = {'User-Agent': statUserAgent}
statRequest = Request('https://nba.hupu.com/schedule/' + team, headers=statHeaders)
statResponse = urlopen(statRequest, timeout = 10)
statHtml = statResponse.read()
#Load Game information using regular expression
statPattern = re.compile(
'''<tr.*?<a.*?>(.*?)</a>.*?>(.*?)</a>.*?<td.*?>(.*?)</td>.*?<td.*?>(.*?)</td>.*?<td.*?>(.*?)</td>.*?</tr>''',
re.S)
#store all the records that corresponds to the RE rule
statDataSet = re.findall(statPattern, statHtml)
return statDataSet
class Record:
# initialization
def __init__(self, team):
self.team = team #Name of the team in Chinese
self.num = 0 #Number of the game
self.place = '' #Home/Road
self.done = True #True is the game is finished, else False
self.result = '' #The result of the game in Chinese
self.opp = '' #Opppnent of the game in Chinese
self.score = '' #The score of 2 teams in string like " XX:XX"
self.scoreSelf = '' #The score of team
self.scoreOpp = '' #The score of opponent
self.scoreDiff = 0 #The difference in scores (positive if win/negative if lose)
self.dateTime = '' #The date and time of this game (Beijing Time) in string
self.date = '' #The date of this game
self.time = '' #The time of this game
# Load seperated data from a list generated by regular expression decomposing
def Load(self, statRecord, count):
#Get the number of this game record
self.num = count
#if this games is unfinished
if statRecord[3].strip() == '-':
self.done = False
#To find out if it is a Home Game or a Road Game
if statRecord[0] == self.team:
self.place = '客'
self.opp = statRecord[1]
else:
self.place = '主'
self.opp = statRecord[0]
#if the game is finished
else:
#Get the result of this game
if statRecord[3].strip() == '胜':
self.result = '胜'
else:
self.result = '负'
if statRecord[0] == self.team:
self.place = '客'
self.opp = statRecord[1]
#Get the score of this game
self.scoreSelf = re.findall(r'^\d+', statRecord[2].strip())[0]
self.scoreOpp = re.findall(r'\d+$', statRecord[2].strip())[0]
self.score = self.scoreSelf + '-' + self.scoreOpp
else:
self.place = '主'
self.opp = statRecord[0]
self.scoreSelf = re.findall(r'\d+$', statRecord[2].strip())[0]
self.scoreOpp = re.findall(r'^\d+', statRecord[2].strip())[0]
self.score = self.scoreOpp + '-' + self.scoreSelf
self.scoreDiff = eval(self.scoreSelf) - eval(self.scoreOpp)
#Get the date and time of this game
self.dateTime = statRecord[4]
self.date = self.dateTime.split()[0]
self.time = self.dateTime.split()[1]
# Print game message
def Print(self):
#Trick here to fill in suitable spaces to align Chinese
spaceExtraSelf = ' '
spaceExtraOpp = ' '
if len(self.team) == 9: spaceExtraSelf = ' '
if len(self.team) == 5: spaceExtraSelf = ' '
if len(self.opp) == 9: spaceExtraOpp = ' '
if len(self.opp) == 5: spaceExtraOpp = ' '
if self.done == True:
if self.place == '客':
print ('Game %2s %s%10s VS %-10s%s %3s : %3s %+4d %s' % (
self.num, spaceExtraSelf, self.team, self.opp, spaceExtraOpp, self.scoreSelf, self.scoreOpp,
self.scoreDiff, self.dateTime))
if self.place == '主':
print ('Game %2s %s%10s VS %-10s%s %3s : %3s %+4d %s' % (
self.num, spaceExtraOpp, self.opp, self.team, spaceExtraSelf, self.scoreOpp, self.scoreSelf,
self.scoreDiff, self.dateTime))
else:
if self.place == '客':
print ('Game %2s %s%10s VS %-10s%s %s' % (
self.num, spaceExtraSelf, self.team, self.opp, spaceExtraOpp, self.dateTime))
if self.place == '主':
print ('Game %2s %s%10s VS %-10s%s %s' % (
self.num, spaceExtraOpp, self.opp, self.team, spaceExtraSelf, self.dateTime))
if __name__ == "__main__":
#Dictionary of team's English and Chinese names
#We need english names to implement url and Chinese name to print in Console
teams = {'spurs': '马刺', 'rockets': '火箭', 'grizzlies': '灰熊', 'pelicans':'鹈鹕', 'mavericks':'小牛',
'warriors': '勇士', 'clippers':'快船', 'kings': '国王', 'lakers': '湖人', 'suns': '太阳',
'jazz': '爵士', 'thunder': '雷霆', 'blazers': '开拓者', 'nuggets': '掘金', 'timberwolves': '森林狼',
'celtics': '凯尔特人', 'raptors': '猛龙', 'knicks': '尼克斯', '76ers': '76人', 'nets': '篮网',
'wizards': '奇才', 'hawks': '老鹰', 'heat': '热火', 'hornets': '黄蜂', 'magic': '魔术',
'cavaliers': '骑士', 'bucks':'雄鹿', 'bulls': '公牛', 'pacers': '步行者', 'pistons': '活塞'}
for team in teams:
        # Comment out this if and unindent the code below to get results for all 30 teams
if team == 'rockets':
statDataSet = getDataSet(team)
countGame = 0
countWin = 0
countLose = 0
streak = ''
streakCount = 0
results = []
#Count Wins and Loses and print every record
for statRecord in statDataSet:
countGame += 1
record = Record(teams[team])
record.Load(statRecord, countGame)
if record.done == True:
results.append(record.scoreDiff)
if record.result == '胜':
countWin += 1
else:
countLose += 1
record.Print()
#Reverse the records to check the Win/Lose streak
statDataSet.reverse()
for statRecord in statDataSet:
            record = Record(teams[team])
record.Load(statRecord, countGame)
if streak == '':
streak = record.result
streakCount = 1
continue
if record.result == streak:
streakCount += 1
else:
break
#Plot results one by one
x = range(0, len(results))
figure = plt.figure()
plt.plot(x, results, 'r-', alpha = 0.6, label = 'dot')
plt.plot(x, results, 'ro', label = 'line')
plt.title(team.upper() + ' +/- Overall' )
plt.xlabel('Game NO.')
plt.ylabel('+/-')
plt.grid(True)
figure.set_size_inches(12,4)
plt.legend(loc = 'upper right')
plt.show()
        print('Total : %d W / %d L, currently on a %d-game %s streak' % (countWin, countLose, streakCount, streak))
|
mpolednik/reddit-button-hue
|
app/discovery/bridges.py
|
Python
|
mit
| 649
| 0
|
import threading
import upnp
import nupnp
class DiscoveryThread(threading.Thread):
def __init__(self, bridges):
super(DiscoveryThread, self).__init__()
        self.bridges = bridges
self.upnp_thread = upnp.UPnPDiscoveryThread(self.bridges)
self.nupnp_thread = nupnp.NUPnPDiscoveryThread(self.bridges)
def run(self):
self.upnp_thread.start()
self.nupnp_thread.start()
self.upnp_thread.join()
self.nupnp_thread.join()
def discover():
bridges = set()
discovery_thread = DiscoveryThread(bridges)
discovery_thread.start()
discovery_thread.join()
return bridges
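# Usage sketch: run both discovery mechanisms and collect unique bridges:
#   bridges = discover()  # set populated by the UPnP and N-UPnP threads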
|
kohr-h/odl
|
odl/contrib/solvers/functional/__init__.py
|
Python
|
mpl-2.0
| 416
| 0
|
# Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
__all__ = ()
from .nonlocalmeans_functionals import *
__all__ += nonlocalmeans_functionals.__all__
|
AhmedHani/Python-Neural-Networks-API
|
OptimizationAlgorithms/RPROP.py
|
Python
|
mit
| 310
| 0.003226
|
__author__ = 'Ahmed Hani Ibrahim'
from LearningAlgorithm import *
class RPROP(LearningAlgorithm):
def learn(self, learningRate, input, output, network):
"""
:param learningRate:
:param input:
:param output:
:param network:
:return:
"""
pass
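        # Sketch of the standard RPROP- update this stub would perform (an
        # assumption for illustration, not part of the original code); per
        # weight w with current gradient g and previous gradient g_prev:
        #   if g_prev * g > 0:   step = min(step * 1.2, step_max)
        #   elif g_prev * g < 0: step = max(step * 0.5, step_min)
        #   w -= sign(g) * step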
|
h2oloopan/easymerge
|
EasyMerge/tests/reddit/r2/r2/controllers/promotecontroller.py
|
Python
|
mit
| 34,376
| 0.000698
|
# The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2013 reddit
# Inc. All Rights Reserved.
###############################################################################
from datetime import datetime, timedelta
from babel.numbers import format_number
import json
import urllib
from pylons import c, g, request
from pylons.i18n import _, N_
from r2.controllers.api import ApiController
from r2.controllers.listingcontroller import ListingController
from r2.controllers.reddit_base import RedditController
from r2.lib import cssfilter, inventory, promote
from r2.lib.authorize import get_account_info, edit_profile, PROFILE_LIMIT
from r2.lib.db import queries
from r2.lib.errors import errors
from r2.lib.media import force_thumbnail, thumbnail_url
from r2.lib.memoize import memoize
from r2.lib.menus import NamedButton, NavButton, NavMenu
from r2.lib.pages import (
LinkInfoPage,
PaymentForm,
PromoteInventory,
PromotePage,
PromoteLinkForm,
PromoteLinkNew,
PromoteReport,
Reddit,
RefundPage,
RenderableCampaign,
Roadblocks,
UploadedImage,
)
from r2.lib.pages.things import wrap_links
from r2.lib.system_messages import user_added_messages
from r2.lib.utils import make_offset_date, to_date, to36
from r2.lib.validator import (
json_validate,
nop,
noresponse,
VAccountByName,
ValidAddress,
validate,
validatedForm,
ValidCard,
ValidIP,
VBid,
VBoolean,
VByName,
VDate,
VDateRange,
VExistingUname,
VFloat,
VImageType,
VInt,
VLength,
VLink,
VLocation,
VModhash,
VOneOf,
VPriority,
VPromoCampaign,
VRatelimit,
VSelfText,
VShamedDomain,
VSponsor,
VSponsorAdmin,
VSponsorAdminOrAdminSecret,
VSubmitSR,
VTitle,
VUploadLength,
VUrl,
)
from r2.models import (
Account,
calc_impressions,
Frontpage,
get_promote_srid,
Link,
Message,
NotFound,
PromoCampaign,
PromotionLog,
PromotionWeights,
PromotedLinkRoadblock,
Subreddit,
)
def campaign_has_oversold_error(form, campaign):
if campaign.priority.inventory_override:
return
target = Subreddit._by_name(campaign.sr_name) if campaign.sr_name else None
return has_oversold_error(form, campaign, campaign.start_date,
campaign.end_date, campaign.bid, campaign.cpm,
target, campaign.location)
def has_oversold_error(form, campaign, start, end, bid, cpm, target, location):
ndays = (to_date(end) - to_date(start)).days
total_request = calc_impressions(bid, cpm)
daily_request = int(total_request / ndays)
oversold = inventory.get_oversold(target or Frontpage, start, end,
daily_request, ignore=campaign,
location=location)
if oversold:
min_daily = min(oversold.values())
available = min_daily * ndays
msg_params = {
'available': format_number(available, locale=c.locale),
'target': target.name if target else 'the frontpage',
'start': start.strftime('%m/%d/%Y'),
'end': end.strftime('%m/%d/%Y'),
}
c.errors.add(errors.OVERSOLD_DETAIL, field='bid',
msg_params=msg_params)
form.has_errors('bid', errors.OVERSOLD_DETAIL)
return True
class PromoteController(RedditController):
@validate(VSponsor())
def GET_new_promo(self):
return PromotePage(title=_("create sponsored link"),
content=PromoteLinkNew()).render()
@validate(VSponsor('link'),
link=VLink('link'))
def GET_edit_promo(self, link):
if not link or link.promoted is None:
return self.abort404()
rendered = wrap_links(link, skip=False)
form = PromoteLinkForm(link, rendered)
page = Reddit(title=_("edit sponsored link"), content=form,
show_sidebar=False, extension_handling=False)
return page.render()
# admin only because the route might change
@validate(VSponsorAdmin('campaign'),
              campaign=VPromoCampaign('campaign'))
def GET_edit_promo_campaign(self, campaign):
if not campaign:
return self.abort404()
link = Link._byID(campaign.link_id)
return self.redirect(promote.promo_edit_url(link))
@validate(VSponsorAdmin(),
link=VLink("link"),
campaign=VPromoCampaign("campaign"))
def GET_refund(self, link, campaign):
if campaign.link_id != link._id:
return self.abort404()
content = RefundPage(link, campaign)
return Reddit("refund", content=content, show_sidebar=False).render()
@validate(VSponsorAdmin())
def GET_roadblock(self):
return PromotePage(title=_("manage roadblocks"),
content=Roadblocks()).render()
@validate(VSponsor("link"),
link=VLink("link"),
campaign=VPromoCampaign("campaign"))
def GET_pay(self, link, campaign):
# no need for admins to play in the credit card area
if c.user_is_loggedin and c.user._id != link.author_id:
return self.abort404()
if not campaign.link_id == link._id:
return self.abort404()
if g.authorizenetapi:
data = get_account_info(c.user)
content = PaymentForm(link, campaign,
customer_id=data.customerProfileId,
profiles=data.paymentProfiles,
max_profiles=PROFILE_LIMIT)
else:
content = None
res = LinkInfoPage(link=link,
content=content,
show_sidebar=False)
return res.render()
@validate(VSponsorAdminOrAdminSecret('secret'),
start=VDate('startdate'),
end=VDate('enddate'),
link_text=nop('link_text'),
owner=VAccountByName('owner'))
def GET_report(self, start, end, link_text=None, owner=None):
now = datetime.now(g.tz).replace(hour=0, minute=0, second=0,
microsecond=0)
end = end or now - timedelta(days=1)
start = start or end - timedelta(days=7)
links = []
bad_links = []
owner_name = owner.name if owner else ''
if owner:
promo_weights = PromotionWeights.get_campaigns(start, end,
author_id=owner._id)
campaign_ids = [pw.promo_idx for pw in promo_weights]
campaigns = PromoCampaign._byID(campaign_ids, data=True)
link_ids = {camp.link_id for camp in campaigns.itervalues()}
links.extend(Link._byID(link_ids, data=True, return_dict=False))
if link_text is not None:
id36s = link_text.replace(',', ' ').split()
try:
links_from_text = Link._byID36(id36s, data=True)
except NotFound:
links_from_text = {}
bad_links = [id36 for id36 in id36s if id36 not
|
mtrgroup/django-mtr-utils
|
mtr/utils/urls.py
|
Python
|
mit
| 186
| 0
|
from django.conf.urls import patterns, url
urlpatterns = patterns(
'mtr.utils.views',
    url(r'^model/(?P<name>.+)/pk/(?P<pk>\d+)$',
        'model_label', name='model_label')
)
|
begea/X-Serv-14.5-Sumador-Simple
|
check.py
|
Python
|
gpl-3.0
| 1,680
| 0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script de comprobación de entrega de ejercicio
Para ejecutarlo, desde la shell:
$ python check.py login_github
"""
import os
import random
import sys
ejercicio = 'X-Serv-14.5-Sumador-Simple'
student_files = [
'servidor-sumador.py'
]
repo_files = [
'check.py',
'README.md',
'LICENSE',
'.gitignore',
'.git'
]
files = student_files + repo_files
if len(sys.argv) != 2:
print
sys.exit("Usage: $ python check.py login_github")
repo_git = "http://github.com/" + sys.argv[1] + "/" + ejercicio
aleatorio = str(int(random.random() * 1000000))
error = 0
print
print "Clonando el repositorio " + repo_git + "\n"
os.system('git clone ' + repo_git + ' /tmp/' + aleatorio + ' > /dev/null 2>&1')
try:
github_file_list = os.listdir('/tmp/' + aleatorio)
except OSError:
error = 1
print "Error: No se ha podido acceder al repositorio " + repo_git + "."
print
sys.exit()
if len(github_file_list) != len(files):
error = 1
print "Error: número de ficheros en el repositorio incorrecto"
for filename in files:
if filename not in github_file_list:
error = 1
print "\tE
|
rror: " + filename + " no encontrado en el repositorio."
if not error:
print "Parece que la entrega se ha realizado bien."
print
print "La salida de pep8 es: (si todo va bien, no ha de mostrar nada)"
print
for filename in student_files:
    if filename in github_file_list:
os.system('pep8 --repeat --show-source --statistics /tmp/'
+ aleatorio + '/' + filename)
else:
print "Fichero " + filename + " no encontrado en el repositorio."
print
|
Gandi/pyramid_kvs
|
setup.py
|
Python
|
gpl-2.0
| 1,633
| 0.000612
|
import os
import re
import sys
from setuptools import setup, find_packages
PY3 = sys.version_info[0] == 3
here = os.path.abspath(os.path.dirname(__file__))
name = 'pyramid_kvs'
with open(os.path.join(here, 'README.rst')) as readme:
README = readme.read()
with open(os.path.join(here, 'CHANGES.rst')) as changes:
CHANGES = changes.read()
with open(os.path.join(here, name, '__init__.py')) as v_file:
version = re.compile(r".*__version__ = '(.*?)'",
re.S).match(v_file.read()).group(1)
requires = ['pyramid', 'redis >= 3.0']
if PY3:
    requires.append('python3-memcached')
else:
requires.append('python-memcached')
tests_require = ['nose', 'coverage']
if sys.version_info < (2, 7):
tests_require += ['unittest2']
extras_require = {'test': tests_require}
setup(name=name.replace('_', '-'),
version=version,
description='Session and cache for Pyramid',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
],
author='Gandi',
author_email='[email protected]',
url='https://github.com/Gandi/pyramid_kvs',
keywords='web pyramid pylons',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='{0}.tests'.format(name),
install_requires=requires,
tests_require=tests_require,
extras_require=extras_require
)
|
udbhav/kishore
|
kishore/tests/music.py
|
Python
|
mit
| 2,056
| 0.002432
|
from django.core.urlresolvers import reverse
from kishore.models import Artist, Song, Release
from base import KishoreTestCase
class ArtistTestCase(KishoreTestCase):
def test_index(self):
resp = self.client.get(reverse('kishore_artists_index'))
self.assertEqual(resp.status_code, 200)
def test_detail(self):
a = Artist.objects.get(pk=1)
resp = self.client.get(a.get_absolute_url())
self.assertEqual(resp.status_code, 200)
class SongTestCase(KishoreTestCase):
def test_index(self):
resp = self.client.get(reverse('kishore_songs_index'))
self.assertEqual(resp.status_code, 200)
def test_detail(self):
s = Song.objects.get(pk=1)
resp = self.client.get(s.get_absolute_url())
self.assertEqual(resp.status_code, 200)
def test_player_html(self):
with self.settings(KISHORE_AUDIO_PLAYER="kishore.models.SoundcloudPlayer"):
s = Song.objects.get(pk=1)
self.assertTrue(s.get_player_html())
# try non-streamable song
s = Song.objects.get(pk=2)
self.assertFalse(s.get_player_html())
def test_download_link(self):
s = Song.objects.get(pk=1)
self.assertTrue(s.download_link())
# try non-downloadable song
s = Song.objects.get(pk=2)
self.assertFalse(s.download_link())
class ReleaseTestCase(KishoreTestCase):
def test_index(self):
resp = self.client.get(reverse('kishore_releases_index'))
self.assertEqual(resp.status_code, 200)
def test_detail(self):
r = Release.objects.get(pk=1)
resp = self.client.get(r.get_absolute_url())
self.assertEqual(resp.status_code, 200)
def test_player_html(self):
with self.settings(KISHORE_AUDIO_PLAYER="kishore.models.SoundcloudPlayer"):
r = Release.objects.get(pk=1)
self.assertTrue(r.get_player_html())
# try non-streamable
r = Release.objects.get(pk=2)
self.assertFalse(r.get_player_html())
|
msegado/edx-platform
|
lms/djangoapps/monitoring/__init__.py
|
Python
|
agpl-3.0
| 41
| 0
|
"""
LMS specific monitoring helpers.
"""
| |
etherkit/OpenBeacon2
|
macos/venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/rthooks/pyi_rth_enchant.py
|
Python
|
gpl-3.0
| 968
| 0.004132
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2020, PyInstaller Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: Apache-2.0
#-----------------------------------------------------------------------------
import os
import sys
# On Mac OS X tell enchant library where to look for enchant backends (aspell, myspell, ...).
# Enchant is looking for backends in directory 'PREFIX/lib/enchant'
# Note: env. var. ENCHANT_PREFIX_DIR is implemented only in the development version:
# https://github.com/AbiWord/enchant
# https://github.com/AbiWord/enchant/pull/2
# TODO Test this rthook.
if sys.platform.startswith('darwin'):
os.environ['ENCHANT_PREFIX_DIR'] = os.path.join(sys._MEIPASS, 'enchant')
|
intel-analytics/analytics-zoo
|
pyzoo/zoo/chronos/autots/model/auto_seq2seq.py
|
Python
|
apache-2.0
| 4,638
| 0.002803
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.orca.automl.model.base_pytorch_model import PytorchModelBuilder
from zoo.orca.automl.auto_estimator import AutoEstimator
from zoo.chronos.model.Seq2Seq_pytorch import model_creator
from .base_automodel import BasePytorchAutomodel
class AutoSeq2Seq(BasePytorchAutomodel):
def __init__(self,
input_feature_num,
output_target_num,
past_seq_len,
future_seq_len,
optimizer,
loss,
metric,
lr=0.001,
lstm_hidden_dim=128,
lstm_layer_num=2,
dropout=0.25,
teacher_forcing=False,
backend="torch",
logs_dir="/tmp/auto_seq2seq",
cpus_per_trial=1,
name="auto_seq2seq",
remote_dir=None,
):
"""
Create an AutoSeq2Seq.
:param input_feature_num: Int. The number of features in the input
:param output_target_num: Int. The number of targets in the output
:param past_seq_len: Int. The number of historical steps used for forecasting.
:param future_seq_len: Int. The number of future steps to forecast.
:param optimizer: String or pyTorch optimizer creator function or
tf.keras optimizer instance.
:param loss: String or pytorch/tf.keras loss instance or pytorch loss creator function.
:param metric: String. The evaluation metric name to optimize. e.g. "mse"
:param lr: float or hp sampling function from a float space. Learning rate.
e.g. hp.choice([0.001, 0.003, 0.01])
:param lstm_hidden_dim: LSTM hidden channel for decoder and encoder.
e.g. hp.grid_search([32, 64, 128])
:param lstm_layer_num: LSTM layer number for decoder and encoder.
e.g. hp.randint(1, 4)
:param dropout: float or hp sampling function from a float space. Dropout
rate. e.g. hp.uniform(0.1, 0.3)
:param teacher_forcing: Whether to use teacher forcing in training. e.g. hp.choice([True, False])
:param backend: The backend of the Seq2Seq model. We only support backend as "torch"
for now.
:param logs_dir: Local directory to save logs and results. It defaults to
"/tmp/auto_seq2seq"
:param cpus_per_trial: Int. Number of cpus for each trial. It defaults to 1.
:param name: name of the AutoSeq2Seq. It defaults to "auto_seq2seq"
:param remote_dir: String. Remote directory to sync training results and checkpoints. It
defaults to None and doesn't take effects while running in local. While running in
cluster, it defaults to "hdfs:///tmp/{name}".
"""
super().__init__()
# todo: support search for past_seq_len.
# todo: add input check.
if backend != "torch":
raise ValueError(f"We only support backend as torch. Got {backend}")
self.search_space = dict(
input_feature_num=input_feature_num,
output_feature_num=output_target_num,
past_seq_len=past_seq_len,
future_seq_len=future_seq_len,
lstm_hidden_dim=lstm_hidden_dim,
lstm_layer_num=lstm_layer_num,
lr=lr,
dropout=dropout,
teacher_forcing=teacher_forcing
)
self.metric = metric
model_builder = PytorchModelBuilder(model_creator=model_creator,
optimizer_creator=optimizer,
loss_creator=loss,
)
self.auto_est = AutoEstimator(model_builder=model_builder,
logs_dir=logs_dir,
resources_per_trial={"cpu": cpus_per_trial},
remote_dir=remote_dir,
name=name)
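# --- Editor's note: a minimal construction sketch, not part of the original
# file. It assumes the hp search-space helpers used in the docstring examples
# are importable (in analytics-zoo of this era, `from zoo.orca.automl import
# hp`); adjust the import to your installation.
#
#   from zoo.orca.automl import hp
#   auto_model = AutoSeq2Seq(input_feature_num=4, output_target_num=1,
#                            past_seq_len=24, future_seq_len=5,
#                            optimizer="Adam", loss="mse", metric="mse",
#                            lr=hp.choice([0.001, 0.003, 0.01]),
#                            lstm_hidden_dim=hp.grid_search([32, 64, 128]),
#                            dropout=hp.uniform(0.1, 0.3))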
|
GadgeurX/NetworkLiberator
|
Daemon/Utils.py
|
Python
|
gpl-3.0
| 1,119
| 0.007149
|
import netifaces
from netaddr import *
for inter in netifaces.interfaces():
addrs = netifaces.ifaddresses(inter)
try:
print(addrs)
print(addrs[netifaces.AF_INET][0]["addr"])
print(addrs[netifaces.AF_INET][0]["broadcast"])
print(addrs[netifaces.AF_INET][0]["netmask"])
local_ip = addrs[netifaces.AF_INET][0]["addr"]
broadcast = addrs[netifaces.AF_INET][0]["broadcast"]
netmask = addrs[netifaces.AF_INET][0]["netmask"]
mac = addrs[netifaces.AF_LINK][0]["addr"]
gws = netifaces.gateways()
gateway = gws['default'][netifaces.AF_INET][0]
interface = inter
ips = []
for ip in IPNetwork(broadcast + '/' + str(IPNetwork('0.0.0.0/' + netmask).prefixlen)).iter_hosts():
ips.append(str(ip))
except:
print("Error")
def get_lan_ip():
global local_ip
return local_ip
def get_broadcast_ip():
global broadcast
return broadcast
def get_all_ips():
global ips
return ips
def get_gateway():
global gateway
return gateway
def get_mac():
global mac
return mac
|
DES-SL/EasyLens
|
easylens/Scripts/des_script.py
|
Python
|
mit
| 1,168
| 0.006849
|
__author__ = 'sibirrer'
#this file is meant to be a shell script to be run with Monch cluster
# set up the scene
from cosmoHammer.util.MpiUtil import MpiPool
import time
import sys
import pickle
import dill
start_time = time.time()
#path2load = '/mnt/lnec/sibirrer/input.txt'
path2load = str(sys.argv[1])
f = open(path2load, 'rb')
[lensDES, walkerRatio, n_burn, n_run, mean_start, sigma_start, lowerLimit, upperLimit, path2dump] = dill.load(f)
f.close()
end_time = time.time()
#print end_time - start_time, 'time used for initialisation'
# run the computation
from easylens.Fitting.mcmc import MCMC_sampler
sampler = MCMC_sampler(lensDES, fix_center=False)
samples = sampler.mcmc_CH(walkerRatio, n_run, n_burn, mean_start, sigma_start, lowerLimit, upperLimit, threadCount=1, init_pos=None, mpi_monch=True)
# save the output
pool = MpiPool(None)
if pool.isMaster():
f = open(path2dump, 'wb')
pickle.dump(samples, f)
f.close()
end_time = time.time()
print(end_time - start_time, 'total time needed for computation')
print('Result saved in:', path2dump)
print('============ CONGRATULATION, YOUR JOB WAS SUCCESSFUL ================ ')
|
SCM-NV/qmworks-namd
|
scripts/pyxaid/plot_average_energy.py
|
Python
|
mit
| 3,318
| 0
|
#! /usr/bin/env python
"""
This program plots the average electronic energy during a NAMD simulation
averaged over several initial conditions.
It plots both the SH and SE population based energies.
Example:
plot_average_energy.py -p . -nstates 26 -nconds 6
Note that the number of states is the same as given in the pyxaid output.
It must include the ground state as well.
"""
import numpy as np
import os
import matplotlib.pyplot as plt
import argparse
def plot_stuff(outs, pops):
"""
energies - a vector of energy values that can be plotted
"""
dim_x = np.arange(outs.shape[0])
plot = np.column_stack((outs, pops))
plt.xlabel('Time (fs)')
plt.ylabel('Energy (eV)')
plt.plot(dim_x, plot[:, 0:])
fileName = "Average_Energy.png"
plt.savefig(fileName, format='png', dpi=300)  # save before show(); show() can clear the canvas
plt.show()
def read_energies(path, fn, nstates, nconds):
inpfile = os.path.join(path, fn)
cols = tuple(range(5, nstates * 2 + 5, 2))
xs = np.stack(np.loadtxt(f'{inpfile}{j}', usecols=cols)
for j in range(nconds)).transpose()
# Rows = timeframes ; Columns = states ; tensor = initial conditions
xs = xs.swapaxes(0, 1)
return xs
def read_pops(path, fn, nstates, nconds):
inpfile = os.path.join(path, fn)
cols = tuple(range(3, nstates * 2 + 3, 2))
xs = np.stack(np.loadtxt(f'{inpfile}{j}', usecols=cols)
for j in range(nconds)).transpose()
# Rows = timeframes ; Columns = states ; tensor = initial conditions
xs = xs.swapaxes(0, 1)
return xs
def main(path_output, nstates, nconds):
outs = read_pops(path_output, 'out', nstates, nconds)
pops = read_pops(path_output, 'me_pop', nstates, nconds)
energies = read_energies(path_output, 'me_energies', nstates, nconds)
# Weighted state energy for a given SH or SH population at time t
eav_outs = energies * outs
eav_pops = energies * pops
# Ensemble average over initial conditions of the electronic energy
# as a function of time
el_ene_outs = np.average(np.sum(eav_outs, axis=1), axis=1)
el_ene_pops = np.average(np.sum(eav_pops, axis=1), axis=1)
# Ensemble average scaled to the lowest excitation energy.
# This way the cooling converge to 0.
lowest_hl_gap = np.average(np.amin(energies[:, 1:, :], axis=1), axis=1)
ene_outs_ref0 = el_ene_outs - lowest_hl_gap
ene_pops_ref0 = el_ene_pops - lowest_hl_gap
plot_stuff(ene_outs_ref0, ene_pops_ref0)
def read_cmd_line(parser):
"""
Parse Command line options.
"""
args = parser.parse_args()
attributes = ['p', 'nstates', 'nconds']
return [getattr(args, p) for p in attributes]
# ============<>===============
if __name__ == "__main__":
msg = "plot_states_pops -p <path/to/output>\
-nstates <number of states computed>\
-nconds <number of initial conditions>"
parser = argparse.ArgumentParser(description=msg)
parser.add_argument('-p', required=True,
help='path to the Hamiltonian files in Pyxaid format')
parser.add_argument('-nstates', type=int, required=True,
help='Number of states')
parser.add_argument('-nconds', type=int, required=True,
help='Number of initial conditions')
main(*read_cmd_line(parser))
|
nanjj/softlayer-python
|
SoftLayer/CLI/firewall/detail.py
|
Python
|
mit
| 1,514
| 0
|
"""Detail firewall."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import firewall
from SoftLayer.CLI import formatting
from SoftLayer import utils
@click.command()
@click.argument('identifier')
@environment.pass_env
def cli(env, identifier):
"""Detail firewall."""
mgr = SoftLayer.FirewallManager(env.client)
firewall_type, firewall_id = firewall.parse_id(identifier)
if firewall_type == 'vlan':
rules = mgr.get_dedicated_fwl_rules(firewall_id)
else:
rules = mgr.get_standard_fwl_rules(firewall_id)
env.fout(get_rules_table(rules))
def get_rules_table(rules):
"""Helper to format the rules into a table.
:param list rules: A list containing the rules of the firewall
:returns: a formatted table of the firewall rules
"""
table = formatting.Table(['#', 'action', 'protocol', 'src_ip', 'src_mask',
'dest', 'dest_mask'])
table.sortby = '#'
for rule in rules:
table.add_row([
rule['orderValue'],
rule['action'],
rule['protocol'],
rule['sourceIpAddress'],
utils.lookup(rule, 'sourceIpSubnetMask'),
'%s:%s-%s' % (rule['destinationIpAddress'],
rule['destinationPortRangeStart'],
rule['destinationPortRangeEnd']),
utils.lookup(rule, 'destinationIpSubnetMask')])
return table
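# Editor's note: a minimal sketch, not part of the original module, showing
# the rule-dict shape that get_rules_table() expects; the field values are
# illustrative, and in normal use the table is rendered via env.fout() as in
# cli() above.
if __name__ == '__main__':
    sample_rules = [{
        'orderValue': 1,
        'action': 'permit',
        'protocol': 'tcp',
        'sourceIpAddress': '0.0.0.0',
        'sourceIpSubnetMask': '0.0.0.0',
        'destinationIpAddress': '10.0.0.5',
        'destinationIpSubnetMask': '255.255.255.255',
        'destinationPortRangeStart': 80,
        'destinationPortRangeEnd': 443,
    }]
    table = get_rules_table(sample_rules)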
|
TheIoTLearningInitiative/CodeLabs
|
Sandbox/Edison_Bluetooth/projects/gardening-system/spp.py
|
Python
|
apache-2.0
| 3,138
| 0.015296
|
#!/usr/bin/python
# Python modules imports
from optparse import OptionParser, make_option
import pyupm_grove as g
import os, sys, socket, uuid, dbus, dbus.service
import dbus.mainloop.glib
import gardening_system  # editor's note: uncommented -- used in Profile.NewConnection below
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
# Set up constants
BUS_NAME = 'org.bluez'
AGENT_INTERFACE = 'org.bluez.Agent1'
PROFILE_INTERFACE = 'org.bluez.Profile1'
# Trusted device function
def set_trusted(path):
props = dbus.Interface(bus.get_object("org.bluez", path), "org.freedesktop.DBus.Properties")
props.Set("org.bluez.Device1", "Trusted", True)
# Agent class
class Agent(dbus.service.Object):
@dbus.service.method(AGENT_INTERFACE, in_signature="ou", out_signature="")
def RequestConfirmation(self, device, passkey):
print("\nEnsure this passkey matches with the one in your device: %06d\nPress [ENTER] to continue" % passkey)
set_trusted(device)
return
#Profile class
class Profile(dbus.service.Object):
fd = -1
@dbus.service.method(PROFILE_INTERFACE, in_signature="oha{sv}", out_signature="")
def NewConnection(self, path, fd, properties):
self.fd = fd.take()
device_path = os.path.basename(path)
print("\nConnected to %s\nPress [ENTER] to continue" % device_path)
server_sock = socket.fromfd(self.fd, socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.settimeout(1)
server_sock.send("Hello, this is Edison!")
try:
while True:
try:
data = server_sock.recv(1024)
gardening_system.function(data)
if data == 'b':
server_sock.send(gardening_system.requestData())
except socket.timeout:
pass
gardening_system.myProgram()
except IOError:
pass
server_sock.close()
print("\nYour device is now disconnected\nPress [ENTER] to continue")
def bluetoothConnection():
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
obj = bus.get_object(BUS_NAME, "/org/bluez");
profile_manager = dbus.Interface(obj, "org.bluez.ProfileManager1")
profile_path = "/foo/bar/profile"
auto_connect = {"AutoConnect": False}
profile_uuid = "1101"
profile = Profile(bus, profile_path)
profile_manager.RegisterProfile(profile_path, profile_uuid, auto_connect)
mainloop = GObject.MainLoop()
mainloop.run()
if __name__ == '__main__':
# Generic dbus config
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
obj = bus.get_object(BUS_NAME, "/org/bluez");
# Agent config
agent_capability = "KeyboardDisplay"
agent_path = "/test/agent"
agent = Agent(bus, agent_path)
agent_manager = dbus.Interface(obj, "org.bluez.AgentManager1")
agent_manager.RegisterAgent(agent_path, agent_capability)
agent_manager.RequestDefaultAgent(agent_path)
# Mainloop
mainloop = GObject.MainLoop()
mainloop.run()
|
thinkl33t/mqtt2telegram
|
scripts/test.py
|
Python
|
lgpl-3.0
| 54
| 0.018519
|
#!../venv/bin/python
import sys
print(sys.argv[1:])
| |
Osmose/kitsune
|
kitsune/search/tests/test_search_advanced.py
|
Python
|
bsd-3-clause
| 31,926
| 0
|
import json
from datetime import datetime, timedelta
from django.contrib.contenttypes.models import ContentType
from nose.tools import eq_
from kitsune import search as constants
from kitsune.access.tests import permission
from kitsune.forums.tests import forum, post, restricted_forum, thread
from kitsune.products.tests import product, topic
from kitsune.questions.tests import question, answer, answervote, questionvote
from kitsune.search.tests.test_es import ElasticTestCase
from kitsune.sumo.tests import LocalizingClient
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import group, user
from kitsune.wiki.tests import document, revision, helpful_vote
class AdvancedSearchTests(ElasticTestCase):
client_class = LocalizingClient
def test_json_format(self):
"""JSON without callback should return application/json"""
response = self.client.get(reverse('search'), {
'q': 'bookmarks',
'a': '1',
'format': 'json',
}, follow=True)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/json')
def test_json_callback_validation(self):
"""Various json callbacks -- validation"""
response = self.client.get(reverse('search'), {
'q': 'bookmarks',
'a': '1',
'format': 'json',
'callback': 'callback',
}, follow=True)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/x-javascript')
def test_json_empty_query_a_1(self):
"""Empty query returns JSON format"""
response = self.client.get(reverse('search'), {
'format': 'json', 'a': 1,
}, follow=True)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/json')
def test_json_empty_query_a_2(self):
"""Empty query asking for form returns 400"""
# Test with flags for advanced search or not
response = self.client.get(reverse('search'), {
'format': 'json', 'a': 2,
}, follow=True)
eq_(response.status_code, 400)
eq_(response['Content-Type'], 'application/json')
def test_search_products(self):
p = product(title=u'Product One', slug='product', save=True)
doc1 = document(title=u'cookies', locale='en-US', category=10,
save=True)
revision(document=doc1, is_approved=True, save=True)
doc1.products.add(p)
doc1.save()
self.refresh()
response = self.client.get(
reverse('search.advanced'),
{'a': '1', 'product': 'product', 'q': 'cookies', 'w': '1'})
assert "We couldn't find any results for" not in response.content
eq_(200, response.status_code)
assert 'Product One' in response.content
def test_search_multiple_products(self):
p = product(title=u'Product One', slug='product-one', save=True)
p2 = product(title=u'Product Two', slug='product-two', save=True)
doc1 = document(title=u'cookies', locale='en-US', category=10,
save=True)
revision(document=doc1, is_approved=True, save=True)
doc1.products.add(p)
doc1.products.add(p2)
doc1.save()
self.refresh()
response = self.client.get(reverse('search.advanced'), {
'a': '1',
'product': ['product-one', 'product-two'],
'q': 'cookies',
'w': '1',
})
assert "We couldn't find any results for" not in response.content
eq_(200, response.status_code)
assert 'Product One, Product Two' in response.content
def test_wiki_no_query(self):
"""Tests advanced search with no query"""
doc = document(locale=u'en-US', category=10, save=True)
doc.tags.add(u'desktop')
revision(document=doc, is_approved=True, save=True)
self.refresh()
response = self.client.get(reverse('search.advanced'), {
'q': '', 'tags': 'desktop', 'w': '1', 'a': '1',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
def test_questions_sortby(self):
"""Tests advanced search for questions with a sortby"""
question(title=u'tags tags tags', save=True)
self.refresh()
# Advanced search for questions with sortby set to 3 which is
# '-replies' which is different between Sphinx and ES.
response = self.client.get(reverse('search.advanced'), {
'q': 'tags', 'tags': 'desktop', 'w': '2', 'a': '1', 'sortby': '3',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
def test_sortby_documents_helpful(self):
"""Tests advanced search with a sortby_documents by helpful"""
r1 = revision(is_approved=True, save=True)
r2 = revision(is_approved=True, save=True)
helpful_vote(revision=r2, helpful=True, save=True)
# Note: We have to wipe and rebuild the index because new
# helpful_votes don't update the index data.
self.setup_indexes()
self.reindex_and_refresh()
# r2.document should come first with 1 vote.
response = self.client.get(reverse('search.advanced'), {
'w': '1', 'a': '1', 'sortby_documents': 'helpful',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(r2.document.title, content['results'][0]['title'])
# Vote twice on r1, now it should come first.
helpful_vote(revision=r1, helpful=True, save=True)
helpful_vote(revision=r1, helpful=True, save=True)
self.setup_indexes()
self.reindex_and_refresh()
response = self.client.get(reverse('search.advanced'), {
'w': '1', 'a': '1', 'sortby_documents': 'helpful',
'format': 'json'})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(r1.document.title, content['results'][0]['title'])
def test_questions_num_votes(self):
"""Tests advanced search for questions num_votes filter"""
q = question(title=u'tags tags tags', save=True)
# Add two question votes
questionvote(question=q, save=True)
questionvote(question=q, save=True)
self.refresh()
# Advanced search for questions with num_votes > 5. The above
# question should be not in this set.
response = self.client.get(reverse('search.advanced'), {
'q': '', 'tags': 'desktop', 'w': '2', 'a': '1',
'num_voted': 2, 'num_votes': 5,
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 0)
# Advanced search for questions with num_votes < 1. The above
# question should be not in this set.
response = self.client.get(reverse('search.advanced'), {
'q': '', 'tags': 'desktop', 'w': '2', 'a': '1',
'num_voted': 1, 'num_votes': 1,
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 0)
def test_num_votes_none(self):
"""Tests num_voted filtering where num_votes is ''"""
q = question(save=True)
questionvote(question=q, save=True)
self.refresh()
qs = {'q': '', 'w': 2, 'a': 1, 'num_voted': 2, 'num_votes': ''}
response = self.client.get(reverse('search.advanced'), qs)
eq_(200, response.status_code)
def test_forums_search(self):
"""This tests whether forum posts show up in searches"""
thread1 = thread(title=u'crash', save=True)
post(thread=thread1, save=True)
self.refresh()
response = self.client.get(reverse('search.advanced'), {
'author': '', 'created': '0', 'created_date': '',
|
paulovn/artifact-manager
|
lib/artmgr/transport/local.py
|
Python
|
gpl-2.0
| 4,422
| 0.015604
|
# ********************************************************************** <====
from artmgr.transport.basew import BaseWTransport
# ********************************************************************** ====>
import os
import sys
import errno
import stat
import re
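# Editor's note: InvalidArgumentError is raised below but never defined or
# imported in the original file; a minimal stand-in is assumed here so the
# module is self-contained. Replace it if artmgr ships its own exception type.
class InvalidArgumentError(ValueError):
    """Raised when a transport receives an invalid argument."""
    pass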
# chunksize for reading/writing local files
CHUNK = 8192
# ---------------------------------------------------------------------
def mkpath_recursive(path):
"""Test a local path and, if it does not exist, create it recursively"""
try:
mode = os.stat( path ).st_mode
if not stat.S_ISDIR(mode):
raise InvalidArgumentError("parent path '"+str(path)+"' not a dir")
except OSError as e:
if e.errno != errno.ENOENT:
raise
(head,tail) = os.path.split( path )
if head:
mkpath_recursive( head )
os.mkdir( path )
# ---------------------------------------------------------------------
class LocalTransport( BaseWTransport ):
"""
A full R/W transport instance that uses a locally visible directory to
store and read all artifact data
"""
def __init__( self, basedir, subrepo ):
"""
Constructor
@param basedir (str): local folder to use
@param subrepo (str): name of the repository we are dealing with
"""
if not basedir:
raise InvalidArgumentError("Empty basedir in local transport")
if not subrepo:
raise InvalidArgumentError("Empty subrepo in local transport")
self._basedir = os.path.join(basedir,subrepo)
super(LocalTransport,self).__init__()
def init_base( self ):
"""Ensure the base path for the repository exists"""
mkpath_recursive( self._basedir )
def get( self, sourcename, dest ):
"""
Read a file into a file-like destination.
@param sourcename (str): name of the file in remote repo
@param dest (file): an object with a write() method
@return (bool): \c True if ok, \c False if the file does not exist
"""
name = os.path.join(self._basedir,sourcename)
try:
with open(name, 'rb') as f:
while True:
bytes = f.read( CHUNK )
if not bytes:
break
dest.write( bytes )
return True
except IOError as e:
if e.errno == errno.ENOENT:
return False
raise
def otype( self, path ):
"""
Given the path of an object, return:
* 'F' for a file,
* 'D' for a directory,
* \c None if the path does not exist
"""
oldname = os.path.join(self._basedir,path)
try:
mode = os.stat( oldname ).st_mode
except OSError as e:
if e.errno == errno.ENOENT:
return None
raise
return 'D' if stat.S_ISDIR(mode) else 'F' if stat.S_ISREG(mode) else '?'
def put( self, source, destname ):
"""
Store a file. If a file with the same name existed, it is overwritten
@param source (file): an object with a read() method
@param destname (str): name of the destination file,
relative to repo base directory
"""
name = os.path.join(self._basedir,destname)
with open(name, 'wb') as f:
while True:
bytes = source.read( CHUNK )
if not bytes:
break
f.write( bytes )
def delete( self, filename ):
"""
Delete a file
"""
name = os.path.join(self._basedir,filename)
os.unlink( name )
def rename( self, oldname, newname ):
"""
Rename a file into a new name and/or folder
"""
oldname = os.path.join(self._basedir,oldname)
newname = os.path.join(self._basedir,newname)
os.rename( oldname, newname )
def folder_create( self, path ):
"""
Make a folder in the repository, assuming all parent folders exist
"""
os.mkdir( os.path.join(self._basedir,path) )
def folder_list( self, path ):
"""
Return the list of all components (files & folders) in a folder
*This method is optional*
"""
return os.listdir( os.path.join(self._basedir,path) )
|
haiy/XF_PRISM
|
src/XF-Prism/rc_generator.py
|
Python
|
gpl-3.0
| 1,587
| 0.028986
|
#author :haiyfu
#date:April 14
#description:
#contact:[email protected]
"""
This little part is to check how many different values in
a column and store the unique values in a list.
For FCBF initially.
The last column is the class .
"""
from sys import argv
#only count the target file and return
#a list structure which contains the detail
#information, like [23, [[1,23],[11,233]], 34 ]
#Here are the corresponding meanings
#[attribute_number,[ [first-column-different-values] [2nd-df-val] ],line_num]
def rc_gn(sn):
fin=open(sn)
atrn=len(fin.readline().split(","))
#Initialize the result list
fin.seek(0,0)
rc=[]
rc.append(atrn)
rc.append([])
l=fin.readline().strip("\r \n ").split(",")
for x in l:
rc[1].append([x])
count=0
for l in fin:
l=l.strip("\n \r").split(",")
idx=0
if(len(l)<rc[0]):
break
for x in l:
if x not in rc[1][idx]:
rc[1][idx].append(x)
rc[1][idx].sort()
idx=idx+1
count=count+1
#print rc
rc.append(count+1)
fin.close()
return rc
def wrt_rc(rc,tn):
#print rc
ft=open(tn,"w")
#class info
ft.write(str(len(rc[1][-1]))+","+",".join(rc[1][-1])+".\n" )
#attribure number
ft.write(str( rc[0]-1 )+"\n")
#every attribure info
for x in range(rc[0]-1):
sl="A"+str(x+1)+" - "+",".join(rc[1][x])+".\n"
ft.write(sl)
ft.close()
if __name__=="__main__":
script_nm,src_file,out_file=argv
wrt_rc(rc_gn(src_file),out_file)
|
PAIR-code/lit
|
lit_nlp/components/scrambler.py
|
Python
|
apache-2.0
| 2,899
| 0.00276
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Simple scrambling test generator."""
import copy
import random
from typing import List, Text, Optional
from lit_nlp.api import components as lit_components
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import model as lit_model
from lit_nlp.api import types
from lit_nlp.lib import utils
JsonDict = types.JsonDict
FIELDS_TO_SCRAMBLE_KEY = 'Fields to scramble'
class Scrambler(lit_components.Generator):
"""Scramble all words in an example to generate a new example."""
@staticmethod
def scramble(val: Text) -> Text:
words = val.split(' ')
random.shuffle(words)
return ' '.join(words)
def config_spec(self) -> types.Spec:
return {
FIELDS_TO_SCRAMBLE_KEY:
types.MultiFieldMatcher(
spec='input',
types=['TextSegment'],
select_all=True),
}
def generate(self,
example: JsonDict,
model: lit_model.Model,
dataset: lit_dataset.Dataset,
config: Optional[JsonDict] = None) -> List[JsonDict]:
"""Naively scramble all words in an example.
Note: Even if more than one field is to be scrambled, only a single example
will be produced, unlike other generators which will produce multiple
examples, one per field.
Args:
example: the example used for basis of generated examples.
model: the model.
dataset: the dataset.
config: user-provided config properties.
Returns:
examples: a list of generated examples.
"""
del model # Unused.
config = config or {}
# If config key is missing, generate no examples.
fields_to_scramble = list(config.get(FIELDS_TO_SCRAMBLE_KEY, []))
if not fields_to_scramble:
return []
# TODO(lit-dev): move this to generate_all(), so we read the spec once
# instead of on every example.
text_keys = utils.find_spec_keys(dataset.spec(), types.TextSegment)
if not text_keys:
return []
text_keys = [key for key in text_keys if key in fields_to_scramble]
new_example = copy.deepcopy(example)
for text_key in text_keys:
new_example[text_key] = self.scramble(example[text_key])
return [new_example]
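# Editor's note: a small sketch, not part of the original file, exercising the
# scramble() helper directly; generate() additionally needs a LIT dataset whose
# spec declares TextSegment fields.
if __name__ == '__main__':
    random.seed(0)  # make the shuffle deterministic for the demo
    print(Scrambler.scramble('the quick brown fox'))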
|
Tigge/platinumshrimp
|
utils/str_utils.py
|
Python
|
mit
| 1,370
| 0.00073
|
import re
import html
# The regular string.split() only takes a max number of splits,
# but it won't unpack if there aren't enough values.
# This function ensures that we always get the wanted
# number of returned values, even if the string doesn't include
# as many splits values as we want, simply by filling in extra
# empty strings at the end.
#
# Some examples:
# split("a b c d", " ", 3) = ["a", "b", "c d"]
# split("a b c" , " ", 3) = ["a", "b", "c"]
# split("a b", " ", 3) = ["a", "b", ""]
def split(s, sep, count):
return (s + ((count - 1 - s.count(sep)) * sep)).split(sep, count - 1)
# Sanitize a string by removing all new lines and extra spaces
def sanitize_string(s):
return " ".join(s.split()).strip()
# Unescape HTML/XML entities
def unescape_entities(text):
def replace_entity(match):
try:
if match.group(1) in html.entities.name2codepoint:
return chr(html.entities.name2codepoint[match.group(1)])
elif match.group(1).lower().startswith("#x"):
return chr(int(match.group(1)[2:], 16))
elif match.group(1).startswith("#"):
return chr(int(match.group(1)[1:]))
except (ValueError, KeyError):
pass # Fall through to default return
return match.group(0)
return re.sub(r"&([#a-zA-Z0-9]+);", replace_entity, text)
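# Editor's note: a small usage sketch, not part of the original file.
if __name__ == "__main__":
    print(split("a b", " ", 3))                 # ['a', 'b', '']
    print(sanitize_string("  a \n b   c  "))    # 'a b c'
    print(unescape_entities("Tom &amp; Jerry"))  # 'Tom & Jerry'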
|
marteinn/The-Big-Username-Blacklist-Python
|
setup.py
|
Python
|
mit
| 1,848
| 0.001083
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
import re
from setuptools import setup
if sys.argv[-1] == "publish":
os.system("python setup.py sdist upload")
sys.exit()
packages = [
"the_big_username_blacklist"
]
# Handle requirements
install_requires = []
tests_requires = [
"pytest==3.0.5",
]
# Convert markdown to rst
try:
from pypandoc import convert
long_description = convert("README.md", "rst")
except:
long_description = ""
version = ''
with io.open('the_big_username_blacklist/__init__.py', 'r', encoding='utf-8') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
setup(
name="the_big_username_blacklist",
version=version,
description="Validate usernames against a blacklist",  # NOQA
long_description=long_description,
author="Martin Sandström",
author_email="[email protected]",
url="https://github.com/marteinn/the-big-username-blacklist-python",
packages=packages,
package_data={"": ["LICENSE", ], "the_big_username_blacklist": ["*.txt"]},
package_dir={"the_big_username_blacklist": "the_big_username_blacklist"},
include_package_data=True,
install_requires=install_requires,
license="MIT",
zip_safe=False,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: PyPy"
],
)
|
dmccloskey/SBaaS_COBRA
|
SBaaS_COBRA/stage02_physiology_graphData_execute.py
|
Python
|
mit
| 4,655
| 0.017401
|
#SBaaS
from .stage02_physiology_graphData_io import stage02_physiology_graphData_io
from SBaaS_models.models_COBRA_execute import models_COBRA_execute
from .stage02_physiology_analysis_query import stage02_physiology_analysis_query
#System
import copy
class stage02_physiology_graphData_execute(stage02_physiology_graphData_io):
def execute_findShortestPaths(self,
analysis_id_I,
algorithms_params_I,
nodes_startAndStop_I,
exclusion_list_I=[],
weights_I=[],
):
'''
compute the shortest paths
INPUT:
model_id_I
algorithms_params_I
nodes_startAndStop_I
simulation_id_I
exclusion_list_I
OUTPUT:
'''
exCOBRA01 = models_COBRA_execute(self.session,self.engine,self.settings);
exCOBRA01.initialize_supportedTables();
physiology_analysis_query = stage02_physiology_analysis_query(self.session,self.engine,self.settings);
physiology_analysis_query.initialize_supportedTables();
data_O=[];
data_graphs_O=[];
rows = physiology_analysis_query.getJoin_analysisID_dataStage02PhysiologyAnalysisAndSimulation(analysis_id_I);
for row in rows:
weights = [];
if type(weights_I)==type([]):
weights = weights_I;
weights_str = '[]';
elif type(weights_I)==type(''):
if weights_I == 'stage02_physiology_sampledData_query':
weights = self.import_graphWeights_sampledData(row['simulation_id']);
weights_str = 'stage02_physiology_sampledData_query';
elif weights_I == 'stage02_physiology_simulatedData_query':
weights = self.import_graphWeights_simulatedData(row['simulation_id']);
weights_str = 'stage02_physiology_simulatedData_query';
else:
print('weights source not recognized');
# run the analysis for different algorithms/params
for ap in algorithms_params_I:
shortestPaths = exCOBRA01.execute_findShortestPath_nodes(
row['model_id'],
nodes_startAndStop_I = nodes_startAndStop_I,
algorithm_I=ap['algorithm'],
exclusion_list_I=exclusion_list_I,
params_I=ap['params'],
weights_I=weights
)
for sp in shortestPaths:
tmp = {};
tmp['analysis_id']=analysis_id_I
tmp['simulation_id']=row['simulation_id']
tmp['weights']=weights_str;
tmp['used_']=True;
tmp['comment_']=None;
tmp['params']=sp['params']
tmp['path_start']=sp['start']
tmp['algorithm']=sp['algorithm']
tmp['path_stop']=sp['stop']
tmp1 = copy.copy(tmp);
tmp1['path_n']=sp['path_n']
tmp1['path_iq_1']=sp['path_iq_1']
tmp1['path_var']=sp['path_var']
tmp1['path_ci_lb']=sp['path_ci_lb']
tmp1['path_cv']=sp['path_cv']
tmp1['path_iq_3']=sp['path_iq_3']
tmp1['path_ci_ub']=sp['path_ci_ub']
tmp1['path_average']=sp['path_average']
tmp1['path_max']=sp['path_max']
tmp1['path_median']=sp['path_median']
tmp1['path_ci_level']=sp['path_ci_level']
tmp1['path_min']=sp['path_min']
data_O.append(tmp1);
for path in sp['all_paths']:
tmp2 = copy.copy(tmp);
tmp2['paths']=path;
data_graphs_O.append(tmp2);
#for sp in shortestPaths:
#dict_keys(['stop', 'params', 'path_n', 'all_paths', 'path_iq_1', 'path_var', 'path_ci_lb', 'path_cv', 'path_iq_3', 'path_ci_ub', 'path_average', 'path_max', 'path_median', 'start', 'algorithm', 'path_ci_level', 'path_min'])
# str = "start: %s, stop: %s, min: %s, max: %s, average: %s, " \
# %(sp['start'],sp['stop'],sp['path_min'],
# sp['path_max'],sp['path_average'])
# print(str)
self.add_rows_table('data_stage02_physiology_graphData_shortestPathStats',data_O);
self.add_rows_table('data_stage02_physiology_graphData_shortestPaths',data_graphs_O);
|
aterrel/dynd-python
|
dynd/ndt/dim_helpers.py
|
Python
|
bsd-2-clause
| 4,145
| 0.000965
|
from __future__ import absolute_import, division, print_function
from dynd._pydynd import w_type, \
make_var_dim, make_strided_dim, make_fixed_dim, make_cfixed_dim
__all__ = ['var', 'strided', 'fixed', 'cfixed']
class _Dim(object):
__slots__ = []
def __mul__(self, rhs):
if isinstance(rhs, w_type):
# Apply all the dimensions to produce a type
for dim in reversed(self.dims):
rhs = dim.create(rhs)
return rhs
elif isinstance(rhs, (str, type)):
# Allow:
# ndt.strided * 'int32'
# ndt.strided * int
rhs = w_type(rhs)
for dim in reversed(self.dims):
rhs = dim.create(rhs)
return rhs
elif isinstance(rhs, _Dim):
# Combine the dimension fragments
return _DimFragment(self.dims + rhs.dims)
else:
raise TypeError('Expected a dynd dimension or type, not %r' % rhs)
def __pow__(self, count):
return _DimFragment(self.dims * count)
class _DimFragment(_Dim):
__slots__ = ['dims']
def __init__(self, dims):
self.dims = dims
def __repr__(self):
return ' * '.join(repr(dim) for dim in self.dims)
class _Var(_Dim):
"""
Creates a var dimension when combined with other types.
Examples
--------
>>> ndt.var * ndt.int32
ndt.type('var * int32')
>>> ndt.fixed[5] * ndt.var * ndt.float64
ndt.type('5 * var * float64')
"""
__slots__ = []
@property
def dims(self):
return (self,)
def create(self, eltype):
return make_var_dim(eltype)
def __repr__(self):
return 'ndt.var'
class _Strided(_Dim):
"""
Creates a strided dimension when combined with other types.
Examples
--------
>>> ndt.strided * ndt.int32
ndt.type('strided * int32')
>>> ndt.fixed[5] * ndt.strided * ndt.float64
ndt.type('5 * strided * float64')
"""
__slots__ = []
@property
def dims(self):
return (self,)
def create(self, eltype):
return make_strided_dim(eltype)
def __repr__(self):
return 'ndt.strided'
class _Fixed(_Dim):
"""
Creates a fixed dimension when combined with other types.
Examples
--------
>>> ndt.fixed[3] * ndt.int32
ndt.type('3 * int32')
>>> ndt.fixed[5] * ndt.var * ndt.float64
ndt.type('5 * var * float64')
"""
__slots__ = ['dim_size']
def __init__(self, dim_size = None):
self.dim_size = dim_size
@property
def dims(self):
if self.dim_size is not None:
return (self,)
else:
raise TypeError('Need to specify ndt.fixed[dim_size],' +
' not just ndt.fixed')
def create(self, eltype):
return make_fixed_dim(self.dim_size, eltype)
def __getitem__(self, dim_size):
return _Fixed(dim_size)
def __repr__(self):
if self.dim_size is not None:
return 'ndt.fixed[%d]' % self.dim_size
else:
return 'ndt.fixed'
class _CFixed(_Dim):
"""
Creates a cfixed dimension when combined with other types.
Examples
--------
>>> ndt.cfixed[3] * ndt.int32
ndt.type('cfixed[3] * int32')
>>> ndt.fixed[5] * ndt.cfixed[2] * ndt.float64
ndt.type('5 * cfixed[2] * float64')
"""
__slots__ = ['dim_size']
def __init__(self, dim_size = None):
self.dim_size = dim_size
@property
def dims(self):
if self.dim_size is not None:
return (self,)
else:
raise TypeError('Need to specify ndt.cfixed[dim_size],' +
' not just ndt.cfixed')
def create(self, eltype):
return make_cfixed_dim(self.dim_size, eltype)
def __getitem__(self, dim_size):
return _CFixed(dim_size)
def __repr__(self):
if self.dim_size is not None:
return 'ndt.cfixed[%d]' % self.dim_size
else:
return 'ndt.cfixed'
var = _Var()
strided = _Strided()
fixed = _Fixed()
cfixed = _CFixed()
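# Editor's note: a small usage sketch, not part of the original file; it
# mirrors the doctest examples above and assumes a working dynd install.
if __name__ == '__main__':
    print(fixed[5] * var * 'float64')    # ndt.type('5 * var * float64')
    fragment = fixed[3] * strided        # dimension fragments are reusable
    print(fragment * 'int32')            # ndt.type('3 * strided * int32')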
|
Alphalink/netbox
|
netbox/tenancy/models.py
|
Python
|
apache-2.0
| 1,852
| 0.00216
|
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.urls import reverse
from django.utils.encoding import python_2_unicode_compatible
from extras.models import CustomFieldModel, CustomFieldValue
from utilities.models import CreatedUpdatedModel
from utilities.utils import csv_format
@python_2_unicode_compatible
class TenantGroup(models.Model):
"""
An arbitrary collection of Tenants.
"""
name = models.CharField(max_length=50, unique=True)
slug = models.SlugField(unique=True)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return "{}?group={}".format(reverse('tenancy:tenant_list'), self.slug)
@python_2_unicode_compatible
class Tenant(CreatedUpdatedModel, CustomFieldModel):
"""
A Tenant represents an organization served by the NetBox owner. This is typically a customer or an internal
department.
"""
name = models.CharField(max_length=30, unique=True)
slug = models.SlugField(unique=True)
group = models.ForeignKey('TenantGroup', related_name='tenants', blank=True, null=True, on_delete=models.SET_NULL)
description = models.CharField(max_length=100, blank=True, help_text="Long-form name (optional)")
comments = models.TextField(blank=True)
custom_field_values = GenericRelation(CustomFieldValue, content_type_field='obj_type', object_id_field='obj_id')
class Meta:
ordering = ['group', 'name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('tenancy:tenant', args=[self.slug])
def to_csv(self):
return csv_format([
self.name,
self.slug,
self.group.name if self.group else None,
self.description,
])
|
neuroticnerd/armory
|
armory/phone/lookup.py
|
Python
|
apache-2.0
| 2,647
| 0.000756
|
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
import requests
import json
import logging
from bs4 import BeautifulSoup as htmldoc
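# Editor's note: jsonify() is called in CarrierLookup.lookup() below but never
# defined in the original file; a minimal stand-in is assumed here.
def jsonify(obj):
    return json.dumps(obj, indent=2, sort_keys=True)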
def carrier_lookup():
return None
class CarrierLookup(object):
def __init__(self, number, logname=None):
self.number = number
self._logname = logname if logname else ''
self.log = logging.getLogger(self._logname)
def lookup(self):
log = self.log
domain = 'www.twilio.com'
host = 'https://{0}'.format(domain)
lookup = '{0}/lookup'.format(host)
# masquerade as OS-X Firefox
s = requests.Session()
s.headers['user-agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:37.0) Gecko/20100101 Firefox/37.0'
s.headers['x-requested-with'] = 'XMLHttpRequest'
s.headers['accept-language'] = 'en-US,en;q=0.5'
s.headers['cache-control'] = 'no-cache'
s.headers['content-type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
s.headers['host'] = domain
s.headers['DNT'] = '1'
s.headers['connection'] = 'close'
# fetch the base page to set the cookies and get csrf and sid values
r = s.get(lookup)
hdrs = {k: v for k, v in s.headers.iteritems()}
cookies = [{c.name: c.value} for c in s.cookies]
log.debug('\nsession headers: {0}\n'.format(jsonify(hdrs)))
log.debug('\nsession cookies: {0}\n'.format(jsonify(cookies)))
if not cookies:
log.error('unknown error accessing base page: {0}'.format(lookup))
log.error('ERROR: {0}'.format(r.status_code))
log.error(r.text)
raise ValueError()
# extract the csrf and sid
page = htmldoc(r.text)
token = page.find('meta', attrs={'name': 'csrfToken'})
if token is None:
log.debug(r.text)
csrf = token['content']
log.debug('NAME={0} CONTENT={1}'.format(token['name'], csrf))
sid_attrs = {'type': 'hidden', 'role': 'visitorSid'}
role = page.find('input', attrs=sid_attrs)
sid = role['value']
log.debug('ROLE={0} VALUE={1}'.format(role['role'], sid))
# retrieve the phone number information
s.headers['referer'] = lookup
params = {
'Type': 'lookup',
'PhoneNumber': "{0}".format(self.number),
'VisitorSid': sid,
'CSRF': csrf,
}
log.debug('\nparams: {0}\n'.format(jsonify(params)))
url = '{0}/functional-demos'.format(host)
r = s.post(url, params=params)
info = json.loads(r.content)
return info
|
simone-campagna/py-configment
|
src/configment/configment.py
|
Python
|
apache-2.0
| 5,069
| 0.001578
|
#!/usr/bin/env python
#
# Copyright 2014 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Configment interface
>>> class TestCfg(Configment):
... CONFIGSPEC_SOURCE = '''
... [abc]
... x = integer(default=3)
... '''
>>> cfg = TestCfg()
>>> cfg["abc"]["x"]
3
>>>
"""
import os
import validate
import six
from .configobj_wrap import ConfigObjWrap
from .meta_configment import MetaConfigment
from .configment_validator import ConfigmentValidator
from .pathname import Pathname
from .environment import load_configspec
__author__ = "Simone Campagna"
__all__ = [
'create_configment_class',
'Configment',
'ConfigmentValidateError',
]
class ConfigmentValidateError(validate.ValidateError):
def __str__(self):
return "validation failed: {}".format(self.args[0])
class BaseConfigment(ConfigObjWrap):
CONFIGSPEC = None
DEFAULT_MODE_HIDE = "hide"
DEFAULT_MODE_SHOW = "show"
DEFAULT_MODES = [DEFAULT_MODE_HIDE, DEFAULT_MODE_SHOW]
DEFAULT_MODE = DEFAULT_MODE_HIDE
def __init__(self, filename=None, default_mode=None):
super(BaseConfigment, self).__init__(
infile=None,
configspec=self.__class__.CONFIGSPEC,
unrepr=True,
interpolation=False,
indent_type=" ",
stringify=True,
)
if default_mode is None:
default_mode = self.DEFAULT_MODE
self.default_mode = default_mode
self.set_filename(filename)
if self.filename is not None:
self.load_file(filename, throw_on_errors=True)
else:
self.initialize(throw_on_errors=False)
def set_filename(self, filename=None):
super(BaseConfigment, self).set_filename(filename)
if self.filename is None:
self._base_dir = os.getcwd()
else:
self._base_dir = os.path.dirname(os.path.abspath(filename))
def do_validation(self, base_dir=None, reset=False, throw_on_errors=False):
if base_dir is None:
base_dir = self._base_dir
validator = ConfigmentValidator()
copy = self.default_mode == self.DEFAULT_MODE_SHOW
result = super(BaseConfigment, self).validate(validator, preserve_errors=True, copy=copy)
result = self.filter_validation_result(result)
self.set_paths(base_dir, reset=reset)
if throw_on_errors and result:
raise ConfigmentValidateError(result)
c_result = ConfigObjWrap(
infile=result,
stringify=True,
unrepr=True,
indent_type=' ',
)
return c_result
@six.add_metaclass(MetaConfigment)
class Configment(BaseConfigment):
def __init__(self, filename=None, default_mode=None):
super(Configment, self).__init__(
filename=filename,
default_mode=default_mode,
)
def impl_initialize(self, throw_on_errors=False):
try:
return self.do_validation(reset=False, throw_on_errors=throw_on_errors)
except: # pylint: disable=bare-except
return False
def impl_load_file(self, filename, throw_on_errors=False):
default_base_dir = Pathname.get_default_base_dir()
Pathname.set_default_base_dir(self._base_dir)
self.set_filename(filename)
self.reload()
try:
result = self.do_validation(base_dir=self._base_dir, reset=True, throw_on_errors=throw_on_errors)
finally:
Pathname.set_default_base_dir(default_base_dir)
return result
def impl_dump_s(self, stream=None, filename=None, throw_on_errors=False):
default_base_dir = Pathname.get_default_base_dir()
try:
if filename is not None:
base_dir = os.path.dirname(os.path.normpath(os.path.abspath(filename)))
else:
base_dir = self._base_dir
Pathname.set_default_base_dir(base_dir)
self.do_validation(base_dir=base_dir, reset=False, throw_on_errors=throw_on_errors)
self.write(stream)
finally:
Pathname.set_default_base_dir(default_base_dir)
def create_configment_class(configspec_filename, class_name=None, dir_list=None):
if class_name is None:
class_name = os.path.splitext(os.path.basename(configspec_filename))[0]
class_bases = (Configment, )
class_dict = {
'CONFIGSPEC_SOURCE': load_configspec(configspec_filename, dir_list=dir_list),
}
return MetaConfigment(class_name, class_bases, class_dict)
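# Editor's note: a minimal usage sketch, not part of the original file; the
# configspec and config filenames are hypothetical.
#
#   MyAppCfg = create_configment_class('myapp.configspec')
#   cfg = MyAppCfg('myapp.cfg')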
|
tvwenger/millennium-compact-groups
|
compact_group.py
|
Python
|
gpl-3.0
| 5,674
| 0.003877
|
"""
compact_group.py - Part of millennium-compact-groups package
Defines CompactGroup object to handle information about a single
compact group.
Copyright(C) 2016 by
Trey Wenger; [email protected]
Chris Wiens; [email protected]
Kelsey Johnson; [email protected]
GNU General Public License v3 (GNU GPLv3)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
14 Mar 2016 - TVW Finalized version 1.0
"""
_PACK_NAME = 'millennium-compact-groups'
_PROG_NAME = 'compact_group.py'
_VERSION = 'v1.0'
# System utilities
import sys
import os
# Numerical utilities
import numpy as np
import pandas
class CompactGroup:
"""
Compact Group Object
"""
def __init__(self, label, members):
"""
Initialize CompactGroup Object
"""
self.label = label
self.members = members
self.median_vel = 0.0
self.mediod = None
self.radius = 0.0
self.avg_mvir = 0.0
self.avg_stellarmass = 0.0
self.num_nearby_galaxies = 0
self.neighbors = []
self.annular_mass_ratio = 0.0
self.secondtwo_mass_ratio = 0.0
def find_dwarfs(self,dwarf_limit):
"""
Find galaxies that have a stellar mass less than dwarf_limit
"""
# add a is_dwarf column to members
self.members['is_dwarf'] = np.zeros(len(self.members),dtype=bool)
# assign dwarfs
ind = self.members['stellarMass'] < dwarf_limit
self.members.ix[ind,'is_dwarf'] = True
def calc_median_velocity(self):
"""
Calculate the median velocity of galaxies in this group
"""
good = (~self.members['is_dwarf'])
vels = (self.members['velX']*self.members['velX'] +
self.members['velY']*self.members['velY'] +
self.members['velZ']*self.members['velZ'])**0.5
# add a velocity2 column to members
self.members['vel'] = vels
self.median_vel = np.median(vels[good])
def find_flybys(self,crit_velocity):
"""
Find galaxies that are travelling crit_velocity faster or
slower than median velocity of group. These are "fly-bys"
"""
# add a is_flyby column to members
self.members['is_flyby'] = np.zeros(len(self.members),dtype=bool)
# assign flybys
ind = np.abs(self.members['vel'] - self.median_vel) > crit_velocity
self.members.ix[ind,'is_flyby'] = True
def calc_mediod(self):
"""
Calculate the mediod center of this group, excluding
dwarfs and flybys
"""
good = ((~self.members['is_dwarf'])&(~self.members['is_flyby']))
x_med = np.median(self.members['x'][good])
y_med = np.median(self.members['y'][good])
z_med = np.median(self.members['z'][good])
self.mediod = np.array([x_med,y_med,z_med])
def calc_radius(self):
"""
Calculate the radius of this group, defined as the
maximum galaxy distance from the mediod, excluding
dwarfs and flybys
"""
good = ((~self.members['is_dwarf'])&(~self.members['is_flyby']))
xdist = self.members['x'][good]-self.mediod[0]
ydist = self.members['y'][good]-self.mediod[1]
zdist = self.members['z'][good]-self.mediod[2]
dists = (xdist*xdist + ydist*ydist + zdist*zdist)**0.5
self.radius = np.max(dists)
def calc_avg_mvir(self):
"""
Calculate the average virial mass of galaxies in this group
excluding dwarfs and flybys
"""
good = ((~self.members['is_dwarf'])&(~self.members['is_flyby']))
if np.sum(good) == 0:
self.avg_mvir = np.nan
else:
self.avg_mvir = np.mean(self.members['mvir'][good])
def calc_avg_stellarmass(self):
"""
Calculate the average stellar mass of galaxies in this group
excluding dwarfs and flybys
"""
good = ((~self.members['is_dwarf'])&(~self.members['is_flyby']))
if np.sum(good) == 0:
self.avg_stellarmass = np.nan
else:
self.avg_stellarmass = np.mean(self.members['stellarMass'][good])
def calc_annular_mass_ratio(self,radius):
"""
Calculate the virial mass ratio
of neighboring galaxies within the surrounding annulus to the
total virial mass of all galaxies within the sphere
"""
# mass of cluster
sphere_mass = np.sum(self.members['mvir'])
sphere_mass = sphere_mass / (4.*np.pi/3. * self.radius**3.)
# mass in annulus
annulus_mass = np.sum(self.neighbors['mvir'])
annulus_mass = annulus_mass/(4.*np.pi/3. * (radius**3. - self.radius**3.))
self.annular_mass_ratio = annulus_mass/sphere_mass
def calc_secondtwo_mass_ratio(self):
"""
Calculate the ratio of the virial masses of the second largest
members to the virial mass of the largest member
"""
sorted_masses = np.sort(self.members['mvir'])
self.secondtwo_mass_ratio = (sorted_masses[-2]+sorted_masses[-3])/sorted_masses[-1]
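# Editor's note: a minimal usage sketch, not part of the original file. The
# column names follow those accessed by the methods above, the values are
# illustrative, and (like the class itself) it needs a pandas version that
# still provides DataFrame.ix.
if __name__ == "__main__":
    members = pandas.DataFrame({
        'x': [0.0, 1.0], 'y': [0.0, 1.0], 'z': [0.0, 1.0],
        'velX': [100.0, 110.0], 'velY': [0.0, 0.0], 'velZ': [0.0, 0.0],
        'mvir': [1.0e12, 5.0e11], 'stellarMass': [1.0e10, 5.0e9]})
    cg = CompactGroup('group0', members)
    cg.find_dwarfs(dwarf_limit=1.0e8)
    cg.calc_median_velocity()
    cg.find_flybys(crit_velocity=1000.0)
    cg.calc_mediod()
    cg.calc_radius()
    print(cg.mediod, cg.radius)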
|
7sDream/zhihu-oauth
|
test/test_client_people_badge.py
|
Python
|
mit
| 762
| 0
|
from .test_base_class import ZhihuClientClassTest
PEOPLE_SLUG = 'giantchen'
class TestPeopleBadgeNumber(ZhihuClientClassTest):
def test_badge_topics_number(self):
self.assertEqual(
len(list(self.client.people(PEOPLE_SLUG).badge.topics)), 2,
)
def test_people_has_badge(self):
self.assertTrue(self.client.people(PEOPLE_SLUG).badge.has_badge)
def test_people_has_identity(self):
self.assertFalse(self.client.people(PEOPLE_SLUG).badge.has_identity)
def test_people_is_best_answerer_or_not(self):
self.assertTrue(self.client.people(PEOPLE_SLUG).badge.is_best_answerer)
def test_people_identify_information(self):
self.assertIsNone(self.client.people(PEOPLE_SLUG).badge.identity)
|
jhnphm/xbs_xbd
|
python/scripts/test.py
|
Python
|
gpl-3.0
| 122
| 0.008197
|
#! /bin/python
import xbh as xbhpkg
xbh = xbhpkg.Xbh()
#xbh.switch_to_app()
xbh.calc_checksum()
print(xbh.get_results())
| |
feroda/django-pro-history
|
current_user/registration.py
|
Python
|
agpl-3.0
| 349
| 0
|
class FieldRegistry(object):
_registry = {}
def add_field(self, model, field):
reg = self.__class__._registry.setdefault(model, [])
reg.append(field)
def get_fields(self, model):
return self.__class__._registry.get(model, [])
def __contains__(self, model):
return model in self.__class__._registry
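# Editor's note: a small usage sketch, not part of the original file; the
# model/field names are placeholders.
if __name__ == "__main__":
    registry = FieldRegistry()
    registry.add_field('myapp.MyModel', 'created_by')
    print(registry.get_fields('myapp.MyModel'))  # ['created_by']
    print('myapp.MyModel' in registry)           # True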
|
tizz98/cs216
|
p11/rand.py
|
Python
|
unlicense
| 1,083
| 0.025854
|
"""
Generates 40 random numbers and writes them
to a file. No number is repeated.
~ Created by Elijah Wilson 2014 ~
"""
# used for generating random integers
from random import randint
# open the output file -> "in.data"
f = open("in.data", "w")
# create an empty list
succ = []
# loops through 40 times for generating numbers
for x in xrange(0,40):
# generate random int between 1111 & 9999
randNum = randint(1111, 9999)
# check to see if it was already generated
if randNum not in succ:
# put the random number in the list
succ.append(str(randNum))
else:
# while the randNum has already been generated
# generate a new one
while randNum in succ:
randNum = randint(1111, 9999)
# put the random number in the list
succ.append(str(randNum))
# loops through 40 times for writing to file
for x in xrange(0,40):
# makes sure it isn't the last line to be written
# to write a new line char
if x != 39:
f.write(succ[x] + "\n")
else:
# if it is the last line to be written
# don't write a new line char
f.write(succ[x])
#close the file
f.close()
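# Editor's note: not part of the original script. The same output can be
# produced more concisely with random.sample(), which guarantees uniqueness
# without the retry loop:
#
#   from random import sample
#   with open("in.data", "w") as f:
#       f.write("\n".join(str(n) for n in sample(range(1111, 10000), 40)))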
|
jordanemedlock/psychtruths
|
temboo/core/Library/Box/Files/ZipFile.py
|
Python
|
apache-2.0
| 4,646
| 0.005166
|
# -*- coding: utf-8 -*-
###############################################################################
#
# ZipFile
# Creates a zipped version of the specified Box file and returns a link to the new compressed file.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ZipFile(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ZipFile Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ZipFile, self).__init__(temboo_session, '/Library/Box/Files/ZipFile')
def new_input_set(self):
return ZipFileInputSet()
def _make_result_set(self, result, path):
return ZipFileResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ZipFileChoreographyExecution(session, exec_id, path)
class ZipFileInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ZipFile
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved during the OAuth2 process.)
"""
super(ZipFileInputSet, self)._set_input('AccessToken', value)
def set_AsUser(self, value):
"""
Set the value of the AsUser input for this Choreo. ((optional, string) The ID of the user. Only used for enterprise administrators to make API calls for their managed users.)
"""
super(ZipFileInputSet, self)._set_input('AsUser', value)
def set_FileID(self, value):
"""
Set the value of the FileID input for this Choreo. ((required, string) The id of the file to zip.)
"""
super(ZipFileInputSet, self)._set_input('FileID', value)
def set_SharedLink(self, value):
"""
Set the value of the SharedLink input for this Choreo. ((conditional, json) A JSON object representing the item?s shared link and associated permissions. See documentation for formatting examples.)
"""
super(ZipFileInputSet, self)._set_input('SharedLink', value)
def set_ZipFileLocation(self, value):
"""
Set the value of the ZipFileLocation input for this Choreo. ((conditional, string) The id of the folder to put the new zip file in. When not specified, the zip file will be put in the root folder.)
"""
super(ZipFileInputSet, self)._set_input('ZipFileLocation', value)
def set_ZipFileName(self, value):
"""
Set the value of the ZipFileName input for this Choreo. ((required, string) The name of the zip file that will be created.)
"""
super(ZipFileInputSet, self)._set_input('ZipFileName', value)
class ZipFileResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ZipFile Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((string) The response from Box. This contains the newly created zip file metadata.)
        """
        return self._output.get('Response', None)
def get_URL(self):
"""
Retrieve the value for the "URL" output from this Choreo execution. ((string) The url for the newly created zip file.)
"""
return self._output.get('URL', None)
class ZipFileChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ZipFileResultSet(response, path)
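# Hedged usage sketch; the credentials and input values below are placeholders,
# and this assumes the standard Temboo session/execution API:
#
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = ZipFile(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccessToken('BOX_ACCESS_TOKEN')
#   inputs.set_FileID('12345')
#   inputs.set_ZipFileName('archive.zip')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_URL())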
|
spatialaudio/python-sounddevice
|
setup.py
|
Python
|
mit
| 2,712
| 0
|
import os
import platform
from setuptools import setup
# "import" __version__
__version__ = 'unknown'
for line in open('sounddevice.py'):
if line.startswith('__version__'):
exec(line)
break
MACOSX_VERSIONS = '.'.join([
'macosx_10_6_x86_64', # for compatibility with pip < v21
'macosx_10_6_universal2',
])
# environment variables for cross-platform package creation
system = os.environ.get('PYTHON_SOUNDDEVICE_PLATFORM', platform.system())
architecture0 = os.environ.get('PYTHON_SOUNDDEVICE_ARCHITECTURE',
platform.architecture()[0])
if system == 'Darwin':
libname = 'libportaudio.dylib'
elif system == 'Windows':
libname = 'libportaudio' + architecture0 + '.dll'
else:
libname = None
if libname and os.path.isdir('_sounddevice_data/portaudio-binaries'):
packages = ['_sounddevice_data']
package_data = {'_sounddevice_data': ['portaudio-binaries/' + libname,
'portaudio-binaries/README.md']}
zip_safe = False
else:
packages = None
package_data = None
zip_safe = True
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError:
cmdclass = {}
else:
class bdist_wheel_half_pure(bdist_wheel):
"""Create OS-dependent, but Python-independent wheels."""
def get_tag(self):
if system == 'Darwin':
oses = MACOSX_VERSIONS
elif system == 'Windows':
if architecture0 == '32bit':
oses = 'win32'
else:
oses = 'win_amd64'
else:
oses = 'any'
return 'py3', 'none', oses
cmdclass = {'bdist_wheel': bdist_wheel_half_pure}
setup(
name='sounddevice',
version=__version__,
py_modules=['sounddevice'],
packages=packages,
package_data=package_data,
zip_safe=zip_safe,
python_requires='>=3',
setup_requires=['CFFI>=1.0'],
install_requires=['CFFI>=1.0'],
extras_require={'NumPy': ['NumPy']},
cffi_modules=['sounddevice_build.py:ffibuilder'],
author='Matthias Geier',
author_email='[email protected]',
description='Play and Record Sound with Python',
long_description=open('README.rst').read(),
license='MIT',
keywords='sound audio PortAudio play record playrec'.split(),
url='http://python-sounddevice.readthedocs.io/',
platforms='any',
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Multimedia :: Sound/Audio',
],
cmdclass=cmdclass,
)
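# Cross-building sketch: the PYTHON_SOUNDDEVICE_* variables read above let the
# wheel metadata be generated for another platform, e.g. (assumed workflow):
#
#   PYTHON_SOUNDDEVICE_PLATFORM=Windows \
#   PYTHON_SOUNDDEVICE_ARCHITECTURE=64bit \
#   python setup.py bdist_wheel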
|
demonchild2112/travis-test
|
grr/server/grr_response_server/databases/mysql_foreman_rules.py
|
Python
|
apache-2.0
| 1,746
| 0.005727
|
#!/usr/bin/env python
"""The MySQL database methods for foreman rule handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import rdfvalue
from grr_response_server import foreman_rules
from grr_response_server.databases import mysql_utils
class MySQLDBForemanRulesMixin(object):
"""MySQLDB mixin for foreman rules related functions."""
@mysql_utils.WithTransaction()
def WriteForemanRule(self, rule, cursor=None):
"""Writes a foreman rule to the database.
"""
query = ("INSERT INTO foreman_rules "
" (hunt_id, expiration_time, rule) "
"VALUES (%s, FROM_UNIXTIME(%s), %s) "
"ON DUPLICATE KEY UPDATE "
" expiration_time=FROM_UNIXTIME(%s), rule=%s")
    exp_str = mysql_utils.RDFDatetimeToTimestamp(rule.expiration_time)
rule_str = rule.SerializeToBytes()
    cursor.execute(query, [rule.hunt_id, exp_str, rule_str, exp_str, rule_str])
@mysql_utils.WithTransaction()
def RemoveForemanRule(self, hunt_id, cursor=None):
query = "DELETE FROM foreman_rules WHERE hunt_id=%s"
cursor.execute(query, [hunt_id])
@mysql_utils.WithTransaction(readonly=True)
def ReadAllForemanRules(self, cursor=None):
cursor.execute("SELECT rule FROM foreman_rules")
res = []
for rule, in cursor.fetchall():
res.append(foreman_rules.ForemanCondition.FromSerializedBytes(rule))
return res
@mysql_utils.WithTransaction()
def RemoveExpiredForemanRules(self, cursor=None):
now = rdfvalue.RDFDatetime.Now()
cursor.execute(
"DELETE FROM foreman_rules WHERE expiration_time < FROM_UNIXTIME(%s)",
[mysql_utils.RDFDatetimeToTimestamp(now)])
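# Hedged composition sketch (class and peer mixin names are illustrative; the
# real database class combining the per-topic mixins lives elsewhere in GRR):
#
#   class MysqlDB(MySQLDBForemanRulesMixin, OtherTopicMixins):
#       """Database implementation assembled from per-topic mixins."""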
|
vivisect/vstruct
|
vstruct2/bases.py
|
Python
|
apache-2.0
| 4,994
| 0.009211
|
import traceback
from vstruct2.compat import int2bytes, bytes2int
# This routine was coppied from vivisect to allow vstruct
# to be free from dependencies
MAX_WORD = 16
def initmask(bits):
return (1<<bits)-1
bitmasks = [ initmask(i) for i in range(MAX_WORD*8) ]
def bitmask(value,bits):
return value & bitmasks[bits]
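# Worked example of the masking helper (safe to evaluate at import time):
assert bitmask(0x1ff, 8) == 0xff  # value truncated to its low 8 bits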
class v_base:
'''
Base class for all VStruct types
'''
def __init__(self):
self._vs_onset = []
self._vs_isprim = True
def __len__(self):
return self.vsSize()
def __bytes__(self):
return self.vsEmit()
def vsOnset(self, callback, *args, **kwargs):
'''
Trigger a callback when the fields value is updated.
NOTE: this callback is triggered during parse() as well
as during value updates.
'''
self._vs_onset.append( (callback,args,kwargs) )
return self
def _fire_onset(self):
for cb,args,kwargs in self._vs_onset:
try:
cb(*args,**kwargs)
except Exception as e:
traceback.print_exc()
class v_prim(v_base):
'''
Base class for all vstruct primitive types
'''
def __init__(self, size=None, valu=None):
v_base.__init__(self)
self._vs_size = size
self._vs_bits = size * 8
self._vs_value = self._prim_norm(valu)
self._vs_parent = None
# on-demand field parsing
self._vs_backfd = None
self._vs_backoff = None
self._vs_backbytes = None
self._vs_writeback = False
def __repr__(self):
return repr(self._prim_getval())
def vsGetTypeName(self):
return self.__class__.__name__
def vsParse(self, bytez, offset=0, writeback=False):
'''
Byte parsing method for VStruct primitives.
'''
self._vs_value = None
self._vs_backoff = offset
self._vs_backbytes = bytez
self._vs_writeback = writeback
retval = offset + self.vsSize()
self._fire_onset()
return retval
def vsLoad(self, fd, offset=0, writeback=False):
self._vs_value = None
self._vs_backfd = fd
self._vs_backoff = offset
self._vs_writeback = writeback
retval = offset + self.vsSize()
self._fire_onset()
return retval
def vsSize(self):
'''
Return the size of the field.
'''
return self._vs_size
def vsResize(self, size):
'''
Resizing callback which can dynamically change the size
of a primitive.
'''
self._vs_size = size
def _prim_setval(self, newval):
valu = self._prim_norm(newval)
self._vs_value = valu
# if requested, write changes back to bytearray / fd
if self._vs_writeback:
byts = self._prim_emit(valu)
if self._vs_backbytes != None:
self._vs_backbytes[ self._vs_backoff:self._vs_backoff + len(byts) ] = byts
if self._vs_backfd != None:
self._vs_backfd.seek( self._vs_backoff )
self._vs_backfd.write( byts )
self._fire_onset()
def _prim_getval(self):
# trigger on-demand parsing if needed
if self._vs_value == None:
if self._vs_backfd:
self._vs_value = self._prim_load(self._vs_backfd, self._vs_backoff)
            elif self._vs_backbytes:
                self._vs_value = self._prim_parse(self._vs_backbytes, self._vs_backoff)
return self._vs_value
def _prim_load(self, fd, offset):
# easy base case...
fd.seek(offset)
byts = fd.read(self._vs_size)
return self._prim_parse(byts, 0)
def vsEmit(self):
return self._prim_emit( self._prim_getval() )
def _prim_norm(self, x):
raise Exception('Implement Me')
def _prim_emit(self, x):
raise Exception('Implement Me')
def _prim_parse(self, bytez, offset):
raise Exception('Implement Me')
class v_int(v_prim):
def __init__(self,valu=0,size=4,endian='little',signed=False,enum=None):
v_prim.__init__(self,valu=valu,size=size)
self._vs_enum = enum
self._vs_endian = endian
self._vs_signed = signed
def __int__(self):
return self._prim_getval()
def __repr__(self):
valu = self._prim_getval()
if self._vs_enum != None:
enum = self._vs_enum[valu]
if enum != None:
return enum
return repr(valu)
def vsResize(self, size):
self._vs_bits = size * 8
return v_prim.vsResize(self,size)
def _prim_emit(self,x):
return int2bytes(x, self._vs_size, byteorder=self._vs_endian, signed=self._vs_signed)
def _prim_norm(self,x):
return bitmask(x,self._vs_bits)
def _prim_parse(self, byts, offset):
return bytes2int(byts, self._vs_size, byteorder=self._vs_endian, signed=self._vs_signed, off=offset)
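# Hedged round-trip sketch for v_int (values are illustrative; assumes the
# standard little-endian behavior of the compat helpers imported above):
#
#   v = v_int(valu=0, size=2)   # 16-bit little-endian, unsigned
#   v.vsParse(b'\x34\x12')      # lazy parse: int(v) == 0x1234
#   v._prim_setval(0x1234)
#   assert v.vsEmit() == b'\x34\x12'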
|
crackhopper/TFS-toolbox
|
tests/core/layer/dropout_test.py
|
Python
|
mit
| 426
| 0.044601
|
import pytest
import tensorflow as tf
import numpy as np
import tfs.core.layer.ops as ops
from tfs.core.layer.dropout import Dropout
from tfs.network import Network
net = Network()
@pytest.fixture
def l():
l = Dropout(
net,
keep_prob=1.0,
)
return l
class TestDropout:
def test_build_inverse(self,l):
_in = tf.zeros([1,10,10,4])
_out=l.build(_in)
assert _out.get_shape().as_list()==[1,10,10,4]
|
luoxufeiyan/python
|
Forec/0015/0015.py
|
Python
|
mit
| 410
| 0.02439
|
# coding: utf-8
__author__ = 'Forec'
import xlwt
import re
book = xlwt.Workbook(encoding = 'utf-8', style_compression=0)
sheet = book.add_sheet('student',cell_overwrite_ok = True)
line = 0
info = re.compile(r'\"(\d+)\" : \"(.*?)\"')
with open('city.txt',"r") as f:
data = f.read()
for x in info.findall(data):
for i in range(len(x)):
sheet.write(line,i,x[i])
line+=1
book.save('city.xls')
|
lixiangning888/whole_project
|
modules/signatures_orginal_20151110/injection_rwx.py
|
Python
|
lgpl-3.0
| 1,229
| 0.004068
|
# Copyright (C) 2014 Optiv, Inc. ([email protected])
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class InjectionRWX(Signature):
name = "injection_rwx"
description = "Creates RWX memory"
severity = 2
confidence = 50
categories = ["injection"]
authors = ["Optiv"]
minimum = "1.2"
evented = True
    def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
filter_apinames = set(["NtAllocateVirtualMemory","NtProtectVirtualMemory","VirtualProtectEx"])
filter_analysistypes = set(["file"])
def on_call(self, call, process):
if call["api"] == "NtAllocateVirtualMemory" or call["api"] == "VirtualProtectEx":
protection = self.get_argument(call, "Protection")
# PAGE_EXECUTE_READWRITE
if protection == "0x00000040":
return True
elif call["api"] == "NtProtectVirtualMemory":
protection = self.get_argument(call, "NewAccessProtection")
# PAGE_EXECUTE_READWRITE
if protection == "0x00000040":
return True
|
mdevaev/slog
|
src/common.py
|
Python
|
gpl-2.0
| 1,483
| 0.002697
|
# -*- mode: python; coding: utf-8; -*-
import os
APP_NAME = "SLog"
VERSION = "0.9.4"
WEBSITE = "http://vialinx.org"
LICENSE = """
SLog is a PyGTK-based GUI for the LightLang SL dictionary.
Copyright 2007 Nasyrov Renat <[email protected]>
This file is part of SLog.
SLog is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
SLog is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SLog; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
INSTALL_PREFIX = "@prefix@"
PIXMAP_DIR = os.path.join(INSTALL_PREFIX, "share", "pixmaps")
LOCALE_DIR = os.path.join(INSTALL_PREFIX, "share", "locale")
DATA_DIR = os.path.join(INSTALL_PREFIX, "share", "slog")
LOGO_ICON = "slog.png"
LOGO_ICON_SPY = "slog_spy.png"
#FTP_LL_URL = "ftp://ftp.lightlang.org.ru/dicts"
FTP_LL_URL = "ftp://etc.edu.ru/pub/soft
|
/for_linux/lightlang"
FTP_DICTS_URL = FTP_LL_URL + "/dicts"
FTP_REPO_URL = FTP_DICTS_URL + "/repodata/primary.xml"
REPO_FILE = os.path.expanduser("~/.config/slog/primary.xml")
SL_TMP_DIR = "/tmp/sl"
def get_icon(filename):
return os.path.join(PIXMAP_DIR, filename)
|
nttcom/eclcli
|
eclcli/identity/v3/user.py
|
Python
|
apache-2.0
| 15,378
| 0
|
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v3 User action implementations"""
import copy
import six
import sys
from keystoneauth1 import exceptions as ks_exc
from eclcli.common import command
from eclcli.common import utils
from eclcli.i18n import _ # noqa
from eclcli.identity import common
class CreateUser(command.ShowOne):
"""Create new user"""
def get_parser(self, prog_name):
parser = super(CreateUser, self).get_parser(prog_name)
parser.add_argument(
'name',
metavar='<name>',
help='New user name',
)
parser.add_argument(
'--domain',
metavar='<domain>',
help='Default domain (name or ID)',
)
parser.add_argument(
'--project',
metavar='<project>',
help='Default project (name or ID)',
)
common.add_project_domain_option_to_parser(parser)
parser.add_argument(
'--password',
metavar='<password>',
help='Set user password',
)
parser.add_argument(
'--password-prompt',
dest="password_prompt",
action="store_true",
help='Prompt interactively for password',
)
parser.add_argument(
'--email',
metavar='<email-address>',
help='Set user email address',
)
parser.add_argument(
'--description',
metavar='<description>',
help='User description',
)
enable_group = parser.add_mutually_exclusive_group()
enable_group.add_argument(
'--enable',
action='store_true',
help='Enable user (default)',
)
enable_group.add_argument(
'--disable',
action='store_true',
help='Disable user',
)
parser.add_argument(
'--or-show',
action='store_true',
help=_('Return existing user'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
project_id = None
if parsed_args.project:
project_id = common.find_project(identity_client,
parsed_args.project,
parsed_args.project_domain).id
domain_id = None
if parsed_args.domain:
            domain_id = common.find_domain(identity_client,
parsed_args.domain).id
enabled = True
if parsed_args.disable:
enabled = False
if parsed_args.password_prompt:
parsed_args.password = utils.get_password(self.app.stdin)
try:
user = identity_client.users.create(
name=parsed_args.name,
domain=domain_id,
default_project=project_id,
password=parsed_args.password,
email=parsed_args.email,
description=parsed_args.description,
enabled=enabled
)
except ks_exc.Conflict as e:
if parsed_args.or_show:
user = utils.find_resource(identity_client.users,
parsed_args.name,
domain_id=domain_id)
self.log.info('Returning existing user %s', user.name)
else:
raise e
user._info.pop('links')
return zip(*sorted(six.iteritems(user._info)))
class DeleteUser(command.Command):
"""Delete user(s)"""
def get_parser(self, prog_name):
parser = super(DeleteUser, self).get_parser(prog_name)
parser.add_argument(
'users',
metavar='<user>',
nargs="+",
help='User(s) to delete (name or ID)',
)
parser.add_argument(
'--domain',
metavar='<domain>',
help='Domain owning <user> (name or ID)',
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
domain = None
if parsed_args.domain:
domain = common.find_domain(identity_client, parsed_args.domain)
for user in parsed_args.users:
if domain is not None:
user_obj = utils.find_resource(identity_client.users,
user,
domain_id=domain.id)
else:
user_obj = utils.find_resource(identity_client.users,
user)
identity_client.users.delete(user_obj.id)
class ListUser(command.Lister):
"""List users"""
def get_parser(self, prog_name):
parser = super(ListUser, self).get_parser(prog_name)
parser.add_argument(
'--domain',
metavar='<domain>',
help='Filter users by <domain> (name or ID)',
)
project_or_group = parser.add_mutually_exclusive_group()
project_or_group.add_argument(
'--group',
metavar='<group>',
help='Filter users by <group> membership (name or ID)',
)
project_or_group.add_argument(
'--project',
metavar='<project>',
help='Filter users by <project> (name or ID)',
)
parser.add_argument(
'--long',
action='store_true',
default=False,
help='List additional fields in output',
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
domain = None
if parsed_args.domain:
domain = common.find_domain(identity_client,
parsed_args.domain).id
group = None
if parsed_args.group:
group = common.find_group(identity_client,
parsed_args.group,
parsed_args.domain).id
if parsed_args.project:
if domain is not None:
project = utils.find_resource(
identity_client.projects,
parsed_args.project,
domain_id=domain
).id
else:
project = utils.find_resource(
identity_client.projects,
parsed_args.project,
).id
assignments = identity_client.role_assignments.list(
project=project)
# NOTE(stevemar): If a user has more than one role on a project
# then they will have two entries in the returned data. Since we
# are looking for any role, let's just track unique user IDs.
user_ids = set()
for assignment in assignments:
if hasattr(assignment, 'user'):
user_ids.add(assignment.user['id'])
# NOTE(stevemar): Call find_resource once we have unique IDs, so
# it's fewer trips to the Identity API, then collect the data.
data = []
for user_id in user_ids:
user = utils.find_resource(identity_client.users, user_id)
data.append(user)
else:
data = identity_client.users.list(
domain=domain,
group=group,
|
jordanemedlock/psychtruths
|
temboo/core/Library/Zendesk/Search/__init__.py
|
Python
|
apache-2.0
| 296
| 0.006757
|
from temboo.Library.Zendesk.Search.SearchAll import SearchAll, SearchAllInputSet, SearchAllResultSet, SearchAllChoreographyExecution
from temboo.Library.Zendesk.Search.SearchAnonymous import SearchAnonymous, SearchAnonymousInputSet, SearchAnonymousResultSet, SearchAnonymousChoreographyExecution
|
navarro0/racing-all-afternoon
|
course.py
|
Python
|
mit
| 3,892
| 0.023124
|
import random
## Course texture colors ##
###########################
class Course(object):
def __init__(self, num):
## Default colors, fall back to these
fog = [0,0,0]
light_road = [0,0,0]
dark_road = [0,0,0]
light_offroad = [0,0,0]
        dark_offroad = [0,0,0]
        light_wall = [0,0,0]
dark_wall = [0,0,0]
light_rumble = [0,0,0]
dark_rumble = [0,0,0]
## Course road geometry
self.geometry = [0,0,0,0,0,0,0,0]
last_seg = 0
## Start with a straightaway by default
        ## Exactly seven "segments" are filled in (the last entry stays 0)
for i in range(7):
## Add a segment that's different from the previous one
self.geometry[i] = last_seg
last_seg += random.choice((-1,1))
if last_seg < 1:
last_seg = 3
elif last_seg > 3:
last_seg = 1
## Length of each segment, larger number = longer strip
self.strip = 3 ## Wall
self.road = 2 ## Road
## Load texture colors to overwrite defaults
f = open("res/stage/%d.dat" %num, "r").readlines()
for line in f:
if line.startswith("fog = "): ## Fog color to fade into
temp = line.strip("fog = ").split(",")
fog = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("light_road = "): ## Light ground strip
temp = line.strip("light_road = ").split(",")
light_road = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("dark_road = "): ## Dark ground strip
temp = line.strip("dark_road = ").split(",")
dark_road = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("light_offroad = "): ## Light offroad strip
temp = line.strip("light_offroad = ").split(",")
light_offroad = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("dark_offroad = "): ## Dark offroad strip
temp = line.strip("dark_offroad = ").split(",")
dark_offroad = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("light_wall = "): ## Light wall strip
temp = line.strip("light_wall = ").split(",")
light_wall = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("dark_wall = "): ## Dark wall strip
temp = line.strip("dark_wall = ").split(",")
dark_wall = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("light_rumble = "): ## Light rumble strip
temp = line.strip("light_rumble = ").split(",")
light_rumble = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("dark_rumble = "): ## Dark rumble strip
temp = line.strip("dark_rumble = ").split(",")
dark_rumble = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("ceiling = "): ## Ceiling
temp = line.strip("ceiling = ").split(",")
ceiling = [int(temp[0]),int(temp[1]),int(temp[2])]
elif line.startswith("strip = "): ## Length of wall segment
self.strip = int(line.strip("strip = "))
elif line.startswith("road = "): ## Length of road segment
self.road = int(line.strip("road = "))
## Define start line, finish line, and dark and light strip lists
white = [200,200,200]
self.start = [white,white,light_wall, white, fog, white]
self.finish = [white, white, dark_wall, white, fog, white]
self.dark_colors = [dark_road, dark_offroad, dark_wall, dark_rumble, fog, None, ceiling]
self.light_colors = [light_road, light_offroad, light_wall, light_rumble, fog, white, ceiling]
|
sssllliang/edx-analytics-pipeline
|
edx/analytics/tasks/tests/acceptance/test_internal_reporting_user.py
|
Python
|
agpl-3.0
| 2,665
| 0.004878
|
"""
End to end test of the internal reporting user table loading task.
"""
import os
import logging
import datetime
import pandas
from luigi.date_interval import Date
from edx.analytics.tasks.tests.acceptance import AcceptanceTestCase
from edx.analytics.tasks.url import url_path_join
log = logging.getLogger(__name__)
class InternalReportingUserLoadAcceptanceTest(AcceptanceTestCase):
"""End-to-end test of the workflow to load the internal reporting warehouse's user table."""
INPUT_FILE = 'location_by_course_tracking.log'
DATE_INTERVAL = Date(2014, 7, 21)
def setUp(self):
super(InternalReportingUserLoadAcceptanceTest, self).setUp()
# Set up the mock LMS databases.
self.execute_sql_fixture_file('load_auth_user_for_internal_reporting_user.sql')
self.execute_sql_fixture_file('load_auth_userprofile.sql')
# Put up the mock tracking log for user locations.
self.upload_tracking_log(self.INPUT_FILE, datetime.datetime(2014, 7, 21))
def test_internal_reporting_user(self):
"""Tests the workflow for the internal reporting user table, end to end."""
self.task.launch([
'LoadInternalReportingUserToWarehouse',
'--interval', self.DATE_INTERVAL.to_string(),
'--user-country-output', url_path_join(self.test_out, 'user'),
'--n-reduce-tasks', str(self.NUM_REDUCERS),
])
self.validate_output()
def validate_output(self):
"""Validates the output, comparing it to a csv of all the expected output from this workflow."""
with self.vertica.cursor() as cursor:
            expected_output_csv = os.path.join(self.data_dir, 'output', 'acceptance_expected_d_user.csv')
expected = pandas.read_csv(expected_output_csv, parse_dates=True)
cursor.execute("SELECT * FROM {schema}.d_user".format(schema=self.vertica.schema_name))
response = cursor.fetchall()
d_user = pandas.DataFrame(response, columns=['user_id', 'user_year_of_birth', 'user_level_of_education',
'user_gender', 'user_email', 'user_username',
'user_account_creation_time',
'user_last_location_country_code'])
try: # A ValueError will be thrown if the column names don't match or the two data frames are not square.
self.assertTrue(all(d_user == expected))
except ValueError:
self.fail("Expected and returned data frames have different shapes or labels.")
|
gangadharkadam/letzerp
|
erpnext/startup/__init__.py
|
Python
|
agpl-3.0
| 986
| 0.002028
|
# -*- coding: utf-8 -*-
# ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# default settings that can be made for a user.
from __future__ import unicode_literals
import frappe
# product_name = "ERPNext"
product_name = "letzERP"
user_defaults = {
"Company": "company",
"Territory": "territory"
}
|
sulantha2006/Processing_Pipeline
|
Utils/PipelineLogger.py
|
Python
|
apache-2.0
| 498
| 0.026104
|
__author__ = 'Sulantha'
import logging
class PipelineLogger:
    logFunctions={'info':logging.info,
'debug':logging.debug,
'warning':logging.warning,
'error':logging.error,
'critical':logging.critical,
'exception':logging.exception}
@staticmethod
def log(moduleName, level, message):
level = level.lower()
        logger = logging.getLogger(moduleName)
        # dispatch the message at the requested level through the named
        # logger, so that the moduleName argument actually takes effect
        getattr(logger, level)(message)
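# Illustrative call (module name and message are placeholders):
#
#   PipelineLogger.log('Preprocessing', 'info', 'pipeline step started')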
|
oblique-labs/pyVM
|
rpython/rlib/test/test_signature.py
|
Python
|
mit
| 10,192
| 0.005789
|
import py
from rpython.rlib.signature import signature, finishsigs, FieldSpec, ClassSpec
from rpython.rlib import types
from rpython.annotator import model
from rpython.rtyper.llannotation import SomePtr
from rpython.annotator.signature import SignatureError
from rpython.translator.translator import TranslationContext, graphof
from rpython.rtyper.lltypesystem import rstr
from rpython.rtyper.annlowlevel import LowLevelAnnotatorPolicy
def annotate_at(f, policy=None):
t = TranslationContext()
t.config.translation.check_str_without_nul = True
a = t.buildannotator(policy=policy)
a.annotate_helper(f, [model.s_ImpossibleValue]*f.func_code.co_argcount, policy=policy)
return a
def sigof(a, f):
# returns [param1, param2, ..., ret]
g = graphof(a.translator, f)
return [a.binding(v) for v in g.startblock.inputargs] + [a.binding(g.getreturnvar())]
def getsig(f, policy=None):
a = annotate_at(f, policy=policy)
return sigof(a, f)
def check_annotator_fails(caller):
exc = py.test.raises(model.AnnotatorError, annotate_at, caller).value
assert caller.func_name in str(exc)
def test_bookkeeping():
@signature('x', 'y', returns='z')
def f(a, b):
return a + len(b)
f.foo = 'foo'
assert f._signature_ == (('x', 'y'), 'z')
assert f.func_name == 'f'
assert f.foo == 'foo'
assert f(1, 'hello') == 6
def test_basic():
@signature(types.int(), types.str(), returns=types.char())
def f(a, b):
return b[a]
assert getsig(f) == [model.SomeInteger(), model.SomeString(), model.SomeChar()]
def test_arg_errors():
@signature(types.int(), types.str(), returns=types.int())
def f(a, b):
return a + len(b)
@check_annotator_fails
def ok_for_body(): # would give no error without signature
f(2.0, 'b')
@check_annotator_fails
def bad_for_body(): # would give error inside 'f' body, instead errors at call
f('a', 'b')
def test_return():
@signature(returns=types.str())
def f():
return 'a'
assert getsig(f) == [model.SomeString()]
@signature(types.str(), returns=types.str())
def f(x):
return x
def g():
return f('a')
a = annotate_at(g)
assert sigof(a, f) == [model.SomeString(), model.SomeString()]
def test_return_errors():
@check_annotator_fails
@signature(returns=types.int())
def int_not_char():
return 'a'
@check_annotator_fails
@signature(types.str(), returns=types.int())
def str_to_int(s):
return s
@signature(returns=types.str())
def str_not_None():
return None
@check_annotator_fails
def caller_of_str_not_None():
return str_not_None()
@py.test.mark.xfail
def test_return_errors_xfail():
@check_annotator_fails
@signature(returns=types.str())
def str_not_None():
return None
def test_none():
@signature(returns=types.none())
def f():
pass
assert getsig(f) == [model.s_None]
def test_float():
@signature(types.longfloat(), types.singlefloat(), returns=types.float())
def f(a, b):
return 3.0
assert getsig(f) == [model.SomeLongFloat(), model.SomeSingleFloat(), model.SomeFloat()]
def test_unicode():
@signature(types.unicode(), returns=types.int())
def f(u):
return len(u)
assert getsig(f) == [model.SomeUnicodeString(), model.SomeInteger()]
def test_str0():
@signature(types.unicode0(), returns=types.str0())
def f(u):
return 'str'
assert getsig(f) == [model.SomeUnicodeString(no_nul=True),
model.SomeString(no_nul=True)]
def test_ptr():
policy = LowLevelAnnotatorPolicy()
@signature(types.ptr(rstr.STR), returns=types.none())
def f(buf):
pass
argtype = getsig(f, policy=policy)[0]
assert isinstance(argtype, SomePtr)
assert argtype.ll_ptrtype.TO == rstr.STR
def g():
f(rstr.mallocstr(10))
getsig(g, policy=policy)
def test_list():
@signature(types.list(types.int()), returns=types.int())
def f(a):
return len(a)
argtype = getsig(f)[0]
assert isinstance(argtype, model.SomeList)
item = argtype.listdef.listitem
assert item.s_value == model.SomeInteger()
assert item.resized == True
@check_annotator_fails
def ok_for_body():
f(['a'])
@check_annotator_fails
def bad_for_body():
f('a')
@signature(returns=types.list(types.char()))
def ff():
return ['a']
@check_annotator_fails
def mutate_broader():
ff()[0] = 'abc'
@check_annotator_fails
def mutate_unrelated():
ff()[0] = 1
@check_annotator_fails
@signature(types.list(types.char()), returns=types.int())
def mutate_in_body(l):
l[0] = 'abc'
return len(l)
def can_append():
l = ff()
l.append('b')
getsig(can_append)
def test_array():
@signature(returns=types.array(types.int()))
def f():
return [1]
rettype = getsig(f)[0]
assert isinstance(rettype, model.SomeList)
item = rettype.listdef.listitem
assert item.s_value == model.SomeInteger()
assert item.resized == False
def try_append():
l = f()
l.append(2)
check_annotator_fails(try_append)
def test_dict():
@signature(returns=types.dict(types.str(), types.int()))
def f():
return {'a': 1, 'b': 2}
rettype = getsig(f)[0]
assert isinstance(rettype, model.SomeDict)
assert rettype.dictdef.dictkey.s_value == model.SomeString()
assert rettype.dictdef.dictvalue.s_value == model.SomeInteger()
def test_instance():
class C1(object):
pass
class C2(C1):
pass
class C3(C2):
pass
@signature(types.instance(C3), returns=types.instance(C2))
def f(x):
assert isinstance(x, C2)
return x
argtype, rettype = getsig(f)
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C3
assert isinstance(rettype, model.SomeInstance)
assert rettype.classdef.classdesc.pyobj == C2
@check_annotator_fails
def ok_for_body():
f(C2())
@check_annotator_fails
def bad_for_body():
f(C1())
@check_annotator_fails
def ok_for_body():
f(None)
def test_instance_or_none():
class C1(object):
pass
class C2(C1):
pass
class C3(C2):
pass
@signature(types.instance(C3, can_be_None=True), returns=types.instance(C2, can_be_None=True))
def f(x):
assert isinstance(x, C2) or x is None
return x
argtype, rettype = getsig(f)
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C3
assert argtype.can_be_None
assert isinstance(rettype, model.SomeInstance)
assert rettype.classdef.classdesc.pyobj == C2
assert rettype.can_be_None
@check_annotator_fails
def ok_for_body():
f(C2())
@check_annotator_fails
def bad_for_body():
f(C1())
def test_self():
@finishsigs
class C(object):
@signature(types.self(), types.self(), returns=types.none())
def f(self, other):
pass
class D1(C):
pass
class D2(C):
pass
def g():
D1().f(D2())
a = annotate_at(g)
argtype = sigof(a, C.__dict__['f'])[0]
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C
def test_self_error():
class C(object):
@signature(types.self(), returns=types.none())
def incomplete_sig_meth(self):
pass
exc = py.test.raises(SignatureError, annotate_at, C.incomplete_sig_meth).value
assert 'incomplete_sig_meth' in str(exc)
    assert 'finishsigs' in str(exc)
def test_any_as_argument():
@signature(types.any(), types.int(), returns=types.float())
def f(x, y):
return x + y
@signature(types.int(), returns=types.float())
def g(x):
        return f(x, x)
sig = getsig(g)
assert sig == [model.SomeInteger(), model.SomeFloat()]
@signature(types.float(), returns=types.float())
def g(x):
return f(x, 4)
    sig = getsig(g)
|
jehomez/pymeadmin
|
prueba_de_inventario.py
|
Python
|
gpl-2.0
| 1,191
| 0.004209
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sin título.py
#
# Copyright 2012 Jesús Hómez <jesus@soneview>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from modelo import Model
#Model().conectar()
#Model().borrar_tablas_para_inventario()
#~ Model().crear_tablas_para_inventario(2011,12)
#~ Model().crear_tabla_entradas()
venta_id = Model().venta_id_max()
new_venta_id = Model().nuevo_venta_id()
print venta_id
print new_venta_id
|
Digital-Preservation-Finland/dpres-siptools
|
siptools/scripts/create_audiomd.py
|
Python
|
lgpl-3.0
| 6,706
| 0
|
"""Command line tool for creating audioMD metadata."""
from __future__ import unicode_literals, print_function
import os
import sys
import click
import six
import audiomd
from siptools.mdcreator import MetsSectionCreator
from siptools.utils import fix_missing_metadata, scrape_file
click.disable_unicode_literals_warning = True
FILEDATA_KEYS = ['audio_data_encoding', 'bits_per_sample',
'data_rate', 'data_rate_mode', 'sampling_frequency']
AUDIOINFO_KEYS = ['duration', 'num_channels']
ALLOW_UNAV = ['audio_data_encoding', 'codec_creator_app',
'codec_creator_app_version', 'codec_name',
'duration', 'num_channels']
ALLOW_ZERO = ['bits_per_sample', 'data_rate', 'sampling_frequency']
@click.command()
@click.argument(
'filename', type=str)
@click.option(
'--workspace', type=click.Path(exists=True),
default='./workspace/',
metavar='<WORKSPACE PATH>',
help="Workspace directory for the metadata files. "
"Defaults to ./workspace/")
@click.option(
'--base_path', type=click.Path(exists=True), default='.',
metavar='<BASE PATH>',
help="Source base path of digital objects. If used, give path to "
"the file in relation to this base path.")
def main(filename, workspace, base_path):
"""Write audioMD metadata for an audio file or streams.
FILENAME: Relative path to the file from current directory or from
--base_path.
"""
create_audiomd(filename, workspace, base_path)
return 0
def create_audiomd(filename, workspace="./workspace/", base_path="."):
"""
Write audioMD metadata for an audio file or streams.
:filename: Audio file path relative to base path
:workspace: Workspace path
:base_path: Base path
"""
filerel = os.path.normpath(filename)
filepath = os.path.normpath(os.path.join(base_path, filename))
creator = AudiomdCreator(workspace)
creator.add_audiomd_md(filepath, filerel)
creator.write()
class AudiomdCreator(MetsSectionCreator):
"""
Subclass of MetsSectionCreator, which generates audioMD metadata for audio
files.
"""
def add_audiomd_md(self, filepath, filerel=None):
"""Create audioMD metadata for a audio file and append it
to self.md_elements.
If a file is not a video container, then the audio stream metadata is
processed in file level. Video container includes streams which need
to be processed separately one at a time.
:filepath: Audio file path
:filerel: Audio file path relative to base path
"""
# Create audioMD metadata
audiomd_dict = create_audiomd_metadata(
filepath, filerel, self.workspace
)
if '0' in audiomd_dict and len(audiomd_dict) == 1:
self.add_md(metadata=audiomd_dict['0'],
filename=(filerel if filerel else filepath))
else:
for index, audio in six.iteritems(audiomd_dict):
self.add_md(metadata=audio,
filename=(filerel if filerel else filepath),
stream=index)
# pylint: disable=too-many-arguments
def write(self, mdtype="OTHER", mdtypeversion="2.0",
othermdtype="AudioMD", section=None, stdout=False,
file_metadata_dict=None,
ref_file="create-audiomd-md-references.jsonl"):
"""
Write AudioMD metadata.
"""
super(AudiomdCreator, self).write(
mdtype=mdtype, mdtypeversion=mdtypeversion,
othermdtype=othermdtype, ref_file=ref_file
)
def create_audiomd_metadata(filename, filerel=None, workspace=None,
streams=None):
"""Creates and returns list of audioMD XML sections.
:filename: Audio file path
:filrel: Audio file path relative to base path
:workspace: Workspace path
:streams: Metadata dict of streams. Will be created if None.
:returns: Dict of AudioMD XML sections.
"""
if streams is None:
(streams, _, _) = scrape_file(filepath=filename,
filerel=filerel,
workspace=workspace,
skip_well_check=True)
fix_missing_metadata(streams, filename, ALLOW_UNAV, ALLOW_ZERO)
audiomd_dict = {}
for index, stream_md in six.iteritems(streams):
if stream_md['stream_type'] != 'audio':
continue
stream_md = _fix_data_rate(stream_md)
file_data_elem = _get_file_data(stream_md)
audio_info_elem = _get_audio_info(stream_md)
audiomd_elem = audiomd.create_audiomd(
file_data=file_data_elem,
audio_info=audio_info_elem
)
audiomd_dict[six.text_type(index)] = audiomd_elem
if not audiomd_dict:
print('The file has no audio streams. No AudioMD metadata created.')
return None
return audiomd_dict
def _fix_data_rate(stream_dict):
"""Changes the data_rate to an integer if it is of a
float type by rounding the number. The value is saved as
a string in the dictionary.
:stream_dict: Metadata dict of a stream
:returns: Fixed metadata dict
"""
for key in stream_dict:
if key == 'data_rate':
data_rate = stream_dict[key]
if data_rate:
try:
data_rate = float(data_rate)
stream_dict['data_rate'] = six.text_type(int(round(data_rate)))
except ValueError:
pass
return stream_dict
def _get_file_data(stream_dict):
"""Creates and returns the fileData XML element.
:stream_dict: Metadata dict of a stream
:returns: AudioMD fileData element
"""
params = {}
for key in FILEDATA_KEYS:
keyparts = key.split('_')
camel_key = keyparts[0] + ''.join(x.title() for x in keyparts[1:])
params[camel_key] = stream_dict[key]
compression = (stream_dict['codec_creator_app'],
stream_dict['codec_creator_app_version'],
stream_dict['codec_name'],
                   stream_dict['codec_quality'])
params['compression'] = audiomd.amd_compression(*compression)
return audiomd.amd_file_data(params)
def _get_audio_info(stream_dict):
"""Creates and returns the audioInfo XML element.
:stream_dict: Metadata dict of a stream
    :returns: AudioMD audioInfo element
"""
return audiomd.amd_audio_info(
duration=stream_dict['duration'],
num_channels=stream_dict['num_channels'])
if __name__ == '__main__':
RETVAL = main() # pylint: disable=no-value-for-parameter
sys.exit(RETVAL)
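# Example invocation (sketch; the audio path is a placeholder):
#
#   python create_audiomd.py audio/track.wav --workspace ./workspace/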
|
jimklo/LearningRegistry
|
LR/lr/controllers/extract.py
|
Python
|
apache-2.0
| 10,398
| 0.009233
|
import logging
import StringIO
from iso8601 import parse_date
from datetime import datetime
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from lr.model.base_model import appConfig
from lr.lib.base import BaseController, render
import json
import ijson
import collections, sys
import math
from urllib2 import urlopen,HTTPError
import lr.lib.helpers as h
log = logging.getLogger(__name__)
import couchdb
class ExtractController(BaseController):
"""REST Controller styled on the Atom Publishing Protocol"""
# To properly map this controller, ensure your config/routing.py
# file has a resource setup:
# map.resource('extract', 'extract')
def _getView(self,view='_all_docs',keys=[], includeDocs=True,startKey=None,endKey=None):
args = {'include_docs':includeDocs}
if len(keys) > 0:
args['keys'] = keys
args['reduce'] = False
args['stale'] = appConfig['couchdb.stale.flag']
if startKey is not None:
args['startkey'] = startKey
if endKey is not None:
args['endkey'] = endKey
db_url = '/'.join([appConfig['couchdb.url'],appConfig['couchdb.db.resourcedata']])
view = h.getResponse(database_url=db_url,view_name=view,**args)
return view
def _convertDateTime(self, dt):
try:
epoch = parse_date("1970-01-01T00:00:00Z")
if isinstance(dt, str) or isinstance(dt,unicode):
dt = parse_date(dt)
dt = dt - epoch
return int(math.floor(dt.total_seconds()))
except:
abort(500,"Invalid Date Format")
def _processRequest(self,startKey, endKey,urlBase, includeDocs=True):
def streamResult(resp):
CHUNK_SIZE=1024
data = resp.read(CHUNK_SIZE)
while len(data) > 0:
yield data
data = resp.read(CHUNK_SIZE)
try:
resp = self._getView(urlBase,startKey=startKey,endKey=endKey,includeDocs=includeDocs)
return streamResult(resp)
except HTTPError as ex:
abort(404, "not found")
def _orderParmaByView(self,params,view):
def makeEndKey(key):
from copy import deepcopy
newkey = deepcopy(key)
#if complex key
if isinstance(newkey, list):
# get last element in key
last = newkey[-1]
# if the last element is a list, just append an empty object to the last element's list
if isinstance(last, list):
last.append({})
# if the last element in an object, it becomes a bit tricky
# *** note that the key parameter MUST have been originally json parsed with the object_pairs_hook=collections.OrderedDict otherwise
# key order won't be guaranteed to be the same as what CouchDB will use!!!!
elif isinstance(last, dict):
lastkey = last.keys()[-1]
# since there's no easy way to increment a float accurately, instead append a new key that 'should' sort after the previous key.
if (isinstance(last[lastkey], float)):
last[lastkey+u'\ud7af'] = None
# if it's something else... this thing should recurse and keep going.
else:
last[lastkey] = makeEndKey(last[lastkey])
# if we got here, it's nothing really special, so we'll just append a {} to newkey
else:
newkey.append({})
# this if to handle the odd case where we have string as either the key or the value of an object in a complex key.
elif isinstance(newkey, (str, unicode, basestring)):
newkey=newkey+u'\ud7af'
# integer... so just increment 1.
elif isinstance(newkey, int):
newkey += 1
# if we skipped everything else - we don't have a strategy to deal with it... so don't
return newkey
def makeStartsWithEndKey(key):
from copy import deepcopy
newkey = deepcopy(key)
# this the base case for keys that are just strings, append the funky unicode char so that it grabs everything from
# "foo" to "foo\ud7af", which is technically the only way we know how to deal with starts with.
if isinstance(newkey, (str, unicode, basestring)):
newkey=newkey+u'\ud7af'
# if this is a complex key, then get the last element and recurse
elif isinstance(newkey, list):
newkey[-1] = makeStartsWithEndKey(newkey[-1])
# if the last element in an object, it becomes a bit tricky, because you must modify the last key, which implies
# order of keys was maintained when the value was originally parsed.
# *** IMPORTANT: The key parameter MUST have been originally json parsed with the object_pairs_hook=collections.OrderedDict otherwise
# *** key order won't be guaranteed to be the same as what CouchDB will use!!!!
            elif isinstance(newkey, dict):
                lastkey = newkey.keys()[-1]
                # take the value from the last key and recurse.
                newkey[lastkey] = makeStartsWithEndKey(newkey[lastkey])
# if we skipped everything else - we don't have a strategy to deal with it as a Starts With key, so just return
else:
newkey = key
return newkey
def hasParamFor(funcName):
if funcName == 'ts' and ('from' in params or 'until' in params):
return True
elif funcName == 'discriminator' and ('discriminator' in params or 'discriminator-starts-with' in params):
return True
elif funcName == 'resource' and ('resource' in params or 'resource-starts-with' in params):
return True
else:
return False
def populateTs(startKey, endKey, pos, isLast):
if 'from' in params:
startKey.append(self._convertDateTime(params['from']))
elif pos == 1:
startKey.append(self._convertDateTime(datetime.min.isoformat() + "Z"))
if 'until' in params:
endKey.append(self._convertDateTime(params['until']))
elif pos == 1:
endKey.append(self._convertDateTime(datetime.utcnow().isoformat()+"Z"))
return startKey, endKey
def populateDiscriminator(startKey, endKey, pos, isLast):
if 'discriminator' in params:
# preserve key order!!!
try:
discriminator = json.loads(params['discriminator'], object_pairs_hook=collections.OrderedDict)
except:
log.error(sys.exc_info()[0])
discriminator = params['discriminator']
startKey.append(discriminator)
endKey.append(discriminator)
elif 'discriminator-starts-with' in params:
# preserve key order!!!
try:
discriminator = json.loads(params['discriminator-starts-with'], object_pairs_hook=collections.OrderedDict)
except:
log.error(sys.exc_info()[0])
discriminator = params['discriminator-starts-with']
startKey.append(discriminator)
endKey.append(discriminator)
endKey = makeStartsWithEndKey(endKey)
return startKey, endKey
# else:
# startKey.append('')
# endKey.append(u'\ud7af')
def populateResource(startKey, endKey, pos, isLast):
if 'resource' in params:
startKey.append(params['resource'])
endKey.append(params['resource'])
elif 'resource-starts-with' in params:
                startKey.append(params['resource-starts-with'])
|
Korotkin/text_processor_sample
|
src/plugins.py
|
Python
|
gpl-3.0
| 821
| 0.004872
|
# -*- coding: utf-8 -*-
__all__ = ("clear_tags", "get_text_from_html", "clear_text")
def clear_tags(obj):
"""
    Remove unused blocks, such as the table of contents and advertisements
"""
SEARCH_TAG = ["div", "table"]
for i in SEARCH_TAG:
res= obj.soup(i)
for row in res:
if row["align"]=="right":
row.clear()
res= obj.soup("title")
if len(res):
res[0].clear()
def join_rows(obj):
"""
Join formatted rows into paragraphs.
    Needs to handle words split across rows.
    Skipped in this solution.
"""
pass
def get_text_from_html(obj):
"""
Return text without html tags
"""
obj.text = obj.soup.get_text()
def clear_text(obj):
"""
    Remove special/unused symbols
"""
obj.text = obj.text.replace("\t", "")
|
yangdongsheng/autotest
|
frontend/tko/models.py
|
Python
|
gpl-2.0
| 30,170
| 0.001624
|
from django.db import models as dbmodels, connection
from django.utils import datastructures
from autotest.frontend.afe import model_logic, readonly_connection
_quote_name = connection.ops.quote_name
class TempManager(model_logic.ExtendedManager):
_GROUP_COUNT_NAME = 'group_count'
def _get_key_unless_is_function(self, field):
if '(' in field:
return field
return self.get_key_on_this_table(field)
def _get_field_names(self, fields, extra_select_fields={}):
field_names = []
for field in fields:
if field in extra_select_fields:
field_names.append(extra_select_fields[field][0])
else:
field_names.append(self._get_key_unless_is_function(field))
return field_names
def _get_group_query_sql(self, query, group_by):
compiler = query.query.get_compiler(using=query.db)
sql, params = compiler.as_sql()
# insert GROUP BY clause into query
group_fields = self._get_field_names(group_by, query.query.extra_select)
group_by_clause = ' GROUP BY ' + ', '.join(group_fields)
group_by_position = sql.rfind('ORDER BY')
if group_by_position == -1:
group_by_position = len(sql)
sql = (sql[:group_by_position] +
group_by_clause + ' ' +
sql[group_by_position:])
return sql, params
def _get_column_names(self, cursor):
"""
Gets the column names from the cursor description. This method exists
so that it can be mocked in the unit test for sqlite3 compatibility.
"""
return [column_info[0] for column_info in cursor.description]
def execute_group_query(self, query, group_by):
"""
Performs the given query grouped by the fields in group_by with the
        given query's extra select fields added. Returns a list of dicts, where
each dict corresponds to single row and contains a key for each grouped
field as well as all of the extra select fields.
"""
sql, params = self._get_group_query_sql(query, group_by)
cursor = readonly_connection.connection().cursor()
cursor.execute(sql, params)
field_names = self._get_column_names(cursor)
        row_dicts = [dict(zip(field_names, row)) for row in cursor.fetchall()]
return row_dicts
def get_count_sql(self, query):
"""
Get the SQL to properly select a per-group count of unique matches for
a grouped query. Returns a tuple (field alias, field SQL)
"""
if query.query.distinct:
pk_field = self.get_key_on_this_table()
count_sql = 'COUNT(DISTINCT %s)' % pk_field
else:
count_sql = 'COUNT(1)'
return self._GROUP_COUNT_NAME, count_sql
def _get_num_groups_sql(self, query, group_by):
group_fields = self._get_field_names(group_by, query.query.extra_select)
query = query.order_by() # this can mess up the query and isn't needed
compiler = query.query.get_compiler(using=query.db)
sql, params = compiler.as_sql()
from_ = sql[sql.find(' FROM'):]
return ('SELECT DISTINCT %s %s' % (','.join(group_fields),
from_),
params)
def _cursor_rowcount(self, cursor):
"""To be stubbed by tests"""
return cursor.rowcount
def get_num_groups(self, query, group_by):
"""
Returns the number of distinct groups for the given query grouped by the
fields in group_by.
"""
sql, params = self._get_num_groups_sql(query, group_by)
cursor = readonly_connection.connection().cursor()
cursor.execute(sql, params)
return self._cursor_rowcount(cursor)
class Machine(dbmodels.Model):
'''
A machine used to run a test
'''
#: A numeric and automatic integer that uniquely identifies a given
#: machine. This is the primary key for the resulting table created
#: from this model.
machine_idx = dbmodels.AutoField(primary_key=True)
#: The name, such as a FQDN, of the machine that run the test. Must be
#: unique.
hostname = dbmodels.CharField(unique=True, max_length=255)
#: the machine group
machine_group = dbmodels.CharField(blank=True, max_length=240)
#: the machine owner
owner = dbmodels.CharField(blank=True, max_length=240)
class Meta:
db_table = 'tko_machines'
class Kernel(dbmodels.Model):
'''
The Linux Kernel used during a test
'''
#: A numeric and automatic integer that uniquely identifies a given
#: machine. This is the primary key for the resulting table created
#: from this model.
kernel_idx = dbmodels.AutoField(primary_key=True)
#: the kernel hash
kernel_hash = dbmodels.CharField(max_length=105, editable=False)
#: base
base = dbmodels.CharField(max_length=90)
#: printable
printable = dbmodels.CharField(max_length=300)
class Meta:
db_table = 'tko_kernels'
class Patch(dbmodels.Model):
'''
A Patch applied to a Linux Kernel source during the build process
'''
#: A reference to a :class:`Kernel`
kernel = dbmodels.ForeignKey(Kernel, db_column='kernel_idx')
#: A descriptive name for the patch
name = dbmodels.CharField(blank=True, max_length=240)
#: The URL where the patch was fetched from
url = dbmodels.CharField(blank=True, max_length=900)
#: hash
the_hash = dbmodels.CharField(blank=True, max_length=105, db_column='hash')
class Meta:
db_table = 'tko_patches'
class Status(dbmodels.Model):
'''
The possible results of a test
These objects are populated automatically from a
:ref:`fixture file <django:initial-data-via-fixtures>`
'''
#: A numeric and automatic integer that uniquely identifies a given
#: machine. This is the primary key for the resulting table created
#: from this model.
status_idx = dbmodels.AutoField(primary_key=True)
#: A short descriptive name for the status. This exact name is searched for
#: while the TKO parser is reading and parsing status files
word = dbmodels.CharField(max_length=30)
class Meta:
db_table = 'tko_status'
class Job(dbmodels.Model, model_logic.ModelExtensions):
"""
A test job, having one or many tests an their results
"""
job_idx = dbmodels.AutoField(primary_key=True)
tag = dbmodels.CharField(unique=True, max_length=100)
label = dbmodels.CharField(max_length=300)
username = dbmodels.CharField(max_length=240)
machine = dbmodels.ForeignKey(Machine, db_column='machine_idx')
queued_time = dbmodels.DateTimeField(null=True, blank=True)
started_time = dbmodels.DateTimeField(null=True, blank=True)
finished_time = dbmodels.DateTimeField(null=True, blank=True)
#: If this job was scheduled through the AFE application, this points
#: to the related :class:`autotest.frontend.afe.models.Job` object
afe_job_id = dbmodels.IntegerField(null=True, default=None)
objects = model_logic.ExtendedManager()
class Meta:
db_table = 'tko_jobs'
class JobKeyval(dbmodels.Model):
job = dbmodels.ForeignKey(Job)
key = dbmodels.CharField(max_length=90)
value = dbmodels.CharField(blank=True, max_length=300)
class Meta:
db_table = 'tko_job_keyvals'
class Test(dbmodels.Model, model_logic.ModelExtensions,
model_logic.ModelWithAttributes):
test_idx = dbmodels.AutoField(primary_key=True)
job = dbmodels.ForeignKey(Job, db_column='job_idx')
test = dbmodels.CharField(max_length=300)
subdir = dbmodels.CharField(blank=True, max_length=300)
kernel = dbmodels.ForeignKey(Kernel, db_column='kernel_idx')
status = dbmodels.ForeignKey(Status, db_column='status')
reason = dbmodels.CharField(blank=True, max_length=3072)
machine = dbmodels.ForeignKey(Machine, db_column='machine_idx')
finished_time = dbmodels.DateTimeField(null=True, blank=True)
started_time = dbmodels.DateTimeField(null=True, blank=True)
    objects = model_logic.ExtendedManager()
|
tvalacarta/tvalacarta
|
python/main-classic/channels/ecuadortv.py
|
Python
|
gpl-3.0
| 680
| 0.010294
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# tvalacarta - XBMC Plugin
# Canal para ecuador tv
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------
import os
import sys
import urlparse,re
import urllib
import datetime
from core import logger
from core import scrapertools
from core.item import Item
import youtube_channel
__channel__ = "ecuadortv"
DEBUG = True
YOUTUBE_CHANNEL_ID = "RTVEcuador"
def isGeneric():
return True
def mainlist(item):
logger.info("tvalacarta.channels.ecuadortv mainlist")
    return youtube_channel.playlists(item, YOUTUBE_CHANNEL_ID)
|
zomux/deepy
|
examples/auto_encoders/recursive_auto_encoder.py
|
Python
|
mit
| 593
| 0.008432
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging, os
logging.basicConfig(level=logging.INFO)
from deepy.networks import RecursiveAutoEncoder
from deepy.trainers import SGDTrainer, LearningRateAnnealer
from util import get_data, VECTOR_SIZE
model_path = os.path.join(os.path.dirname(__file__), "models", "rae1.gz")
if __name__ == '__main__':
model = RecursiveAutoEncoder(input_dim=VECTOR_SIZE, rep_dim=10)
    trainer = SGDTrainer(model)
annealer = LearningRateAnnealer()
    trainer.run(get_data(), epoch_controllers=[annealer])
model.save_params(model_path)
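# Illustrative sketch of the single step a recursive auto-encoder repeats
# (plain numpy, not deepy's API; the weight shapes, scaling, and tanh choice
# are assumptions): two child vectors are merged into one parent code, then
# decoded back, and the reconstruction error is what training minimizes.
import numpy as np

rng = np.random.default_rng(0)
d = 10                                      # mirrors rep_dim above
W_enc = 0.1 * rng.normal(size=(d, 2 * d))   # encoder: 2d -> d
W_dec = 0.1 * rng.normal(size=(2 * d, d))   # decoder: d -> 2d
c1, c2 = rng.normal(size=d), rng.normal(size=d)
children = np.concatenate([c1, c2])
parent = np.tanh(W_enc @ children)          # merge two children into one code
recon = np.tanh(W_dec @ parent)             # reconstruct the children
loss = np.mean((recon - children) ** 2)     # reconstruction error
print("reconstruction MSE:", loss)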
|
CompassionCH/compassion-modules
|
crm_compassion/migrations/12.0.1.0.0/pre-migration.py
|
Python
|
agpl-3.0
| 409
| 0
|
def migrate(cr, version):
if not version:
return
# Replace ids of better_zip by ids of city_zip
cr.execute("""
ALTER TABLE crm_event_compassion
DROP CONSTRAINT crm_event_compassion_zip_id_fkey;
UPDATE crm_event_compassion e
SET
|
zip_id = (
SELECT id FROM res_city_zip
WHERE openupgrade_legacy_12_0_better_zip_id = e.zip_id)
""")
|
|
openstack/nomad
|
cyborg/tests/unit/api/controllers/v1/test_fpga_program.py
|
Python
|
apache-2.0
| 2,179
| 0
|
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from six.moves import http_client
from cyborg.api.controllers.v1.deployables import Deployable
from cyborg.tests.unit.api.controllers.v1 import base as v1_test
from cyborg.tests.unit import fake_deployable
from cyborg.agent.rpcapi import AgentAPI
class TestFPGAProgramController(v1_test.APITestV1):
def setUp(self):
super(TestFPGAProgramController, self).setUp()
self.headers = self.gen_headers(self.context)
self.deployable_uuids = ['0acbf8d6-e02a-4394-aae3-57557d209498']
@mock.patch('cyborg.objects.Deployable.get')
    @mock.patch('cyborg.agent.rpcapi.AgentAPI.program_fpga_with_bitstream')
def test_program(self, mock_program, mock_get_dep):
self.headers['X-Roles'] = 'admin'
self.headers['Content-Type'] = 'application/json'
dep_uuid = self.deployable_uuids[0]
fake_dep = fake_deployable.fake_deployable_obj(self.context,
uuid=dep_uuid)
        mock_get_dep.return_value = fake_dep
mock_program.return_value = None
body = [{"image_uuid": "9a17439a-85d0-4c53-a3d3-0f68a2eac896"}]
response = self.\
patch_json('/accelerators/deployables/%s/program' % dep_uuid,
[{'path': '/program', 'value': body,
'op': 'replace'}],
headers=self.headers)
self.assertEqual(http_client.OK, response.status_code)
data = response.json_body
self.assertEqual(dep_uuid, data['uuid'])
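# A plausible way to run this test module on its own (standard pytest
# invocation; assumes the cyborg test requirements are installed):
#
#     python -m pytest cyborg/tests/unit/api/controllers/v1/test_fpga_program.py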
|
micha-shepher/oervoer-wizard
|
oervoer/order.py
|
Python
|
gpl-3.0
| 2,431
| 0.008638
|
'''
Created on Oct 11, 2014
@author: mshepher
'''
from globals import Globals
class Order(object):
EIGENAAR = 0
DIER = 1
GESLACHT = 2
GECASTREERD = 3
AKTIEF = 4
OVERGANGS = 5
GEWICHT = 6 #numerical
PAKKETKG = 7 #float
SOORT = 8
PUP = 9
RAS = 10
def __init__(self,order):
'''order = line from csv file, unparsed'''
rec = order.strip().split(',')
self.base = rec[:self.RAS+1]
self.owner, self.animal = rec[:self.GESLACHT]
self.weight = float(rec[self.GEWICHT])
self.package = float(rec[self.PAKKETKG])
self.kind = rec[self.SOORT].upper()
self.ras = rec[self.RAS].upper()
rest = rec[self.RAS+1:]
if '|' in rest:
splitter = rest.index('|')
self.donts = [i.upper() for i in rest[:splitter]]
self.prefers = [i.upper() for i in rest[splitter+1:]]
else:
self.donts = [i.upper() for i in rest]
self.prefers = []
self.factor = 1.0
self.result = None
self.portie = 'beide'
def get_prefers(self):
return self.prefers
def set_prefers(self, value):
self.prefers = value
def get_base(self):
return ','.join(self.base)
def is_allergic(self,stuff):
        '''True if the animal is allergic to stuff.'''
return stuff in self.donts
def get_donts(self):
return self.donts
def set_donts(self, donts):
self.donts = donts
def get_days(self):
return round(self.package / (self.weight * Globals.FACTOR[self.ras]))
def get_meal_size(self):
return self.weight * self.factor * Globals.FACTOR[self.ras] / 2
def get_weight(self):
        return self.weight
def get_package(self):
return self.package
def get_kind(self):
return self.kind
def get_owner(self):
return self.owner
def get_animal(self):
return self.animal
def get_ras(self):
return self.ras
def set_result(self, result):
self.result = result
def get_result(self):
return self.result
def get_factor(self):
return self.factor
def set_factor(self, factor):
self.factor = factor
def get_portie(self):
return self.portie
def set_portie(self, portie):
self.portie = portie
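# A minimal usage sketch with an invented CSV line (field layout taken from
# the index constants above; Globals.FACTOR is only needed by get_days and
# get_meal_size, so those are not called here):
line = "Alice,Rex,m,yes,yes,no,23.5,10.0,hond,no,labrador,kip,|,rund"
order = Order(line)
print(order.get_owner(), order.get_animal())   # Alice Rex
print(order.is_allergic('KIP'))                # True
print(order.get_prefers())                     # ['RUND']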
|
erkarl/browl-api
|
apps/posts/models.py
|
Python
|
mit
| 211
| 0
|
from django.db import models
from django.contrib.auth.models import User
class Post(models.Model):
    title = models.CharField(max_length=255)
body = models.TextField()
user = models.ForeignKey(User)
|
XianliangJ/collections
|
ShrewAttack/plotter.py
|
Python
|
gpl-3.0
| 1,097
| 0.012762
|
import argparse
import csv
from util.helper import *
import util.plot_defaults
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from pylab import figure
parser = argparse.ArgumentParser()
parser.add_argument('--file', '-f',
help="data file directory",
required=True,
action="store",
dest="file")
parser.add_argument('-o',
help="Output directory",
                    required=True,
action="store",
dest="dir")
args = parser.parse_args()
to_plot = []
cong = ['reno', 'cubic', 'vegas']
bursts = ['0.03', '0.05', '0.07', '0.09']
graphfiles = []
for burst in bursts:
for tcptype in cong:
data = read_list(args.file + '/' + tcptype + '-' + burst +'-raw_data.txt')
xs = col(0, data)
ys = col(1, data)
plt.plot(xs, ys, label=tcptype)
plt.title('Shrew-attack TCP throughput. Burst = ' + burst)
plt.legend(loc='upper left')
plt.xlabel('seconds')
    plt.ylabel("% throughput")
plt.grid(True)
plt.savefig("{0}/{1}-result.png".format(args.dir, burst))
plt.close()
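# A hypothetical invocation, assuming raw data files named like
# reno-0.03-raw_data.txt live under the given directory:
#
#     python plotter.py -f results -o plots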
|
avocado-framework/avocado-vt
|
virttest/unittests/test_utils_zchannels.py
|
Python
|
gpl-2.0
| 3,733
| 0.00375
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
import unittest
try:
from unittest import mock
except ImportError:
import mock
import virttest
from virttest.utils_zchannels import ChannelPaths, SubchannelPaths
OUT_OK = ["Device Subchan. DevType CU Type Use PIM PAM POM CHPIDs ",
"----------------------------------------------------------------------",
"0.0.0600 0.0.0000 1732/01 1731/01 yes 80 80 ff 17000000 00000000",
"0.0.0601 0.0.0001 1732/01 1731/01 yes 80 80 ff 17000000 00000000",
"0.0.0602 0.0.0002 1732/01 1731/01 yes 80 80 ff 17000000 00000000",
"0.0.540c 0.0.24ac 3390/0c 3990/e9 yes f0 f0 ff 01021112 00000000",
"0.0.540d 0.0.24ad 3390/0c 3990/e9 yes f0 f0 ff 01021112 00000000",
"none 0.0.26aa f0 f0 ff 11122122 00000000",
"none 0.0.26ab f0 f0 ff 11122122 00000000",
"0.0.570c 0.0.27ac 3390/0c 3990/e9 yes f0 f0 ff 12212231 00000000"]
class TestSubchannelPaths(unittest.TestCase):
def test_get_info(self):
virttest.utils_zchannels.cmd_status_output = mock.Mock(return_value=(0,
"\n".join(OUT_OK)))
subchannel_paths = SubchannelPaths()
subchannel_paths.get_info()
self.assertEqual(8, len(subchannel_paths.devices))
def test_get_first_unused_and_safely_removable(self):
virttest.utils_zchannels.cmd_status_output = mock.Mock(return_value=(0,
"\n".join(OUT_OK)))
subchannel_paths = SubchannelPaths()
subchannel_paths.get_info()
device = subchannel_paths.get_first_unused_and_safely_removable()
self.assertIsNotNone(device)
self.assertEqual("0.0.26aa", device[1])
def test_get_first_unused_and_safely_removable_not_safe(self):
not_safe = OUT_OK.copy()
not_safe[6] = not_safe[6].replace("01021112", "11122122")
virttest.utils_zchannels.cmd_status_output = mock.Mock(return_value=(0,
"\n".join(not_safe)))
subchannel_paths = SubchannelPaths()
subchannel_paths.get_info()
device = subchannel_paths.get_first_unused_and_safely_removable()
self.assertIsNone(device)
    def test_get_first_unused_and_safely_removable_not_first(self):
not_safe = OUT_OK.copy()
not_safe[7] = not_safe[7].replace("11122122", "01021112")
virttest.utils_zchannels.cmd_status_output = mock.Mock(return_value=(0,
"\n".join(not_safe)))
subchannel_paths = SubchannelPaths()
        subchannel_paths.get_info()
device = subchannel_paths.get_first_unused_and_safely_removable()
self.assertIsNotNone(device)
self.assertEqual("0.0.26ab", device[1])
class TestChannelPaths(unittest.TestCase):
def test__split(self):
chpids = "12345678"
ids = ChannelPaths._split(chpids)
self.assertEqual(4, len(ids))
self.assertEqual("0.12", ids[0])
self.assertEqual("0.78", ids[3])
if __name__ == '__main__':
unittest.main()
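# Illustrative direct use outside of unittest (requires a Linux on IBM Z
# host where the channel-subsystem listing command is available; the call
# sequence mirrors what the mocked tests above exercise):
#
#     paths = SubchannelPaths()
#     paths.get_info()
#     print(paths.get_first_unused_and_safely_removable())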
|
square/pants
|
tests/python/pants_test/tasks/test_eclipse_integration.py
|
Python
|
apache-2.0
| 3,401
| 0.010879
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class EclipseIntegrationTest(PantsRunIntegrationTest):
def _eclipse_test(self, specs, project_dir=os.path.join('.pants.d', 'tmp', 'test-eclipse'),
project_name='project'):
"""Helper method that tests eclipse generation on the input spec list."""
if not os.path.exists(project_dir):
os.makedirs(project_dir)
with temporary_dir(root_dir=project_dir) as path:
pants_run = self.run_pants(['goal', 'eclipse',] + specs
+ ['--no-pantsrc', '--eclipse-project-dir={dir}'.format(dir=path)])
self.assertEquals(pants_run.returncode, self.PANTS_SUCCESS_CODE,
"goal eclipse expected success, got {0}\n"
"got stderr:\n{1}\n"
"got stdout:\n{2}\n".format(pants_run.returncode,
pants_run.stderr_data,
pants_run.stdout_data))
expected_files = ('.classpath', '.project',)
workdir = os.path.join(path, project_name)
self.assertTrue(os.path.exists(workdir),
'Failed to find project_dir at {dir}.'.format(dir=workdir))
self.assertTrue(all(os.path.exists(os.path.join(workdir, name))
for name in expected_files))
# return contents of .classpath so we can verify it
with open(os.path.join(workdir, '.classpath')) as classpath_f:
classpath = classpath_f.read()
# should be at least one input; if not we may have the wrong target path
self.assertIn('<classpathentry kind="src"', classpath)
return classpath
  # Test Eclipse generation on example targets; ideally should test that the build "works"
def test_eclipse_on_protobuf(self):
    self._eclipse_test(['examples/src/java/com/pants/examples/protobuf::'])
def test_eclipse_on_jaxb(self):
self._eclipse_test(['examples/src/java/com/pants/examples/jaxb/main'])
def test_eclipse_on_unicode(self):
self._eclipse_test(['testprojects/src/java/com/pants/testproject/unicode::'])
def test_eclipse_on_hello(self):
self._eclipse_test(['examples/src/java/com/pants/examples/hello::'])
def test_eclipse_on_annotations(self):
self._eclipse_test(['examples/src/java/com/pants/examples/annotation::'])
def test_eclipse_on_all_examples(self):
self._eclipse_test(['examples/src/java/com/pants/examples::'])
def test_eclipse_on_java_sources(self):
classpath = self._eclipse_test(['testprojects/src/scala/com/pants/testproject/javasources::'])
self.assertIn('path="testprojects.src.java"', classpath)
def test_eclipse_on_thriftdeptest(self):
self._eclipse_test(['testprojects/src/java/com/pants/testproject/thriftdeptest::'])
def test_eclipse_on_scaladepsonboth(self):
classpath = self._eclipse_test(['testprojects/src/scala/com/pants/testproject/scaladepsonboth::'])
# Previously Java dependencies didn't get included
self.assertIn('path="testprojects.src.java"', classpath)
|
Qwlouse/Findeco
|
node_storage/validation.py
|
Python
|
gpl-3.0
| 1,992
| 0.001011
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import re
import unicodedata
h1_start = re.compile(r"^\s*=(?P<title>[^=]+)=*[ \t]*")
valid_title = re.compile(r"[^=]+")
general_heading = re.compile(r"^\s*(={2,6}(?P<title>" + valid_title.pattern +
                             r")=*)\s*$", flags=re.MULTILINE)
invalid_symbols = re.compile(r"[^\w\-_\s]+")
def strip_accents(s):
return ''.join(
(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(
c) != 'Mn'))
REPLACEMENTS = {
ord('ä'): 'ae',
ord('ö'): 'oe',
ord('ü'): 'ue',
ord('ß'): 'ss',
ord('Ä'): 'Ae',
ord('Ö'): 'Oe',
ord('Ü'): 'Ue',
ord('ẞ'): 'SS'
}
def substitute_umlauts(s):
return s.translate(REPLACEMENTS)
def remove_unallowed_chars(s):
s = invalid_symbols.sub('', s)
return s
def remove_and_compress_whitespaces(s):
return '_'.join(s.split()).strip('_')
def turn_into_valid_short_title(title, short_title_set=(), max_length=20):
st = substitute_umlauts(title)
st = strip_accents(st)
st = remove_unallowed_chars(st)
st = remove_and_compress_whitespaces(st)
st = st.lstrip('1234567890-_')
st = st[:min(len(st), max_length)]
if not st:
st = 'sub'
if st not in short_title_set:
return st
else:
i = 0
while True:
i += 1
suffix = str(i)
new_st = st[:min(max_length - len(suffix), len(st))] + suffix
            if new_st not in short_title_set:
return new_st
def get_heading_matcher(level=0):
if 0 < level < 7:
s = "%d" % level
elif level == 0:
s = "1, 6"
else:
raise ValueError(
"level must be between 1 and 6 or 0, but was %d." % level)
pattern = r"^\s*={%s}(?P<title>[^=§]+)" \
r"(?:§\s*(?P<short_title>[^=§\s][^=§]*))?=*\s*$"
return re.compile(pattern % s, flags=re.MULTILINE)
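# A quick illustrative run of the pipeline above (inputs invented):
if __name__ == '__main__':
    print(turn_into_valid_short_title("Ärger über 123!"))
    # -> 'Aerger_ueber_123'
    print(turn_into_valid_short_title("Ärger über 123!", {"Aerger_ueber_123"}))
    # -> 'Aerger_ueber_1231' (a numeric suffix resolves the collision)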
|
SkyLined/headsup
|
decode/GIF_IMAGE.py
|
Python
|
apache-2.0
| 2,315
| 0.019438
|
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Structure import Structure;
class GIF_IMAGE(Structure):
type_name = 'GIF_IMAGE';
def __init__(self, stream, offset, max_size, parent, name):
import C;
from GIF_BLOCK import GIF_BLOCK;
from GIF_COLORTABLE import GIF_COLORTABLE;
from GIF_IMAGE_DESCRIPTOR import GIF_IMAGE_DESCRIPTOR;
from LZW_compressed_data import LZW_compressed_data;
Structure.__init__(self, stream, offset, max_size, parent, name);
self._descriptor = self.Member(GIF_IMAGE_DESCRIPTOR, 'descriptor');
flags = self._descriptor._Flags;
self._has_local_color_table = flags._LocalColorTable.value == 1;
if self._has_local_color_table:
self._local_color_table_entries = \
2 ** (flags._SizeLocalColorTable.value + 1);
self._local_color_table_sorted = flags._Sort.value == 1;
self._local_color_table = self.Member(GIF_COLORTABLE, \
'local_color_table', self._local_color_table_entries, \
self._local_color_table_sorted);
else:
self._local_color_table = None;
self._lzw_minimum_code_size = self.Member(C.BYTE, 'LZW_minimum_code_size');
if self._lzw_minimum_code_size.value == 0:
self._lzw_minimum_code_size.warnings.append('expected value > 0');
self._compressed_pixel_data_container = self.Member(GIF_BLOCK, 'pixel_data');
self._pixel_data_container = \
self._compressed_pixel_data_container.ContainMember( \
LZW_compressed_data, 'pixel_data', \
self._lzw_minimum_code_size.value);
self._pixel_data = self._pixel_data_container.ContainMember( \
C.STRING, 'pixel_data', \
self._descriptor._Width.value * self._descriptor._Height.value);
|
andrewgolman/Learning_Cards
|
bot/db/queries.py
|
Python
|
gpl-3.0
| 7,726
| 0.001941
|
import psycopg2
from db.enums import *
base = psycopg2.connect("dbname='cardkeepersample' user='andrew' host='localhost' password='1234'")
cursor = base.cursor()
# Wrapped queries in alphabetic order
def active_packs(user_id, start=0, count=10):
query = """SELECT packs.pack_id, packs.name FROM user_packs, packs WHERE packs.pack_id = user_packs.pack_id
AND user_packs.status = %s AND user_id = %s ORDER BY pack_id
OFFSET %s LIMIT %s;"""
cursor.execute(query, (CardStatusType.ACTIVE.value, user_id, start, count))
return cursor.fetchall()
def add_pack(user_id, pack_id):
query = """INSERT INTO user_packs (pack_id, user_id, status) VALUES (%s, %s, 'Active');"""
cursor.execute(query, (pack_id, user_id))
query = """SELECT card_id FROM cards WHERE cards.pack_id = %s"""
cursor.execute(query, (pack_id,))
cards = cursor.fetchall()
for i in cards:
query = """INSERT INTO user_cards (user_id, card_id, times_reviewed, correct_answers, status) VALUES (%s, %s, 0, 0, 'Active');"""
cursor.execute(query, (user_id, i[0]))
base.commit()
def add_user(user):
query = """INSERT INTO users (user_id, name, general_goal, weekly_goal, notifications_learn, notifications_stats, joined)
VALUES (%s, %s, %s, %s, %s, %s, current_date);"""
cursor.execute(query, tuple(user))
base.commit()
def available_packs(user_id):
query = """SELECT packs.pack_id, packs.name FROM packs
WHERE packs.privacy = 'public' LIMIT 105;"""
cursor.execute(query)
return cursor.fetchall()
def available_groups(user_id, rights=RightsType.USER, include_higher=False):
query = """SELECT groups.group_id, groups.name FROM groups, user_groups
WHERE groups.group_id = user_groups.group_id
AND user_groups.user_id = %s
               AND user_groups.rights """ + ("<" if include_higher else "") + "= %s;"
cursor.execute(query, (user_id, rights))
return cursor.fetchall()
def delete_pack(pack_id):
owner_id = get_pack(pack_id)['owner_id']
cursor.execute('''
DELETE FROM user_cards
USING cards
WHERE
user_cards.card_id = cards.card_id AND
cards.pack_id = %s;
''', (pack_id,))
cursor.execute(
'DELETE FROM cards WHERE pack_id = %s;',
(pack_id,)
)
cursor.execute(
'DELETE FROM user_packs WHERE pack_id = %s;',
(pack_id,)
)
cursor.execute(
'DELETE FROM packs WHERE pack_id = %s;',
(pack_id,)
)
base.commit()
def get_all_cards_in_pack(pack_id):
cursor.execute('''
SELECT card_id, front, back, comment, type
FROM cards
WHERE pack_id = %s;
''', (pack_id,))
return [{'card_id': card_id, 'front': front, 'back': back,
'comment': comment, 'type': tp}
for card_id, front, back, comment, tp
in cursor.fetchall()]
def get_pack(pack_id, user_id=None):
cursor.execute(
'SELECT name, owner_id, privacy FROM packs WHERE pack_id = %s;',
(pack_id,)
)
name, owner_id, privacy = cursor.fetchone()
status = None
if user_id is not None:
cursor.execute('''
SELECT status FROM user_packs
WHERE user_id = %s AND pack_id = %s;
''', (user_id, pack_id))
status = cursor.fetchone()[0]
return {
'pack_id': pack_id,
'name': name,
'owner_id': owner_id,
'privacy': privacy,
'status': status
}
def if_added(user_id, pack_id):
query = "SELECT * FROM user_packs WHERE user_id = %s AND pack_id = %s;"
cursor.execute(query, (user_id, pack_id))
return list(cursor.fetchall())
# TODO: Take permissions lists into account
def has_pack_read_access(pack_id, user_id):
    pack_info = get_pack(pack_id)
    return user_id == pack_info['owner_id'] or pack_info['privacy'] == 'public'
def if_registered(user_id):
query = "SELECT * FROM users WHERE users.user_id = %s;"
cursor.execute(query, (user_id,))
    return bool(cursor.fetchall())
def cards_for_learning(user_id):
query = """SELECT cards.front, cards.back, cards.comment FROM user_cards, cards
WHERE user_cards.card_id = cards.card_id AND
user_id = %s AND cards.type = %s"""
    cursor.execute(query, (user_id, CardType.SHORT.value))  # pass the enum's value, as in the other queries
return cursor.fetchall()
def new_card(front, back):
query = "INSERT INTO cards (front, back) VALUES (%s, %s);"
cursor.execute(query, (front, back))
base.commit()
def new_group(name, owner, privacy="public"):
query = "INSERT INTO groups (name, privacy, owner_id) VALUES (%s, %s, %s);"
cursor.execute(query, (name, privacy, owner))
base.commit()
def new_pack(name, owner, privacy=PrivacyType.PUBLIC, status=CardStatusType.ACTIVE, cards=()):
if isinstance(privacy, PrivacyType):
privacy = privacy.value
if isinstance(status, CardStatusType):
status = status.value
query = "INSERT INTO packs (name, owner_id, privacy) VALUES (%s, %s, %s);"
cursor.execute(query, (name, owner, privacy))
query = "SELECT pack_id FROM packs WHERE name = %s AND owner_id = %s;"
cursor.execute(query, (name, owner))
pack_id = cursor.fetchone()[0]
query = "INSERT INTO user_packs (user_id, pack_id, status) VALUES (%s, %s, %s);"
cursor.execute(query, (owner, pack_id, status))
insert_query = "INSERT INTO cards (pack_id, front, back, comment, type) VALUES (%s, %s, %s, %s, %s) RETURNING card_id;"
insert2_query = "INSERT INTO user_cards (user_id, card_id, times_reviewed, correct_answers, status)" \
"VALUES (%s, %s, 0, 0, 'Active');"
for card in cards:
front = card['front']
back = card['back']
comment = card['comment']
cursor.execute(insert_query, (pack_id, front, back, comment, CardType.SHORT.value))
card_id = cursor.fetchone()[0]
cursor.execute(insert2_query, (owner, card_id))
base.commit()
return pack_id
def select_cards(user_id, pack_id):
print(user_id, pack_id)
query = """SELECT cards.card_id, cards.front, cards.back, cards.comment
FROM cards, user_cards
WHERE cards.card_id = user_cards.card_id
AND user_cards.status = %s
AND cards.pack_id = %s
AND user_cards.user_id = %s"""
cursor.execute(query, (CardStatusType.ACTIVE.value, pack_id, user_id))
return cursor.fetchall()
def update_card_data(user_id, card_id, answer):
query = """UPDATE user_cards SET times_reviewed = times_reviewed+1, correct_answers = correct_answers+%s
WHERE user_id = %s AND card_id = %s"""
cursor.execute(query, (answer, user_id, card_id))
base.commit()
def update_card_status(user_id, card_id, status):
query = """UPDATE user_cards SET status = %s
WHERE user_id = %s AND card_id = %s"""
cursor.execute(query, (status, user_id, card_id))
base.commit()
def update_pack_name(pack_id, new_name):
query = 'UPDATE packs SET name = %s WHERE pack_id = %s;'
cursor.execute(query, (new_name, pack_id))
base.commit()
def update_pack_privacy(pack_id, new_privacy):
if isinstance(new_privacy, PrivacyType):
new_privacy = new_privacy.value
query = 'UPDATE packs SET privacy = %s WHERE pack_id = %s;'
cursor.execute(query, (new_privacy, pack_id))
base.commit()
def update_pack_status(user_id, pack_id, status):
query = """UPDATE user_cards SET status = %s
WHERE user_id = %s AND card_id = %s"""
cursor.execute(query, (status, user_id, pack_id))
base.commit()
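# A hypothetical end-to-end flow built only from the helpers above
# (user ids and card contents are invented; requires the same database):
if __name__ == '__main__':
    cards = [{'front': 'Haus', 'back': 'house', 'comment': ''}]
    pack_id = new_pack('German basics', owner=1, cards=cards)
    add_pack(user_id=2, pack_id=pack_id)
    print(select_cards(2, pack_id))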
|
updatengine/updatengine-server
|
inventory/migrations/0013_auto__add_field_machine_domain__add_field_machine_uuid__add_field_mach.py
|
Python
|
gpl-2.0
| 11,272
| 0.006831
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'machine.domain'
db.add_column('inventory_machine', 'domain',
self.gf('django.db.models.fields.CharField')(default='undefined', max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'machine.uuid'
db.add_column('inventory_machine', 'uuid',
self.gf('django.db.models.fields.CharField')(default='undefined', max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'machine.language'
db.add_column('inventory_machine', 'language',
self.gf('django.db.models.fields.CharField')(default='undefined', max_length=100, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'machine.domain'
db.delete_column('inventory_machine', 'domain')
# Deleting field 'machine.uuid'
db.delete_column('inventory_machine', 'uuid')
# Deleting field 'machine.language'
db.delete_column('inventory_machine', 'language')
models = {
'deploy.package': {
'Meta': {'object_name': 'package'},
            'command': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
            'conditions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['deploy.packagecondition']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'filename': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignoreperiod': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'packagesum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'})
},
'deploy.packagecondition': {
'Meta': {'object_name': 'packagecondition'},
'depends': ('django.db.models.fields.CharField', [], {'default': "'installed'", 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'softwarename': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'softwareversion': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'deploy.packageprofile': {
'Meta': {'object_name': 'packageprofile'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'packages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['deploy.package']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['deploy.packageprofile']"})
},
'deploy.timeprofile': {
'Meta': {'object_name': 'timeprofile'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'end_time': ('django.db.models.fields.TimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'start_time': ('django.db.models.fields.TimeField', [], {})
},
'inventory.entity': {
'Meta': {'object_name': 'entity'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'force_packageprofile': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'force_timeprofile': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'old_packageprofile': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_packageprofile'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['deploy.packageprofile']"}),
'old_timeprofile': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_timeprofile'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['deploy.timeprofile']"}),
'packageprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deploy.packageprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['inventory.entity']"}),
'timeprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deploy.timeprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'inventory.machine': {
'Meta': {'object_name': 'machine'},
'domain': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.entity']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'lastsave': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'manualy_created': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'netsum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'ossum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'packageprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deploy.packageprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'packages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['deploy.package']", 'null': 'True', 'blank': 'True'}),
'product': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'softsum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'timeprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deploy.timeprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'typemac
|
malikshahzad228/widget-jack
|
widgets/admin.py
|
Python
|
mit
| 400
| 0
|
from django.contrib import admin
from .models import BackgroundImages, Widget
class WidgetAdmin(admin.ModelAdmin):
    list_display = ('name', 'link', 'is_featured')
ordering = ('-id',)
class BackgroundAdmin(admin.ModelAdmin):
list_display = ('name', 'created_at')
ordering = ('-id',)
admin.site.register(Widget, WidgetAdmin)
admin.site.register(BackgroundImages, BackgroundAdmin)
|
hammerlab/topiary
|
topiary/cli/errors.py
|
Python
|
apache-2.0
| 1,087
| 0.00092
|
# Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "
|
License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Commandline arguments related to error handling
"""
from __future__ import print_function, division, absolute_import
def add_error_args(arg_parser):
error_group = arg_parser.add_argument_group(
title="Errors",
description="Options for error handling")
error_group.add_argument(
"--skip-variant-errors",
default=False,
action="store_true",
help="Skip variants which cause runtime errors of any kind")
return error_group
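# A minimal usage sketch (argparse is standard library; the program name
# and the argument string are invented):
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(prog='topiary')
    add_error_args(parser)
    args = parser.parse_args(['--skip-variant-errors'])
    print(args.skip_variant_errors)  # True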
|