| repo_name (string, 5-100) | path (string, 4-231) | language (1 value) | license (15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k) | middle (string, 3-512) | suffix (string, 0-8.17k) |
|---|---|---|---|---|---|---|---|---|

giantas/elibrary | repo/urls.py | Python | mit | 535 | 0.018692
from django.conf.urls import url
from . import views
app_name = 'repo'
urlpatterns = [
url(r'^$', views.home, name='home'),
    url(r'^home/$', views.home, name='home'),
url(r'^library/$', views.library, name='library'),
url(r'^login/$', views.login, name='login'),
url(r'^register/$', views.register, name='register'),
url(r'^results/?P<form>[A-Za-z]+/$', views.results, name='results'),
url(r'^(?P<sn>[-\/\d\w]{5,100})/borrow/$', views.borrow, name='borrow'),
#url(r'^(?P<sn>[.\D\d.]+)/borrow/$', views.borrow, name='borrow'),
]

kylon/pacman-fakeroot | test/pacman/tests/upgrade042.py | Python | gpl-2.0 | 725 | 0.002759
self.description = "Backup file relocation"
lp1 = pmpkg("bash")
lp1.files = ["etc/profile*"]
lp1.backup = ["etc/profile"]
self.addpkg2db("local", lp1)
p1 = pmpkg("bash", "1.0-2")
self.addpkg(p1)
lp2 = pmpkg("filesystem")
self.addpkg2db("local", lp2)
p2 = pmpkg("filesystem", "1.0-2")
p2.files = ["etc/profile**"]
p2.backup = ["etc/profile"]
p2.depends = [ "bash" ]
self.addpkg(p2)
self.args = "-U %s" % " ".join([p.filename() for p in (p1, p2)])
self.filesystem = ["etc/profile"]
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_VERSION=bash|1.0-2")
self.addrule("PKG_VERSION=filesystem|1.0-2")
self.addrule("!FILE_PACSAVE=etc/profile")
self.addrule("FILE_PACNEW=etc/profile")
self.addrule("FILE_EXIST=etc/profile")

ingadhoc/account-payment | account_payment_group/hooks.py | Python | agpl-3.0 | 1,366 | 0.000732
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import logging
from odoo import api, SUPERUSER_ID
_logger = logging.getLogger(__name__)
def post_init_hook(cr, registry):
"""
Create a payment group for every existint payment
"""
env = api.Environment(cr, SUPERUSER_ID, {})
# payments = env['account.payment'].search(
# [('payment_type', '!=', 'transfer')])
# on v10, on reconciling from statements, if not partner is choosen, then
# a payment is created with no partner. We still make partners mandatory
# on payment groups. So, we dont create payment groups for payments
# without partner_id
payments = env['account.payment'].search(
[('partner_id', '!=', False)])
for payment in payments:
_logger.info('creating payment group for payment %s' % payment.id)
        _state = payment.state in ['sent', 'reconciled'] and 'posted' or payment.state
_state = _state if _state != 'cancelled' else 'cancel'
env['account.payment.group'].create({
'company_id': payment.company_id.id,
'partner_type': payment.partner_type,
'partner_id': payment.partner_id.id,
'payment_date': payment.date,
'communication': payment.ref,
            'payment_ids': [(4, payment.id, False)],
'state': _state,
})

showell/zulip | analytics/lib/counts.py | Python | apache-2.0 | 29,578 | 0.003719
import logging
import time
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
from django.conf import settings
from django.db import connection
from django.db.models import F
from psycopg2.sql import SQL, Composable, Identifier, Literal
from analytics.models import (
BaseCount,
FillState,
InstallationCount,
RealmCount,
StreamCount,
UserCount,
installation_epoch,
last_successful_fill,
)
from zerver.lib.logging_util import log_to_file
from zerver.lib.timestamp import ceiling_to_day, ceiling_to_hour, floor_to_hour, verify_UTC
from zerver.models import (
Message,
Realm,
RealmAuditLog,
Stream,
UserActivityInterval,
UserProfile,
models,
)
## Logging setup ##
logger = logging.getLogger('zulip.management')
log_to_file(logger, settings.ANALYTICS_LOG_PATH)
# You can't subtract timedelta.max from a datetime, so use this instead
TIMEDELTA_MAX = timedelta(days=365*1000)
## Class definitions ##
class CountStat:
HOUR = 'hour'
DAY = 'day'
FREQUENCIES = frozenset([HOUR, DAY])
def __init__(self, property: str, data_collector: 'DataCollector', frequency: str,
interval: Optional[timedelta]=None) -> None:
self.property = property
self.data_collector = data_collector
# might have to do something different for bitfields
if frequency not in self.FREQUENCIES:
raise AssertionError(f"Unknown frequency: {frequency}")
self.frequency = frequency
if interval is not None:
self.interval = interval
elif frequency == CountStat.HOUR:
self.interval = timedelta(hours=1)
else: # frequency == CountStat.DAY
self.interval = timedelta(days=1)
def __str__(self) -> str:
return f"<CountStat: {self.property}>"
class LoggingCountStat(CountStat):
def __init__(self, property: str, output_table: Type[BaseCount], frequency: str) -> None:
CountStat.__init__(self, property, DataCollector(output_table, None), frequency)
class DependentCountStat(CountStat):
def __init__(self, property: str, data_collector: 'DataCollector', frequency: str,
interval: Optional[timedelta] = None, dependencies: Sequence[str] = []) -> None:
CountStat.__init__(self, property, data_collector, frequency, interval=interval)
self.dependencies = dependencies
class DataCollector:
def __init__(self, output_table: Type[BaseCount],
pull_function: Optional[Callable[[str, datetime, datetime, Optional[Realm]], int]]) -> None:
self.output_table = output_table
self.pull_function = pull_function
## CountStat-level operations ##
def process_count_stat(stat: CountStat, fill_to_time: datetime,
realm: Optional[Realm]=None) -> None:
# TODO: The realm argument is not yet supported, in that we don't
# have a solution for how to update FillState if it is passed. It
# exists solely as partial plumbing for when we do fully implement
# doing single-realm analytics runs for use cases like data import.
#
# Also, note that for the realm argument to be properly supported,
# the CountStat object passed in needs to have come from
# E.g. get_count_stats(realm), i.e. have the realm_id already
# entered into the SQL query defined by the CountState object.
if stat.frequency == CountStat.HOUR:
time_increment = timedelta(hours=1)
    elif stat.frequency == CountStat.DAY:
time_increment = timedelta(days=1)
else:
raise AssertionError(f"Unknown frequency: {stat.frequency}")
verify_UTC(fill_to_time)
if floor_to_hour(fill_to_time) != fill_to_time:
raise ValueError(f"fill_to_time must be on an hour boundary: {fill_to_time}")
fill_state = FillState.objects.filter(property=stat.property).first()
if fill_state is None:
currently_filled = installation_epoch()
fill_state = FillState.objects.create(property=stat.property,
end_time=currently_filled,
state=FillState.DONE)
logger.info("INITIALIZED %s %s", stat.property, currently_filled)
elif fill_state.state == FillState.STARTED:
logger.info("UNDO START %s %s", stat.property, fill_state.end_time)
do_delete_counts_at_hour(stat, fill_state.end_time)
currently_filled = fill_state.end_time - time_increment
do_update_fill_state(fill_state, currently_filled, FillState.DONE)
logger.info("UNDO DONE %s", stat.property)
elif fill_state.state == FillState.DONE:
currently_filled = fill_state.end_time
else:
raise AssertionError(f"Unknown value for FillState.state: {fill_state.state}.")
if isinstance(stat, DependentCountStat):
for dependency in stat.dependencies:
dependency_fill_time = last_successful_fill(dependency)
if dependency_fill_time is None:
logger.warning("DependentCountStat %s run before dependency %s.",
stat.property, dependency)
return
fill_to_time = min(fill_to_time, dependency_fill_time)
currently_filled = currently_filled + time_increment
while currently_filled <= fill_to_time:
logger.info("START %s %s", stat.property, currently_filled)
start = time.time()
do_update_fill_state(fill_state, currently_filled, FillState.STARTED)
do_fill_count_stat_at_hour(stat, currently_filled, realm)
do_update_fill_state(fill_state, currently_filled, FillState.DONE)
end = time.time()
currently_filled = currently_filled + time_increment
logger.info("DONE %s (%dms)", stat.property, (end-start)*1000)
def do_update_fill_state(fill_state: FillState, end_time: datetime, state: int) -> None:
fill_state.end_time = end_time
fill_state.state = state
fill_state.save()
# We assume end_time is valid (e.g. is on a day or hour boundary as appropriate)
# and is timezone aware. It is the caller's responsibility to enforce this!
def do_fill_count_stat_at_hour(stat: CountStat, end_time: datetime, realm: Optional[Realm]=None) -> None:
start_time = end_time - stat.interval
if not isinstance(stat, LoggingCountStat):
timer = time.time()
assert(stat.data_collector.pull_function is not None)
rows_added = stat.data_collector.pull_function(stat.property, start_time, end_time, realm)
logger.info("%s run pull_function (%dms/%sr)",
stat.property, (time.time()-timer)*1000, rows_added)
do_aggregate_to_summary_table(stat, end_time, realm)
def do_delete_counts_at_hour(stat: CountStat, end_time: datetime) -> None:
if isinstance(stat, LoggingCountStat):
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
if stat.data_collector.output_table in [UserCount, StreamCount]:
RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
else:
UserCount.objects.filter(property=stat.property, end_time=end_time).delete()
StreamCount.objects.filter(property=stat.property, end_time=end_time).delete()
RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
def do_aggregate_to_summary_table(stat: CountStat, end_time: datetime,
realm: Optional[Realm]=None) -> None:
cursor = connection.cursor()
# Aggregate into RealmCount
output_table = stat.data_collector.output_table
if realm is not None:
realm_clause = SQL("AND zerver_realm.id = {}").format(Literal(realm.id))
else:
realm_clause = SQL("")
if output_table in (UserCount, StreamCount):
realmcount_query = SQL("""
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT

petterip/exam-archive | test/rest_api_test_course.py | Python | mit | 16,344 | 0.006975
'''
Testing class for database API's course related functions.
Authors: Ari Kairala, Petteri Ponsimaa
Originally adopted from Ivan's exercise 1 test class.
'''
import unittest, hashlib
import re, base64, copy, json, server
from database_api_test_common import BaseTestCase, db
from flask import json, jsonify
from exam_archive import ExamDatabaseErrorNotFound, ExamDatabaseErrorExists
from unittest import TestCase
from resources_common import COLLECTIONJSON, PROBLEMJSON, COURSE_PROFILE, API_VERSION
class RestCourseTestCase(BaseTestCase):
'''
RestCourseTestCase contains course related unit tests of the database API.
'''
# List of user credentials in exam_archive_data_dump.sql for testing purposes
super_user = "bigboss"
super_pw = hashlib.sha256("ultimatepw").hexdigest()
admin_user = "antti.admin"
admin_pw = hashlib.sha256("qwerty1234").hexdigest()
basic_user = "testuser"
basic_pw = hashlib.sha256("testuser").hexdigest()
wrong_pw = "wrong-pw"
test_course_template_1 = {"template": {
"data": [
{"name": "archiveId", "value": 1},
{"name": "courseCode", "value": "810136P"},
{"name": "name", "value": "Johdatus tietojenk\u00e4sittelytieteisiin"},
{"name": "description", "value": "Lorem ipsum"},
{"name": "inLanguage", "value": "fi"},
{"name": "creditPoints", "value": 4},
{"name": "teacherId", "value": 1}]
}
}
test_course_template_2 = {"template": {
"data": [
{"name": "archiveId", "value": 1},
{"name": "courseCode", "value": "810137P"},
{"name": "name", "value": "Introduction to Information Processing Sciences"},
{"name": "description", "value": "Aaa Bbbb"},
{"name": "inLanguage", "value": "en"},
{"name": "creditPoints", "value": 5},
{"name": "teacherId", "value": 2}]
}
}
course_resource_url = '/exam_archive/api/archives/1/courses/1/'
    course_resource_not_allowed_url = '/exam_archive/api/archives/2/courses/1/'
courselist_resource_url = '/exam_archive/api/archives/1/courses/'
# Set a ready header for authorized admin user
header_auth = {'Authorization': 'Basic ' + base64.b64encode(super_user + ":" + super_pw)}
# Define a list of the sample contents of the database, so we can later compare it to the test results
@classmethod
def setUpClass(cls):
print "Testing ", cls.__name__
def test_user_not_authorized(self):
'''
Check that user in not able to get course list without authenticating.
'''
print '(' + self.test_user_not_authorized.__name__ + ')', \
self.test_user_not_authorized.__doc__
# Test CourseList/GET
rv = self.app.get(self.courselist_resource_url)
self.assertEquals(rv.status_code,401)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Test CourseList/POST
rv = self.app.post(self.courselist_resource_url)
self.assertEquals(rv.status_code,401)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Test Course/GET
rv = self.app.get(self.course_resource_url)
self.assertEquals(rv.status_code,401)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Test Course/PUT
rv = self.app.put(self.course_resource_url)
self.assertEquals(rv.status_code,401)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Test Course/DELETE
rv = self.app.put(self.course_resource_url)
self.assertEquals(rv.status_code,401)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Try to Course/POST when not admin or super user
rv = self.app.post(self.courselist_resource_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.basic_user + ":" + self.basic_pw)})
self.assertEquals(rv.status_code,403)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Try to delete course, when not admin or super user
rv = self.app.delete(self.course_resource_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.basic_user + ":" + self.basic_pw)})
self.assertEquals(rv.status_code,403)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Try to get Course list as basic user from unallowed archive
rv = self.app.get(self.course_resource_not_allowed_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.basic_user + ":" + self.basic_pw)})
self.assertEquals(rv.status_code,403)
self.assertEquals(PROBLEMJSON,rv.mimetype)
# Try to get Course list as super user with wrong password
rv = self.app.get(self.courselist_resource_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.super_user + ":" + self.wrong_pw)})
self.assertEquals(rv.status_code,401)
self.assertEquals(PROBLEMJSON,rv.mimetype)
def test_user_authorized(self):
'''
Check that authenticated user is able to get course list.
'''
print '(' + self.test_user_authorized.__name__ + ')', \
self.test_user_authorized.__doc__
# Try to get Course list as basic user from the correct archive
rv = self.app.get(self.course_resource_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.basic_user + ":" + self.basic_pw)})
self.assertEquals(rv.status_code,200)
self.assertEquals(COLLECTIONJSON+";"+COURSE_PROFILE,rv.content_type)
# User authorized as super user
rv = self.app.get(self.courselist_resource_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.super_user + ":" + self.super_pw)})
self.assertEquals(rv.status_code,200)
self.assertEquals(COLLECTIONJSON+";"+COURSE_PROFILE,rv.content_type)
def test_course_get(self):
'''
Check data consistency of Course/GET and CourseList/GET.
'''
print '(' + self.test_course_get.__name__ + ')', \
self.test_course_get.__doc__
# Test CourseList/GET
self._course_get(self.courselist_resource_url)
# Test single course Course/GET
self._course_get(self.course_resource_url)
def _course_get(self, resource_url):
'''
Check data consistency of CourseList/GET.
'''
# Get all the courses from database
courses = db.browse_courses(1)
# Get all the courses from API
rv = self.app.get(resource_url, headers=self.header_auth)
self.assertEquals(rv.status_code,200)
self.assertEquals(COLLECTIONJSON+";"+COURSE_PROFILE,rv.content_type)
input = json.loads(rv.data)
assert input
# Go through the data
data = input['collection']
items = data['items']
self.assertEquals(data['href'], resource_url)
self.assertEquals(data['version'], API_VERSION)
for item in items:
obj = self._create_dict(item['data'])
course = db.get_course(obj['courseId'])
assert self._isIdentical(obj, course)
def test_course_post(self):
'''
Check that a new course can be created.
'''
print '(' + self.test_course_post.__name__ + ')', \
self.test_course_post.__doc__
resource_url = self.courselist_resource_url
new_course = self.test_course_template_1.copy()
# Test CourseList/POST
rv = self.app.post(resource_url, headers=self.header

hazelnusse/sympy-old | bin/sympy_time.py | Python | bsd-3-clause | 1,207 | 0.023198
import time
seen = set()
import_order = []
elapsed_times = {}
level = 0
parent = None
children = {}
def new_import(name, globals={}, locals={}, fromlist=[]):
global level, parent
if name in seen:
return old_import(name, globals, locals, fromlist)
seen.add(name)
import_order.append((name, level, parent))
t1 = time.time()
old_parent = parent
parent = name
level += 1
module = old_import(name, globals, locals, fromlist)
level -= 1
    parent = old_parent
t2 = time.time()
elapsed_times[name] = t2-t1
return module
old_import = __builtins__.__import__
__builtins__.__import__ = new_import
from sympy import *
parents = {}
is_parent = {}
for name, level, parent in import_order:
parents[name] = parent
is_parent[parent] = True
print "== Tree =="
for name, level, parent in import_order:
print "%s%s: %.3f (%s)" % (" "*level, name, elapsed_times.get(name,0),
parent)
print "\n"
print "== Slowest (including children) =="
slowest = sorted((t, name) for (name, t) in elapsed_times.items())[-50:]
for elapsed_time, name in slowest[::-1]:
print "%.3f %s (%s)" % (elapsed_time, name, parents[name])

wileeam/airflow | airflow/operators/dummy_operator.py | Python | apache-2.0 | 1,203 | 0
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DummyOperator(BaseOperator):
"""
Operator that does literally nothing. It can be used to group tasks in a
DAG.
"""
ui_color = '#e8f7e4'
@apply_defaults
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
def execute(self, context):
pass

nachoplus/cronoStamper | zmqClient.py | Python | gpl-2.0 | 568 | 0.021127
#!/usr/bin/python
'''
Example of zmq client.
Can be used to record test data on
remote PC
Nacho Mas January-2017
'''
import sys
import zmq
import time
import json
from config import *
# Socket to talk to server
context = zmq.Context()
socket = context.socket(zmq.SUB)
#socket.setsockopt(zmq.CONFLATE, 1)
socket.connect ("tcp://cronostamper:%s" % zmqShutterPort)
topicfilter = ShutterFlange
socket.setsockopt(zmq.SUBSCRIBE, topicfilter)
# Process
while True:
topic, msg = demogrify(socket.recv())
print "%f" % msg['unixUTC']
#time.sleep(5)

psf/black | src/black_primer/lib.py | Python | mit | 13,941 | 0.001507
import asyncio
import errno
import json
import logging
import os
import stat
import sys
from functools import partial
from pathlib import Path
from platform import system
from shutil import rmtree, which
from subprocess import CalledProcessError
from sys import version_info
from tempfile import TemporaryDirectory
from typing import (
Any,
Callable,
Dict,
List,
NamedTuple,
Optional,
Sequence,
Tuple,
Union,
)
from urllib.parse import urlparse
import click
TEN_MINUTES_SECONDS = 600
WINDOWS = system() == "Windows"
BLACK_BINARY = "black.exe" if WINDOWS else "black"
GIT_BINARY = "git.exe" if WINDOWS else "git"
LOG = logging.getLogger(__name__)
# Windows needs a ProactorEventLoop if you want to exec subprocesses
# Starting with 3.8 this is the default - can remove when Black >= 3.8
# mypy only respects sys.platform if directly in the evaluation
# https://mypy.readthedocs.io/en/latest/common_issues.html#python-version-and-system-platform-checks # noqa: B950
if sys.platform == "win32":
asyncio.set_event_loop(asyncio.ProactorEventLoop())
class Results(NamedTuple):
stats: Dict[str, int] = {}
failed_projects: Dict[str, CalledProcessError] = {}
async def _gen_check_output(
cmd: Sequence[str],
timeout: float = TEN_MINUTES_SECONDS,
env: Optional[Dict[str, str]] = None,
cwd: Optional[Path] = None,
stdin: Optional[bytes] = None,
) -> Tuple[bytes, bytes]:
process = await asyncio.create_subprocess_exec(
*cmd,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT,
env=env,
cwd=cwd,
)
try:
(stdout, stderr) = await asyncio.wait_for(process.communicate(stdin), timeout)
except asyncio.TimeoutError:
process.kill()
await process.wait()
raise
# A non-optional timeout was supplied to asyncio.wait_for, guaranteeing
# a timeout or completed process. A terminated Python process will have a
# non-empty returncode value.
assert process.returncode is not None
if process.returncode != 0:
cmd_str = " ".join(cmd)
raise CalledProcessError(
process.returncode, cmd_str, output=stdout, stderr=stderr
)
return (stdout, stderr)
def analyze_results(project_count: int, results: Results) -> int:
failed_pct = round(((results.stats["failed"] / project_count) * 100), 2)
success_pct = round(((results.stats["success"] / project_count) * 100), 2)
if results.failed_projects:
click.secho("\nFailed projects:\n", bold=True)
for project_name, project_cpe in results.failed_projects.items():
print(f"## {project_name}:")
print(f" - Returned {project_cpe.returncode}")
if project_cpe.stderr:
print(f" - stderr:\n{project_cpe.stderr.decode('utf8')}")
if project_cpe.stdout:
print(f" - stdout:\n{project_cpe.stdout.decode('utf8')}")
print("")
click.secho("-- primer results 📊 --\n", bold=True)
click.secho(
f"{results.stats['success']} / {project_count} succeeded ({success_pct}%) ✅",
bold=True,
fg="green",
)
click.secho(
f"{results.stats['failed']} / {project_count} FAILED ({failed_pct}%) 💩",
bold=bool(results.stats["failed"]),
fg="red",
)
s = "" if results.stats["disabled"] == 1 else "s"
click.echo(f" - {results.stats['disabled']} project{s} disabled by config")
s = "" if results.stats["wrong_py_ver"] == 1 else "s"
click.echo(
f" - {results.stats['wrong_py_ver']} project{s} skipped due to Python version"
)
click.echo(
f" - {results.stats['skipped_long_checkout']} skipped due to long checkout"
)
if results.failed_projects:
failed = ", ".join(results.failed_projects.keys())
click.secho(f"\nFailed projects: {failed}\n", bold=True)
return results.stats["failed"]
def _flatten_cli_args(cli_args: List[Union[Sequence[str], str]]) -> List[str]:
"""Allow a user to put long arguments into a list of strs
to make the JSON human readable"""
flat_args = []
for arg in cli_args:
if isinstance(arg, str):
flat_args.append(arg)
continue
args_as_str = "".join(arg)
flat_args.append(args_as_str)
return flat_args
async def black_run(
project_name: str,
repo_path: Optional[Path],
project_config: Dict[str, Any],
results: Results,
no_diff: bool = False,
) -> None:
"""Run Black and record failures"""
if not repo_path:
results.stats["failed"] += 1
results.failed_projects[project_name] = CalledProcessError(
69, [], f"{project_name} has no repo_path: {repo_path}".encode(), b""
)
return
stdin_test = project_name.upper() == "STDIN"
cmd = [str(which(BLACK_BINARY))]
if "cli_arguments" in project_config and project_config["cli_arguments"]:
cmd.extend(_flatten_cli_args(project_config["cli_arguments"]))
cmd.append("--check")
if not no_diff:
cmd.append("--diff")
# Workout if we should read in a python file or search from cwd
stdin = None
if stdin_test:
cmd.append("-")
stdin = repo_path.read_bytes()
elif "base_path" in project_config:
cmd.append(project_config["base_path"])
else:
cmd.append(".")
timeout = (
project_config["timeout_seconds"]
if "timeout_seconds" in project_config
else TEN_MINUTES_SECONDS
)
with TemporaryDirectory() as tmp_path:
# Prevent reading top-level user configs by manipulating environment variables
env = {
**os.environ,
"XDG_CONFIG_HOME": tmp_path, # Unix-like
"USERPROFILE": tmp_path, # Windows (changes `Path.home()` output)
}
        cwd_path = repo_path.parent if stdin_test else repo_path
try:
LOG.debug(f"Running black for {project_name}: {' '.join(cmd)}")
_stdout, _stderr = await _gen_check_output(
cmd, cwd=cwd_path, env=env, stdin=stdin, timeout=timeout
)
except asyncio.TimeoutError:
results.stats["failed"] += 1
LOG.error(f"Running black for {repo_path} timed out ({cmd})")
except CalledProcessError as cpe:
# TODO: Tune for smarter for higher signal
# If any other return value than 1 we raise - can disable project in config
if cpe.returncode == 1:
if not project_config["expect_formatting_changes"]:
results.stats["failed"] += 1
results.failed_projects[repo_path.name] = cpe
else:
results.stats["success"] += 1
return
elif cpe.returncode > 1:
results.stats["failed"] += 1
results.failed_projects[repo_path.name] = cpe
return
LOG.error(f"Unknown error with {repo_path}")
raise
# If we get here and expect formatting changes something is up
if project_config["expect_formatting_changes"]:
results.stats["failed"] += 1
results.failed_projects[repo_path.name] = CalledProcessError(
0, cmd, b"Expected formatting changes but didn't get any!", b""
)
return
results.stats["success"] += 1
async def git_checkout_or_rebase(
work_path: Path,
project_config: Dict[str, Any],
rebase: bool = False,
*,
depth: int = 1,
) -> Optional[Path]:
"""git Clone project or rebase"""
git_bin = str(which(GIT_BINARY))
if not git_bin:
LOG.error("No git binary found")
return None
repo_url_parts = urlparse(project_config["git_clone_url"])
path_parts = repo_url_parts.path[1:].split("/", maxsplit=1)
repo_path: Path = work_path / path_parts[1].replace(".git", "")
cmd = [git_bin, "clone", "--depth", str(depth), project_config["git_clone_url"]]
cwd = work_path
if repo_path.exists() and rebase:
cmd = [git_bin, "pull", "--rebase"]
cwd = repo_path
elif re

cg31/tensorflow | tensorflow/contrib/distributions/python/ops/operator_test_util.py | Python | apache-2.0 | 6,295 | 0.008896
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing `OperatorPDBase` and related classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
import tensorflow as tf
@six.add_metaclass(abc.ABCMeta) # pylint: disable=no-init
class OperatorPDDerivedClassTest(tf.test.TestCase):
"""Tests for derived classes.
Subclasses should implement every abstractmethod, and this will enable all
test methods to work.
"""
def setUp(self):
self._rng = np.random.RandomState(42)
def _compare_results(
self, expected, actual, static_shapes=True, atol=1e-5):
"""Compare expected value (array) to the actual value (Tensor)."""
if static_shapes:
self.assertEqual(expected.shape, actual.get_shape())
self.assertAllClose(expected, actual.eval(), atol=atol)
@abc.abstractmethod
def _build_operator_and_mat(self, batch_shape, k, dtype=np.float64):
"""Build a batch matrix and an Operator that should have similar behavior.
Every operator represents a (batch) matrix. This method returns both
together, and is used e.g. by tests.
Args:
batch_shape: List-like of Python integers giving batch shape of operator.
k: Python integer, the event size.
dtype: Numpy dtype. Data type of returned array/operator.
Returns:
operator: `OperatorPDBase` subclass.
mat: numpy array representing a (batch) matrix.
"""
# Create a matrix as a numpy array. Shape = batch_shape + [k, k].
# Create an OperatorPDDiag that should have the same behavior as the matrix.
# All arguments are convertable to numpy arrays.
#
batch_shape = list(batch_shape)
mat_shape = batch_shape + [k, k]
# return operator, mat
raise NotImplementedError("Not implemented yet.")
def testToDense(self):
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
for dtype in [np.float32, np.float64]:
operator, mat = self._build_operator_and_mat(
batch_shape, k, dtype=dtype)
self._compare_results(
expected=mat,
actual=operator.to_dense())
def testSqrtToDense(self):
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
operator, mat = self._build_operator_and_mat(batch_shape, k)
sqrt = operator.sqrt_to_dense()
self.assertEqual(mat.shape, sqrt.get_shape())
# Square roots are not unique, but SS^T should equal mat. In this
# case however, we should have S = S^T.
self._compare_results(
expected=mat,
actual=tf.batch_matmul(sqrt, sqrt))
def testDeterminants(self):
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
operator, mat = self._build_operator_and_mat(batch_shape, k)
expected_det = tf.matrix_determinant(mat).eval()
self._compare_results(expected_det, operator.det())
self._compare_results(np.log(expected_det), operator.log_det())
def testMatmul(self):
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
operator, mat = self._build_operator_and_mat(batch_shape, k)
# Work with 5 simultaneous systems. 5 is arbitrary.
x = self._rng.randn(*(batch_shape + (k, 5)))
self._compare_results(
expected=tf.batch_matmul(mat, x).eval(),
actual=operator.matmul(x))
def testSqrtMatmul(self):
# Square roots are not unique, but we should have SS^T x = Ax, and in our
# case, we should have S = S^T, so SSx = Ax.
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
operator, mat = self._build_operator_and_mat(batch_shape, k)
# Work with 5 simultaneous systems. 5 is arbitrary.
x = self._rng.randn(*(batch_shape + (k, 5)))
self._compare_results(
expected=tf.batch_matmul(mat, x).eval(),
actual=operator.sqrt_matmul(operator.sqrt_matmul(x)))
def testSolve(self):
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
operator, mat = self._build_operator_and_mat(batch_shape, k)
# Work with 5 simultaneous systems. 5 is arbitrary.
x = self._rng.randn(*(batch_shape + (k, 5)))
self._compare_results(
expected=tf.matrix_solve(mat, x).eval(), actual=operator.solve(x))
def testSqrtSolve(self):
# Square roots are not unique, but we should still have
# S^{-T} S^{-1} x = A^{-1} x.
# In our case, we should have S = S^T, so then S^{-1} S^{-1} x = A^{-1} x.
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
operator, mat = self._build_operator_and_mat(batch_shape, k)
# Work with 5 simultaneous systems. 5 is arbitrary.
          x = self._rng.randn(*(batch_shape + (k, 5)))
self._compare_results(
expected=tf.matrix_solve(mat, x).eval(),
actual=operator.sqrt_solve(operator.sqrt_solve(x)))
def testAddToTensor(self):
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
operator, mat = self._build_operator_and_mat(batch_shape, k)
tensor = tf.ones_like(mat)
self._compare_results(
              expected=(mat + tensor).eval(),
actual=operator.add_to_tensor(tensor))

ismailsunni/f3-factor-finder | core/tweet_model.py | Python | gpl-2.0 | 4,719 | 0.042594
#!/F3/core/tweet_model.py
# A class for representating a tweet.
# Author : Ismail Sunni/@ismailsunni
# Created : 2012-03-30
from db_control import db_conn
from datetime import datetime, timedelta
import preprocess as pp
class tweet_model:
'''A class for representating a tweet.'''
def __init__(self, id, time, text, sentiment = 0, negation = 0):
'''Standar __init__ function'''
self.id = id
self.time = time
self.text = text
self.negation = negation
self.sentiment = sentiment
self.parsed_word = []
self.parsed = False
self.post_parsed_word = []
self.post_parsed = False # this attribute indicate that the parsed_word has been preprocess again
def print_tweet(self):
'''Print procedure'''
import unicodedata
print unicodedata.normalize('NFKD', self.text.decode('latin-1')).encode('ascii', 'ignore'), self.sentiment
def get_normal_text(self):
'''Return content of the tweet in normal form.'''
import unicodedata
return unicodedata.normalize('NFKD', self.text.decode('latin-1')).encode('ascii', 'ignore')
def preprocess(self, dict_param = None):
'''Preprocess a tweet and save the result in parsed_word and negation.'''
self.negation, preprocesssed_text = pp.preprocess_tweet(self.text, dict_param)
self.parsed_word = preprocesssed_text.split(' ')
self.parsed = True
temp_post_parsed_word = pp.postparsed_text(preprocesssed_text)
self.post_parsed_word = temp_post_parsed_word.split(' ')
self.post_parsed = True
# public function
def get_dev_data():
'''Retrieve data from database for training and test as list of tweet object.'''
db = db_conn()
tweets = []
query = "SELECT * FROM " + db.test_table + " WHERE `dev_tweet` = 1"
retval = db.read(query)
for row in retval:
id = row[0]
time = row[2]
text = row[1]
sentiment = row[3]
negation = row[4]
tweets.append(tweet_model(id, time, text, sentiment, negation))
return tweets
def get_test_data(keyword = "", start_time = None, end_time = None):
'''Retrieve data from database for training and test as list of tweet object.'''
db = db_conn()
tweets = []
query = "SELECT * FROM " + db.test_table
where = " WHERE `tweet_text` LIKE '%" + keyword + "%' AND `dev_tweet` != 1"
if start_time != None:
where += " AND `created_at` >= '" + start_time.__str__() + "'"
if end_time != None:
where += " AND `created_at` <= '" + end_time.__str__() + "'"
order = " ORDER BY `created_at` ASC"
retval = db.read(query + where)
for row in retval:
id = row[0]
time = row[2]
        text = row[1]
sentiment = row[3]
negation = row[4]
tweets.append(tweet_model(id, time, text, sentiment, negation))
return tweets
def get_test_data_by_duration(keyword = "", start_time = None, end_time = None, duration_hour = 1):
'''return test data divide byu duration.'''
duration_second = duration_hour * 3600
    delta_duration = timedelta(0, duration_second)
cur_time = start_time
retval = []
dur_times = []
while (cur_time + delta_duration < end_time):
retval.append(get_test_data(keyword, cur_time, cur_time + delta_duration))
dur_times.append(cur_time)
cur_time += delta_duration
if (cur_time < end_time):
dur_times.append(cur_time)
retval.append(get_test_data(keyword, cur_time, end_time))
return retval, dur_times
# main function for testing only
if __name__ == '__main__':
keyword = "foke"
start_time = datetime.strptime("10-4-2012 18:00:00", '%d-%m-%Y %H:%M:%S')
end_time = datetime.strptime("18-4-2012 12:00:00", '%d-%m-%Y %H:%M:%S')
duration_hour = 6
retval, dur_times = get_test_data_by_duration(keyword, start_time, end_time, duration_hour)
num_tweet = 0
for ret in retval:
print len(ret)
num_tweet += len(ret)
print num_tweet
# write in excel
from xlwt import Workbook
from tempfile import TemporaryFile
import util
book = Workbook()
try:
sheet_idx = 1
for list_tweet in retval:
activeSheet = book.add_sheet(str(sheet_idx))
activeSheet.write(0, 0, dur_times[sheet_idx - 1].__str__())
i = 1
activeSheet.write(i, 0, 'No')
activeSheet.write(i, 1, 'Tweet Id')
activeSheet.write(i, 2, 'Created')
activeSheet.write(i, 3, 'Text')
i += 1
for tweet in list_tweet:
activeSheet.write(i, 0, str(i - 1))
activeSheet.write(i, 1, str(tweet.id))
activeSheet.write(i, 2, tweet.time.__str__())
activeSheet.write(i, 3, pp.normalize_character(tweet.text))
i += 1
sheet_idx += 1
book.save('output.xls')
book.save(TemporaryFile())
except Exception, e:
util.debug(str(e))
print 'fin'

sdispater/orator | tests/orm/test_factory.py | Python | mit | 4,197 | 0.000477
# -*- coding: utf-8 -*-
from orator.orm import Factory, Model, belongs_to, has_many
from orator.connections import SQLiteConnection
from orator.connectors import SQLiteConnector
from .. import OratorTestCase, mock
class FactoryTestCase(OratorTestCase):
@classmethod
def setUpClass(cls):
Model.set_connection_resolver(DatabaseConnectionResolver())
@classmethod
def tearDownClass(cls):
Model.unset_connection_resolver()
def connection(self):
return Model.get_connection_resolver().connection()
def schema(self):
return self.connection().get_schema_builder()
def setUp(self):
with self.schema().create("users") as table:
table.increments("id")
table.string("name").unique()
table.string("email").unique()
table.boolean("admin").default(True)
table.timestamps()
with self.schema().create("posts") as table:
table.increments("id")
table.integer("user_id")
table.string("title").unique()
table.text("content").unique()
table.timestamps()
table.foreign("user_id").references("id").on("users")
self.factory = Factory()
@self.factory.define(User)
def users_factory(faker):
return {"name": faker.name(), "email": faker.email(), "admin": False}
@self.factory.define(User, "admin")
def users_factory(faker):
attributes = self.factory.raw(User)
attributes.update({"admin": True})
return attributes
@self.factory.define(Post)
def posts_factory(faker):
return {"title": faker.sentence(), "content": faker.text()}
def tearDown(self):
self.schema().drop("posts")
self.schema().drop("users")
def test_factory_make(self):
user = self.factory.make(User)
self.assertIsInstance(user, User)
self.assertIsNotNone(user.name)
self.assertIsNotNone(user.email)
self.assertIsNone(User.where("name", user.name).first())
def test_factory_create(self):
user = self.factory.create(User)
self.assertIsInstance(user, User)
self.assertIsNotNone(user.name)
self.assertIsNotNone(user.email)
self.assertIsNotNone(User.where("name", user.name).first())
def test_factory_create_with_attributes(self):
user = self.factory.create(User, name="foo", email="[email protected]")
self.assertIsInstance(user, User)
self.assertEqual("foo", user.name)
self.assertEqual("[email protected]", user.email)
self.assertIsNotNone(User.where("name", user.name).first())
def test_factory_create_with_relations(self):
users = self.factory.build(User, 3)
        users = users.create().each(lambda u: u.posts().save(self.factory.make(Post)))
self.assertEqual(3, len(users))
self.assertIsInstance(users[0], User)
self.assertEqual(3, User.count())
self.assertEqual(3, Post.count())
def test_factory_call(self):
user = self.factory(User).create()
self.assertFalse(user.admin)
users = self.factory(User, 3).create()
self.assertEqual(3, len(users))
self.assertFalse(users[0].admin)
admin = self.factory(User, "admin").create()
self.assertTrue(admin.admin)
admins = self.factory(User, "admin", 3).create()
self.assertEqual(3, len(admins))
self.assertTrue(admins[0].admin)
class User(Model):
__guarded__ = ["id"]
@has_many("user_id")
def posts(self):
return Post
class Post(Model):
__guarded__ = []
@belongs_to("user_id")
def user(self):
return User
class DatabaseConnectionResolver(object):
_connection = None
def connection(self, name=None):
if self._connection:
return self._connection
self._connection = SQLiteConnection(
SQLiteConnector().connect({"database": ":memory:"})
)
return self._connection
def get_default_connection(self):
return "default"
def set_default_connection(self, name):
pass

dtroyer/osc-debug | oscdebug/tests/v1/test_auth.py | Python | apache-2.0 | 1,401 | 0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from oscdebug.tests import base
from oscdebug.v1 import auth
class TestAuthTypeShow(base.TestCommand):
def setUp(self):
super(TestAuthTypeShow, self).setUp()
# Get the command object to test
self.cmd = auth.ShowAuthType(self.app, None)
def test_auth_type_show(self):
arglist = [
'password',
]
verifylist = [
('auth_type', 'password'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
collist = ('name', 'options')
self.assertEqual(collist, columns)
datalist = (
'password',
mock.ANY,
)
self.assertEqual(datalist, data)

alephobjects/Cura2 | cura/Scene/GCodeListDecorator.py | Python | lgpl-3.0 | 316 | 0
from UM.Scene.SceneNodeDecorator import SceneNodeDecorator
class GCodeListDecorator(SceneNodeDecorator):
def __init__(self):
super().__init__()
self._gcode_list = []
def getGCodeList(self):
return self._gcode_list
def setGCodeList(self, list):
        self._gcode_list = list

alvarouc/ica | ica/__init__.py | Python | gpl-3.0 | 49 | 0.020408
from .ica import *
#from .ica_gpu import ica_gpu

electrolinux/weblate | weblate/accounts/tests.py | Python | gpl-3.0 | 26,044 | 0
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for user handling.
"""
import tempfile
from unittest import TestCase as UnitTestCase
from django.test import TestCase
from unittest import SkipTest
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser, User, Group
from django.core import mail
from django.test.utils import override_settings
from django.core.management import call_command
from django.http import HttpRequest, HttpResponseRedirect
from weblate.accounts.models import (
Profile,
notify_merge_failure,
notify_new_string,
notify_new_suggestion,
notify_new_comment,
notify_new_translation,
notify_new_contributor,
notify_new_language,
)
from weblate.accounts.captcha import (
hash_question, unhash_question, MathCaptcha
)
from weblate.accounts import avatar
from weblate.accounts.middleware import RequireLoginMiddleware
from weblate.accounts.models import VerifiedEmail
from weblate.trans.tests.test_views import ViewTestCase, RegistrationTestMixin
from weblate.trans.tests.utils import get_test_file
from weblate.trans.tests import OverrideSettings
from weblate.trans.models.unitdata import Suggestion, Comment
from weblate.lang.models import Language
REGISTRATION_DATA = {
'username': 'username',
'email': '[email protected]',
'first_name': 'First Last',
'captcha_id': '00',
'captcha': '9999'
}
class RegistrationTest(TestCase, RegistrationTestMixin):
clear_cookie = False
def assert_registration(self, match=None):
url = self.assert_registration_mailbox(match)
if self.clear_cookie:
del self.client.cookies['sessionid']
# Confirm account
response = self.client.get(url, follow=True)
self.assertRedirects(
response,
reverse('password')
)
@OverrideSettings(REGISTRATION_CAPTCHA=True)
def test_register_captcha(self):
# Enable captcha
response = self.client.post(
reverse('register'),
REGISTRATION_DATA
)
self.assertContains(
response,
'Please check your math and try again.'
)
@OverrideSettings(REGISTRATION_OPEN=False)
def test_register_closed(self):
# Disable registration
response = self.client.post(
reverse('register'),
REGISTRATION_DATA
)
self.assertContains(
response,
'Sorry, but registrations on this site are disabled.'
)
@OverrideSettings(REGISTRATION_CAPTCHA=False)
def test_register(self):
# Disable captcha
response = self.client.post(
reverse('register'),
REGISTRATION_DATA
)
# Check we did succeed
self.assertRedirects(response, reverse('email-sent'))
# Confirm account
self.assert_registration()
# Set password
response = self.client.post(
reverse('password'),
{
'password1': 'password',
'password2': 'password',
}
)
self.assertRedirects(response, reverse('profile'))
# Check we can access home (was redirected to password change)
response = self.client.get(reverse('home'))
self.assertContains(response, 'First Last')
user = User.objects.get(username='username')
# Verify user is active
self.assertTrue(user.is_active)
# Verify stored first/last name
self.assertEqual(user.first_name, 'First Last')
@OverrideSettings(REGISTRATION_CAPTCHA=False)
def test_register_missing(self):
# Disable captcha
response = self.client.post(
reverse('register'),
REGISTRATION_DATA
)
# Check we did succeed
self.assertRedirects(response, reverse('email-sent'))
# Confirm account
url = self.assert_registration_mailbox()
# Remove session ID from URL
url = url.split('&id=')[0]
# Delete session ID from cookies
del self.client.cookies['sessionid']
# Confirm account
response = self.client.get(url, follow=True)
self.assertRedirects(response, reverse('login'))
self.assertContains(response, 'Failed to verify your registration')
def test_reset(self):
'''
Test for password reset.
'''
User.objects.create_user('testuser', '[email protected]', 'x')
response = self.client.post(
reverse('password_reset'),
{
'email': '[email protected]'
}
)
self.assertRedirects(response, reverse('email-sent'))
self.assert_registration('[Weblate] Password reset on Weblate')
def test_wrong_username(self):
data = REGISTRATION_DATA.copy()
data['username'] = ''
response = self.client.post(
reverse('register'),
data
)
self.assertContains(
response,
'This field is required.',
)
def test_wrong_mail(self):
data = REGISTRATION_DATA.copy()
data['email'] = 'x'
response = self.client.post(
reverse('register'),
data
)
self.assertContains(
response,
'Enter a valid email address.'
)
def test_spam(self):
data = REGISTRATION_DATA.copy()
data['content'] = 'x'
response = self.client.post(
reverse('register'),
data
)
self.assertContains(
response,
'Invalid value'
)
def test_add_mail(self):
# Create user
self.test_register()
mail.outbox.pop()
# Check adding email page
response = self.client.get(
reverse('email_login')
)
self.assertContains(response, 'Register email')
# Add email account
response = self.client.post(
reverse('social:complete', kwargs={'backend': 'email'}),
{'email': '[email protected]'},
follow=True,
)
self.assertRedirects(response, reverse('email-sent'))
# Verify confirmation mail
        url = self.assert_registration_mailbox()
response = self.client.get(url, follow=True)
self.assertRedirects(
response, '{0}#auth'.format(reverse('profile'))
)
# Check database models
user = User.objects.get(username='username')
self.assertEqual(
VerifiedEmail.objects.filter(social__user=user).count(), 2
)
self.assertTrue(
VerifiedEmail.objects.filter(
social__user=user, email='[email protected]'
).exists()
)
class NoCookieRegistrationTest(RegistrationTest):
clear_cookie = True
class CommandTest(TestCase):
'''
Tests for management commands.
'''
def test_createadmin(self):
call_command('createadmin')
user = User.objects.get(username='admin')
self.assertEqual(user.first_name, 'Weblate Admin')
self.assertEqual(user.last_name, '')
self.assertFalse(user.check_password('admin'))
def test_createadmin_password(self):
call_command('createadmin', password='admin')
user = User.objects.get(username='admin')
self.assertEqual(

dnjohnstone/hyperspy | hyperspy/tests/component/test_components.py | Python | gpl-3.0 | 20,259 | 0.000346
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import inspect
import itertools
import numpy as np
import pytest
from numpy.testing import assert_allclose
import hyperspy.api as hs
from hyperspy import components1d
from hyperspy.component import Component
from hyperspy.misc.test_utils import ignore_warning
from hyperspy.models.model1d import Model1D
TRUE_FALSE_2_TUPLE = [p for p in itertools.product((True, False), repeat=2)]
def get_components1d_name_list():
components1d_name_list = []
for c_name in dir(components1d):
obj = getattr(components1d, c_name)
if inspect.isclass(obj) and issubclass(obj, Component):
components1d_name_list.append(c_name)
# Remove EELSCLEdge, since it is tested elsewhere more appropriate
components1d_name_list.remove('EELSCLEdge')
return components1d_name_list
@pytest.mark.filterwarnings("ignore:invalid value encountered in true_divide:RuntimeWarning")
@pytest.mark.filterwarnings("ignore:divide by zero encountered in true_divide:RuntimeWarning")
@pytest.mark.filterwarnings("ignore:invalid value encountered in cos:RuntimeWarning")
@pytest.mark.filterwarnings("ignore:The API of the")
@pytest.mark.parametrize('component_name', get_components1d_name_list())
def test_creation_components1d(component_name):
s = hs.signals.Signal1D(np.zeros(1024))
s.axes_manager[0].offset = 100
s.axes_manager[0].scale = 0.01
kwargs = {}
if component_name == 'ScalableFixedPattern':
kwargs['signal1D'] = s
elif component_name == 'Expression':
kwargs.update({'expression': "a*x+b", "name": "linear"})
component = getattr(components1d, component_name)(**kwargs)
component.function(np.arange(1, 100))
m = s.create_model()
m.append(component)
class TestPowerLaw:
def setup_method(self, method):
s = hs.signals.Signal1D(np.zeros(1024))
s.axes_manager[0].offset = 100
s.axes_manager[0].scale = 0.01
m = s.create_model()
m.append(hs.model.components1D.PowerLaw())
m[0].A.value = 1000
m[0].r.value = 4
self.m = m
self.s = s
    @pytest.mark.parametrize(("only_current", "binned"), TRUE_FALSE_2_TUPLE)
def test_estimate_parameters(self, only_current, binned):
self.m.signal.metadata.Signal.binned = binned
s = self.m.as_signal(parallel=False)
assert s.metadata.Signal.binned == binned
g = hs.model.components1D.PowerLaw()
g.estimate_parameters(s, None, None, only_current=only_current)
A_value = 1008.4913 if binned else 1006.4378
r_value = 4.001768 if binned else 4.001752
assert_allclose(g.A.value, A_value)
assert_allclose(g.r.value, r_value)
if only_current:
A_value, r_value = 0, 0
# Test that it all works when calling it with a different signal
s2 = hs.stack((s, s))
g.estimate_parameters(s2, None, None, only_current=only_current)
assert_allclose(g.A.map["values"][1], A_value)
assert_allclose(g.r.map["values"][1], r_value)
def test_EDS_missing_data(self):
g = hs.model.components1D.PowerLaw()
s = self.m.as_signal(parallel=False)
s2 = hs.signals.EDSTEMSpectrum(s.data)
g.estimate_parameters(s2, None, None)
def test_function_grad_cutoff(self):
pl = self.m[0]
pl.left_cutoff.value = 105.0
axis = self.s.axes_manager[0].axis
for attr in ['function', 'grad_A', 'grad_r', 'grad_origin']:
values = getattr(pl, attr)((axis))
assert_allclose(values[:501], np.zeros((501)))
assert getattr(pl, attr)((axis))[500] == 0
getattr(pl, attr)((axis))[502] > 0
def test_exception_gradient_calculation(self):
# if this doesn't warn, it means that sympy can compute the gradients
# and the power law component can be updated.
with pytest.warns(UserWarning):
hs.model.components1D.PowerLaw(compute_gradients=True)
class TestDoublePowerLaw:
def setup_method(self, method):
s = hs.signals.Signal1D(np.zeros(1024))
s.axes_manager[0].offset = 100
s.axes_manager[0].scale = 0.1
m = s.create_model()
m.append(hs.model.components1D.DoublePowerLaw())
m[0].A.value = 1000
m[0].r.value = 4
m[0].ratio.value = 200
self.m = m
@pytest.mark.parametrize(("binned"), (True, False))
def test_fit(self, binned):
self.m.signal.metadata.Signal.binned = binned
s = self.m.as_signal(parallel=False)
assert s.metadata.Signal.binned == binned
g = hs.model.components1D.DoublePowerLaw()
# Fix the ratio parameter to test the fit
g.ratio.free = False
g.ratio.value = 200
m = s.create_model()
m.append(g)
m.fit_component(g, signal_range=(None, None))
assert_allclose(g.A.value, 1000.0)
assert_allclose(g.r.value, 4.0)
assert_allclose(g.ratio.value, 200.)
class TestOffset:
def setup_method(self, method):
s = hs.signals.Signal1D(np.zeros(10))
s.axes_manager[0].scale = 0.01
m = s.create_model()
m.append(hs.model.components1D.Offset())
m[0].offset.value = 10
self.m = m
@pytest.mark.parametrize(("only_current", "binned"), TRUE_FALSE_2_TUPLE)
def test_estimate_parameters(self, only_current, binned):
self.m.signal.metadata.Signal.binned = binned
s = self.m.as_signal(parallel=False)
assert s.metadata.Signal.binned == binned
o = hs.model.components1D.Offset()
o.estimate_parameters(s, None, None, only_current=only_current)
assert_allclose(o.offset.value, 10)
def test_function_nd(self):
s = self.m.as_signal(parallel=False)
s = hs.stack([s] * 2)
o = hs.model.components1D.Offset()
o.estimate_parameters(s, None, None, only_current=False)
axis = s.axes_manager.signal_axes[0]
assert_allclose(o.function_nd(axis.axis), s.data)
@pytest.mark.filterwarnings("ignore:The API of the `Polynomial` component")
class TestDeprecatedPolynomial:
def setup_method(self, method):
s = hs.signals.Signal1D(np.zeros(1024))
s.axes_manager[0].offset = -5
s.axes_manager[0].scale = 0.01
m = s.create_model()
m.append(hs.model.components1D.Polynomial(order=2))
coeff_values = (0.5, 2, 3)
self.m = m
s_2d = hs.signals.Signal1D(np.arange(1000).reshape(10, 100))
self.m_2d = s_2d.create_model()
self.m_2d.append(hs.model.components1D.Polynomial(order=2))
s_3d = hs.signals.Signal1D(np.arange(1000).reshape(2, 5, 100))
self.m_3d = s_3d.create_model()
self.m_3d.append(hs.model.components1D.Polynomial(order=2))
# if same component is pased, axes_managers get mixed up, tests
# sometimes randomly fail
for _m in [self.m, self.m_2d, self.m_3d]:
_m[0].coefficients.value = coeff_values
def test_gradient(self):
c = self.m[0]
np.testing.assert_array_almost_equal(c.grad_coefficients(1),
np.array([[6, ], [4.5], [3.5]]))
assert c.grad_coefficients(np.arange(10)).shape == (3, 10)
@pytest.mark.parametrize(("only_current", "binned"), TRUE_FALSE_2_TUPLE)
def test_estimate_parameters(self, only_current, binned):
self.m.signal.metadata.Signal.binned = binned

thegodone/pyms | Experiment/Class.py | Python | gpl-2.0 | 3,288 | 0.012165
"""
Models a GC-MS experiment represented by a list of signal peaks
"""
#############################################################################
# #
# PyMS software for processing of metabolomic mass-spectrometry
|
data #
# Copyright (C) 2005-2012 Vladimir Likic #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. #
# #
#############################################################################
from pyms.Utils.Error import error
from pyms.Utils.Utils import is_str
from pyms.Peak.Class import Peak
from pyms.Peak.List.Utils import is_peak_list, sele_peaks_by_rt
class Experiment:
"""
@summary: Models an experiment object
@author: Vladimir Likic
@author: Andrew Isaac
"""
def __init__(self, expr_code, peak_list):
"""
@summary: Models an experiment
@param expr_code: Unique identifier for the experiment
@type expr_code: StringType
@param peak_list: A list of peak objects
@type peak_list: ListType
"""
if not is_str(expr_code):
error("'expr_code' must be a string")
if not is_peak_list(peak_list):
error("'peak_list' must be a list of Peak objects")
self.__expr_code = expr_code
self.__peak_list = peak_list
def get_expr_code(self):
"""
@summary: Returns the expr_code of the experiment
@return: The expr_code of the experiment
@rtype: StringType
"""
return self.__expr_code
def get_peak_list(self):
"""
@summary: Returns the peak list
@return: A list of peak objects
@rtype: ListType
"""
return self.__peak_list
def sele_rt_range(self, rt_range):
"""
@summary: Discards all peaks which have the retention time outside
the specified range
@param rt_range: Min, max retention time given as a list [rt_min,rt_max]
@type rt_range: ListType
@return: none
@rtype: NoneType
"""
peaks_sele = sele_peaks_by_rt(self.__peak_list, rt_range)
self.__peak_list = peaks_sele
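# Hedged usage sketch (added for illustration, not part of the original module):
# `peaks` is assumed to be a list of Peak objects built elsewhere; only the
# methods defined above are exercised.
def _example_experiment(peaks):
    expr = Experiment("expr_01", peaks)
    # keep only peaks eluting between 6.5 and 21 minutes (retention times in seconds)
    expr.sele_rt_range([6.5 * 60.0, 21.0 * 60.0])
    return expr.get_expr_code(), len(expr.get_peak_list())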
|
kevinharvey/django-tourney
|
tourney/players/apps.py
|
Python
|
gpl-3.0
| 89
| 0
|
from django.apps import AppConfig
class PlayersConfig(AppConfig):
name = 'players'
| |
adamjmcgrath/fridayfilmclub
|
src/tests/test_model_league.py
|
Python
|
mpl-2.0
| 1,925
| 0.002597
|
#!/usr/bin/python
#
# Copyright Friday Film Club. All Rights Reserved.
"""League unit tests."""
__author__ = '[email protected] (Adam McGrath)'
import unittest
import base
import helpers
import models
class LeagueTestCase(base.TestCase):
def testPostPutHook(self):
league_owner = helpers.user()
league_member_1 = helpers.user()
league_member_2 = helpers.user()
    league = models.League(name='Foo',
owner=league_owner.put(),
users=[league_member_1.put(), league_member_2.put()])
    league_key = league.put()
self.assertListEqual(league_owner.leagues, [league_key])
self.assertListEqual(league_member_1.leagues, [league_key])
self.assertListEqual(league_member_2.leagues, [league_key])
league.users = [league_member_2.key]
league.put()
self.assertListEqual(league_member_1.leagues, [])
self.assertListEqual(league_member_2.leagues, [league_key])
def testPostDeleteHook(self):
league_owner = helpers.user()
league_member_1 = helpers.user()
league_member_2 = helpers.user()
league = models.League(name='Foo',
owner=league_owner.put(),
users=[league_member_1.put(), league_member_2.put()])
league_key = league.put()
self.assertListEqual(league_owner.leagues, [league_key])
self.assertListEqual(league_member_1.leagues, [league_key])
self.assertListEqual(league_member_2.leagues, [league_key])
league.key.delete()
self.assertListEqual(league_owner.leagues, [])
self.assertListEqual(league_member_1.leagues, [])
self.assertListEqual(league_member_2.leagues, [])
def testGetByName(self):
league = models.League(name='Foo',
owner=helpers.user().put())
league.put()
self.assertEqual(models.League.get_by_name('foo'), league)
if __name__ == '__main__':
unittest.main()
|
codeofdusk/ProjectMagenta
|
src/accessible_output2/__init__.py
|
Python
|
gpl-2.0
| 885
| 0.027119
|
import ctypes
import os
import types
from platform_utils import paths
def load_library(libname):
if paths.is_frozen():
        libfile = os.path.join(paths.embedded_data_path(), 'accessible_output2', 'lib', libname)
else:
        libfile = os.path.join(paths.module_path(), 'lib', libname)
return ctypes.windll[libfile]
def get_output_classes():
import outputs
module_type = types.ModuleType
classes = [m.output_class for m in outputs.__dict__.itervalues() if type(m) == module_type and hasattr(m, 'output_class')]
return sorted(classes, key=lambda c: c.priority)
def find_datafiles():
import os
import platform
from glob import glob
import accessible_output2
if platform.system() != 'Windows':
return []
path = os.path.join(accessible_output2.__path__[0], 'lib', '*.dll')
results = glob(path)
dest_dir = os.path.join('accessible_output2', 'lib')
return [(dest_dir, results)]
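# Added note (an assumption, not stated in the original file): the (dest_dir, files)
# pairs returned above match the shape expected by the data_files argument of
# distutils/py2exe-style setup scripts, e.g. setup(..., data_files=find_datafiles()).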
|
dchud/sentinel
|
canary/study.py
|
Python
|
mit
| 63,030
| 0.009107
|
# $Id$
import copy
import logging
import time
import traceback
import types
from quixote import form2
from quixote.html import htmltext
import canary.context
from canary.gazeteer import Feature
from canary.qx_defs import MyForm
from canary.utils import DTable, render_capitalized
import dtuple
class ExposureRoute (DTable):
# A Methodology can have one to many ROUTEs
ROUTE = {
'-': -1,
'ingestion' : 1,
'inhalation' : 2,
'mucocutaneous' : 3,
'vector' : 4,
'other' : 5,
}
def __init__ (self):
self.uid = -1
self.study_id = -1
self.methodology_id = -1
self.route = self.ROUTE['-']
def __str__ (self):
out = []
out.append('<Route uid=%s study_id=%s' % (self.uid, self.study_id))
out.append('\troute=%s' % self.get_text_value(self.ROUTE, self.route))
out.append('\tmethodology_id=%s' % self.methodology_id)
out.append('/>')
return '\n'.join(out)
def get_text_value (self, lookup_table, value):
for k, v in lookup_table.iteritems():
if v == value:
return k
return ''
def set_route (self, route):
if type(route) is types.StringType:
if route in self.ROUTE.keys():
self.route = self.ROUTE[route]
elif type(route) is types.IntType:
if route in self.ROUTE.values():
self.route = route
def get_route (self, text=False):
if text:
return self.get_text_value(self.ROUTE, self.route)
else:
return self.route
def delete (self, context):
"""
Delete this route from the database.
"""
cursor = context.get_cursor()
if not self.uid == -1:
try:
cursor.execute("""
DELETE FROM exposure_routes
WHERE uid = %s
""", self.uid)
except Exception, e:
context.logger.error('ExposureRoute: %s (%s)', self.uid, e)
def save (self, context):
cursor = context.get_cursor()
if self.uid == -1:
cursor.execute("""
INSERT INTO exposure_routes
(uid, study_id, methodology_id, route)
VALUES
(NULL, %s, %s, %s)
""", (self.study_id, self.methodology_id, self.route)
)
self.uid = self.get_new_uid(context)
else:
            # Assume all calls to save() are after all routes have been removed
# already by "DELETE FROM exposure_routes" in methodology.save()
try:
cursor.execute("""
INSERT INTO exposure_routes
(uid, study_id, methodology_id, route)
VALUES
(%s, %s, %s, %s)
""", (self.uid, self.study_id, self.methodology_id, self.route)
)
except Exception, e:
context.logger.error('ExposureRoute: %s (%s)', self.uid, e)
# FIXME: should this be set from the SQL?
self.date_modified = time.strftime(str('%Y-%m-%d'))
class Methodology (DTable):
TABLE_NAME = 'methodologies'
# A Methodology must have one TYPE
TYPES = {
'experimental' : 1,
'descriptive' : 2,
'aggregate' : 3,
'cross sectional' : 4,
'cohort' : 5,
'case control' : 6,
'disease model' : 7,
}
# A Methodology can have at most one TIMING
TIMING = {
'-': -1,
'unknown' : 0,
'historical' : 1,
'concurrent' : 2,
'repeated' : 3,
'mixed' : 4,
}
# A Methodology can have at most one SAMPLING
SAMPLING = {
'-': -1,
'unknown' : 0,
'exposure' : 1,
'outcome' : 2,
'both' : 3,
}
# A Methodology can have at most one CONTROLS
CONTROLS = {
'-': -1,
'no' : 0,
'yes' : 1,
'both' : 2,
}
def __init__ (self, uid=-1):
self.uid = uid
self.study_id = -1
self.study_type_id = -1
self.sample_size = ''
self.timing = -1
self.sampling = -1
self.controls = -1
self.is_mesocosm = False
self.is_enclosure = False
self.exposure_routes = []
self.comments = ''
self.date_modified = None
self.date_entered = None
def __str__ (self):
out = []
out.append('<Methodology uid=%s study_id=%s' % (self.uid, self.study_id))
out.append('\tstudy_type=%s' % self.get_text_value(self.TYPES, self.study_type_id))
out.append('\tsample_size=%s' % self.sample_size)
for item in ['timing', 'sampling', 'controls', 'exposure_routes']:
out.append('\t%s=%s' % (item, getattr(self, 'get_' + item)(text=True)))
out.append('\tis_mesocosm=%s, is_enclosure=%s' % (self.is_mesocosm, self.is_enclosure))
out.append('\tcomments=%s' % self.comments or '')
out.append('/>')
return '\n'.join(out)
def evidence_level (self):
"""
Return the evidence level relative to the type of study
performed.
"""
text_value = self.get_text_value(self.TYPES, self.study_type_id)
if text_value in ['experimental', 'cohort']:
return 3
elif text_value in ['case control', 'cross sectional', 'aggregate']:
return 2
elif text_value in ['descriptive', 'disease model']:
return 1
else:
return 0
def get_text_value (self, lookup_table, value):
for k, v in lookup_table.iteritems():
if v == value:
return k
return ''
def set_timing (self, timing):
if type(timing) is types.StringType:
if timing in self.TIMING.keys():
self.timing = self.TIMING[timing]
elif type(timing) is types.IntType:
if timing in self.TIMING.values():
self.timing = timing
def get_timing (self, text=False):
if text:
return self.get_text_value(self.TIMING, self.timing)
else:
return self.timing
def set_sampling (self, sampling):
if type(sampling) is types.StringType:
if sampling in self.SAMPLING.keys():
self.sampling = self.SAMPLING[sampling]
elif type(sampling) is types.IntType:
if sampling in self.SAMPLING.values():
self.sampling = sampling
def get_sampling (self, text=False):
if text:
return self.get_text_value(self.SAMPLING, self.sampling)
else:
return self.sampling
def set_controls (self, controls):
if type(controls) is types.StringType:
if controls in self.CONTROLS.keys():
self.controls = self.CONTROLS[controls]
elif type(controls) is types.IntType:
if controls in self.CONTROLS.values():
self.controls = controls
def get_controls (self, text=False):
if text:
return self.get_text_value(self.CONTROLS, self.controls)
else:
return self.controls
def set_routes (self, routes):
for route in routes:
self.add_route(route)
# Remove routes no longer specified
for route in self.exposure_routes:
if not route.get_route() in [r.get_route() for r in routes]:
self.exposure_routes.remove(route)
def add_route (self, route):
if not route.get_route() in [r.get_route() for r in self.exposure_routes]:
route.methodology_id = self.uid
route.study_id = self.study_id
self.expos
|
jbarmash/rhaptos2.user
|
rhaptos2/user/cnxbase.py
|
Python
|
agpl-3.0
| 1,673
| 0.003586
|
#!/usr/bin/env python
#! -*- coding: utf-8 -*-
###
# Copyright (c) Rice University 2012-13
# This software is subject to
# the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
###
"""
This exists solely to provide less typing for a "leaf node"
in a simple relational schema (1:M and 1:M-N:1) when used with SQLAlchemy.
SA does not support class-based inheritance in the normal Python way for objects inheriting from Base, so we have those objects perform multiple inheritance instead.
"""
import json
import sqlalchemy.types
import datetime
class CNXBase():
def from_dict(self, userprofile_dict):
"""
        Should test for schema validity etc.
"""
d = userprofile_dict
for k in d:
setattr(self, k, d[k])
def to_dict(self):
"""Return self as a dict, suitable for jsonifying """
d = {}
for col in self.__table__.columns:
d[col.name] = self.safe_type_out(col)
return d
def jsonify(self):
"""Helper function that returns simple json repr """
selfd = self.to_dict()
jsonstr = json.dumps(selfd) # here use the Json ENcoder???
        return jsonstr
    def safe_type_out(self, col):
        """return the value of a column field safely as something that
        json can use. This is essentially a JSONEncoder subclass
        inside this object.
"""
        if isinstance(col.type, sqlalchemy.types.DateTime):  # compare the column's type instance so DateTime columns get isoformatted
outstr = getattr(self, col.name).isoformat()
else:
outstr = getattr(self, col.name)
return outstr
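

# Hedged usage sketch (added for illustration; assumes the SQLAlchemy declarative
# API of this module's era, and an illustrative table that is not part of rhaptos2):
# a model class mixes CNXBase in, exactly as the module docstring describes.
from sqlalchemy import Column, DateTime, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()


class ExampleProfile(Base, CNXBase):
    __tablename__ = 'example_profile'
    id = Column(Integer, primary_key=True)
    fullname = Column(String)
    created = Column(DateTime)


def _demo_cnxbase():
    profile = ExampleProfile()
    profile.from_dict({'id': 1,
                       'fullname': 'Ada Lovelace',
                       'created': datetime.datetime(2013, 1, 1)})
    # jsonify() serialises the DateTime column via isoformat() in safe_type_out()
    return profile.jsonify()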
|
bashu/django-facebox
|
example/urls.py
|
Python
|
bsd-3-clause
| 266
| 0.003759
|
from django.conf.urls import url
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='homepage.html')),
    url(r'^remote.html$', TemplateView.as_view(template_name='remote.html'), name="remote.html"),
]
|
nanditav/15712-TensorFlow
|
tensorflow/contrib/metrics/python/ops/metric_ops_test.py
|
Python
|
apache-2.0
| 163,728
| 0.009143
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.metrics.python.ops import metric_ops
NAN = float('nan')
metrics = tf.contrib.metrics
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(queue.enqueue(tf.constant(values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_sparse_value(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return tf.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64),
np.array(shape, np.int64))
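# Added worked example (not in the original test file) of the conversion above:
# for labels = [[0, 1, 0], [1, 0, 1]] the loop emits
#   indices = [[0, 0], [1, 0], [1, 1]], values = [1, 0, 2], shape = [2, 3],
# i.e. each value is the class id of a 1-entry and each index pairs the batch row
# with the running count of 1-entries seen so far in that row.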
def _binary_2d_label_to_sparse(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return tf.SparseTensor.from_value(_binary_2d_label_to_sparse_value(labels))
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return tf.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64),
np.array(shape, np.int64))
def _binary_3d_label_to_sparse(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return tf.SparseTensor.from_value(_binary_3d_label_to_sparse_value(labels))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
class StreamingMeanTest(tf.test.TestCase):
def setUp(self):
tf.reset_default_graph()
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean(
tf.ones([4, 3]),
metrics_collections=[my_collection_name])
self.assertListEqual(tf.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean(
tf.ones([4, 3]),
updates_collections=[my_collection_name])
self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(tf.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(tf.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4/6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
tf.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = (
(0, 1),
(-4.2, 9.1),
(6.5, 0),
(-3.2, 4.0)
)
values = tf.placeholder(dtype=tf.float32)
# Create the queue that populates the weighted labels.
weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
tf.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(se
|
tvtsoft/odoo8
|
addons/crm/models/crm_activity.py
|
Python
|
agpl-3.0
| 2,406
| 0.001663
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import api, fields, models
class CrmActivity(models.Model):
    ''' CrmActivity is a model introduced in Odoo v9 that models activities
performed in CRM, like phonecalls, sending emails, making demonstrations,
... Users are able to configure their custom activities.
Each activity has up to three next activities. This allows to model light
custom workflows. This way sales manager can configure their crm workflow
that salepersons will use in their daily job.
CrmActivity inherits from mail.message.subtype. This allows users to follow
some activities through subtypes. Each activity will generate messages with
the matching subtypes, allowing reporting and statistics computation based
on mail.message.subtype model. '''
_name = 'crm.activity'
_description = 'CRM Activity'
_inherits = {'mail.message.subtype': 'subtype_id'}
_rec_name = 'name'
_order = "sequence"
days = fields.Integer('Number of days', default=0,
        help='Number of days before fulfilling the action, allowing to plan the action date.')
sequence = fields.Integer('Sequence', default=0)
team_id = fields.Many2one('crm.team', string='Sales Team')
subtype_id = fields.Many2one('mail.message.subtype', string='Message Subtype', required=True, ondelete='cascade')
activity_1_id = fields.Many2one('crm.activity', string="Next Activity 1")
activity_2_id = fields.Many2one('crm.activity', string="Next Activity 2")
activity_3_id = fields.Many2one('crm.activity', string="Next Activity 3")
@api.model
def create(self, values):
''' Override to set the res_model of inherited subtype to crm.lead.
This cannot be achieved using a default on res_model field because
of the inherits. Indeed a new field would be created. However the
field on the subtype would still exist. Being void, the subtype
will be present for every model in Odoo. That's quite an issue. '''
if not values.get('res_model') and 'default_res_model' not in self._context:
values['res_model'] = 'crm.lead'
if 'internal' not in values and 'default_internal' not in self._context:
values['internal'] = True
return super(CrmActivity, self).create(values)
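
# Hedged usage sketch (added; the field values below are illustrative only, and the
# self.env call assumes ordinary Odoo server-side model code): creating a custom
# activity relies on the create() override above to default res_model to 'crm.lead'.
#
#     self.env['crm.activity'].create({
#         'name': 'Demonstration call',
#         'days': 3,
#         'sequence': 10,
#     })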
|
waile23/todo
|
utils/xwjemail.py
|
Python
|
mit
| 2,175
| 0.063391
|
# coding: utf-8
'''
Created on 2012-8-30
@author: shanfeng
'''
import smtplib
from email.mime.text import MIMEText
import urllib
import web
class XWJemail:
'''
classdocs
'''
def __init__(self, params):
'''
Constructor
'''
        pass
@staticmethod
def sendfindpass(user,hash):
link = "%s/account/newpass?%s" %(web.ctx.sitehost,urllib.urlencode({'email':user.u_email,"v":hash}))
mail_body = """
<html>
<head></head>
<body>
<h4>%s,你好</h4>
您刚才在 liulin.info 申请了找回密码。<br>
请点击下面的链接来重置密码:<br>
<a href="%s">%s</a><br>
            如果无法点击上面的链接,您可以复制该地址,并粘帖在浏览器的地址栏中访问。<br>
</body>
</html>
""" % (web.utf8(user.u_name),link,link)
#mail_body = web.utf8(mail_body)
if isinstance(mail_body,unicode):
mail_body = str(mail_body)
mail_from = "liulin.info<[email protected]>"
mail_to = user.u_email
mail_subject = 'liulin.info重置密码邮件'
msg = MIMEText(mail_body,'html','utf-8')
#msg=MIMEText(mail_body,'html')
if not isinstance(mail_subject,unicode):
mail_subject = unicode(mail_subject)
msg['Subject']= mail_subject
msg['From']=mail_from
msg['To'] = mail_to
msg["Accept-Language"]="zh-CN"
msg["Accept-Charset"]="ISO-8859-1,utf-8"
smtp=smtplib.SMTP()
smtp.connect('smtp.163.com')
smtp.login('[email protected]','831112')
smtp.sendmail(mail_from,mail_to,msg.as_string())
smtp.quit()
def sendMail(mailto,subject,body,format='plain'):
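    # Added note (not in the original file): this helper expects module-level
    # settings such as fromMail, _mailFrom, Header, host, user and password to be
    # defined elsewhere; they are not defined in this file as shown.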
if isinstance(body,unicode):
body = str(body)
me= ("%s<"+fromMail+">") % (Header(_mailFrom,'utf-8'),)
msg = MIMEText(body,format,'utf-8')
if not isinstance(subject,unicode):
subject = unicode(subject)
msg['Subject'] = subject
msg['From'] = me
msg['To'] = mailto
msg["Accept-Language"]="zh-CN"
msg["Accept-Charset"]="ISO-8859-1,utf-8"
try:
s = smtplib.SMTP()
s.connect(host)
s.login(user,password)
s.sendmail(me, mailto, msg.as_string())
s.close()
return True
except Exception, e:
print str(e)
return False
|
schimar/hts_tools
|
vcf2nex012.py
|
Python
|
gpl-2.0
| 1,837
| 0.004355
|
#!/usr/bin/python
# This script reads through a genotype likelihood file and the respective mean genotype likelihood file. It writes a nexus file for all individuals and the given genotypes, with '0' for ref homozygote, '1' for heterozygote, and '2' for alt homozygote.
# Usage: ~/vcf2nex012.py pubRetStriUG_unlnkd.gl pntest_pubRetStriUG_unlnkd.txt
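# Added worked illustration (not in the original script) of the coding used below:
# a posterior mean genotype of 0.13 rounds to 0 (reference homozygote), 0.94 rounds
# to 1 (heterozygote) and 1.88 rounds to 2 (alternate homozygote).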
from sys import argv
# read genotype likelihood file to get scaf
|
fold:bp (which is not in the same order as the vcf file, resulting from vcf2gl.py)
with open(argv[1], 'rb') as gl_file:
scafPos_gl = list()
for line in gl_file:
if line.split(' ')[0] == '65':
continue
elif line.split(' ')[0] == 'CR1043':
            ind_id = line.split(' ')
            ind_id[len(ind_id)-1] = ind_id[len(ind_id)-1].split('\n')[0]
else:
scafPos_gl.append(line.split(' ')[0])
# read the file with mean genotypes
with open(argv[2], 'rb') as mean_gt_file:
ind_dict = dict()
for line in mean_gt_file:
gt_line = line.split(' ')
for i, ind in enumerate(ind_id):
if not ind in ind_dict:
gt_line[i]
ind_dict[ind] = [float(gt_line[i])]
else:
ind_dict[ind].append(float(gt_line[i]))
# parse the mean genotypes and write the proper bases
for key, value in ind_dict.iteritems():
newline = list()
for i, pos in enumerate(scafPos_gl):
if round(float(value[i])) == 0:
newline.append(str(0))
elif round(float(value[i])) == 1:
newline.append(str(1))
elif round(float(value[i])) == 2:
newline.append(str(2))
else:
continue
print str(key + '\t' + ''.join(newline))
#print scafPos_gl
#for key, value in iter(refp_dict.iteritems()):
# print key, ''.join(value)
|
hainm/pythran
|
pythran/optimizations/list_comp_to_map.py
|
Python
|
bsd-3-clause
| 2,611
| 0
|
""" ListCompToMap transforms list comprehension into intrinsics. """
from pythran.analyses import OptimizableComprehension
from pythran.passmanager import Transformation
from pythran.transformations import NormalizeTuples
import ast
class ListCompToMap(Transformation):
'''
Transforms list comprehension into intrinsics.
>>> import ast
>>> from pythran import passmanager, backend
    >>> node = ast.parse("[x*x for x in range(10)]")
>>> pm = passmanager.PassManager("test")
>>> _, node = pm.apply(ListCompToMap, node)
>>> print pm.dump(backend.Python, node)
__builtin__.map((lambda x: (x * x)), range(10))
'''
def __init__(self):
Transformation.__init__(self, NormalizeTuples,
OptimizableComprehension)
def make_Iterator(self, gen):
if gen.ifs:
ldFilter = ast.Lambda(
ast.arguments([ast.Name(gen.target.id, ast.Param())],
None, None, []), ast.BoolOp(ast.And(), gen.ifs))
ifilterName = ast.Attribute(
value=ast.Name(id='itertools', ctx=ast.Load()),
attr='ifilter', ctx=ast.Load())
return ast.Call(ifilterName, [ldFilter, gen.iter], [], None, None)
else:
return gen.iter
def visit_ListComp(self, node):
if node in self.optimizable_comprehension:
self.update = True
self.generic_visit(node)
iterList = []
varList = []
for gen in node.generators:
iterList.append(self.make_Iterator(gen))
varList.append(ast.Name(gen.target.id, ast.Param()))
# If dim = 1, product is useless
if len(iterList) == 1:
iterAST = iterList[0]
varAST = ast.arguments([varList[0]], None, None, [])
else:
prodName = ast.Attribute(
value=ast.Name(id='itertools', ctx=ast.Load()),
attr='product', ctx=ast.Load())
iterAST = ast.Call(prodName, iterList, [], None, None)
varAST = ast.arguments([ast.Tuple(varList, ast.Store())],
None, None, [])
mapName = ast.Attribute(
value=ast.Name(id='__builtin__', ctx=ast.Load()),
attr='map', ctx=ast.Load())
ldBodymap = node.elt
ldmap = ast.Lambda(varAST, ldBodymap)
return ast.Call(mapName, [ldmap, iterAST], [], None, None)
else:
return self.generic_visit(node)
|
dpimenov/tvdb_api
|
tests/gprof2dot.py
|
Python
|
unlicense
| 53,218
| 0.004209
|
#!/usr/bin/env python
#
# Copyright 2008 Jose Fonseca
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Generate a dot graph from the output of several profilers."""
__author__ = "Jose Fonseca"
__version__ = "1.0"
import sys
import math
import os.path
import re
import textwrap
import optparse
try:
# Debugging helper module
import debug
except ImportError:
pass
def percentage(p):
return "%.02f%%" % (p*100.0,)
def add(a, b):
return a + b
def equal(a, b):
if a == b:
return a
else:
return None
def fail(a, b):
assert False
def ratio(numerator, denominator):
numerator = float(numerator)
denominator = float(denominator)
assert 0.0 <= numerator
assert numerator <= denominator
try:
return numerator/denominator
except ZeroDivisionError:
# 0/0 is undefined, but 1.0 yields more useful results
return 1.0
class UndefinedEvent(Exception):
"""Raised when attempting to get an event which is undefined."""
def __init__(self, event):
Exception.__init__(self)
self.event = event
def __str__(self):
return 'unspecified event %s' % self.event.name
class Event(object):
"""Describe a kind of event, and its basic operations."""
def __init__(self, name, null, aggregator, formatter = str):
self.name = name
self._null = null
self._aggregator = aggregator
self._formatter = formatter
def __eq__(self, other):
return self is other
def __hash__(self):
return id(self)
def null(self):
return self._null
def aggregate(self, val1, val2):
"""Aggregate two event values."""
assert val1 is not None
assert val2 is not None
return self._aggregator(val1, val2)
def format(self, val):
"""Format an event value."""
assert val is not None
return self._formatter(val)
MODULE = Event("Module", None, equal)
PROCESS = Event("Process", None, equal)
CALLS = Event("Calls", 0, add)
SAMPLES = Event("Samples", 0, add)
TIME = Event("Time", 0.0, add, lambda x: '(' + str(x) + ')')
TIME_RATIO = Event("Time ratio", 0.0, add, lambda x: '(' + percentage(x) + ')')
TOTAL_TIME = Event("Total time", 0.0, fail)
TOTAL_TIME_RATIO = Event("Total time ratio", 0.0, fail, percentage)
CALL_RATIO = Event("Call ratio", 0.0, add, percentage)
PRUNE_RATIO = Event("Prune ratio", 0.0, add, percentage)
class Object(object):
"""Base class for all objects in profile which can store events."""
def __init__(self, events=None):
if events is None:
self.events = {}
else:
self.events = events
def __hash__(self):
return id(self)
def __eq__(self, other):
return self is other
def __contains__(self, event):
return event in self.events
def __getitem__(self, event):
try:
return self.events[event]
except KeyError:
raise UndefinedEvent(event)
def __setitem__(self, event, value):
if value is None:
if event in self.events:
del self.events[event]
else:
self.events[event] = value
class Call(Object):
"""A call between functions.
There should be at most one call object for every pair of functions.
"""
def __init__(self, callee_id):
Object.__init__(self)
self.callee_id = callee_id
class Function(Object):
"""A function."""
def __init__(self, id, name):
Object.__init__(self)
self.id = id
self.name = name
self.calls = {}
self.cycle = None
def add_call(self, call):
if call.callee_id in self.calls:
sys.stderr.write('warning: overwriting call from function %s to %s\n' % (str(self.id), str(call.callee_id)))
self.calls[call.callee_id] = call
# TODO: write utility functions
def __repr__(self):
return self.name
class Cycle(Object):
"""A cycle made from recursive function calls."""
def __init__(self):
Object.__init__(self)
# XXX: Do cycles need an id?
self.functions = set()
def add_function(self, function):
assert function not in self.functions
self.functions.add(function)
# XXX: Aggregate events?
if function.cycle is not None:
for other in function.cycle.functions:
if function not in self.functions:
self.add_function(other)
        function.cycle = self
class Profile(Object):
"""The whole profile."""
def __init__(self):
Object.__init__(self)
self.functions = {}
self.cycles = []
    def add_function(self, function):
if function.id in self.functions:
sys.stderr.write('warning: overwriting function %s (id %s)\n' % (function.name, str(function.id)))
self.functions[function.id] = function
def add_cycle(self, cycle):
self.cycles.append(cycle)
def validate(self):
"""Validate the edges."""
for function in self.functions.itervalues():
for callee_id in function.calls.keys():
assert function.calls[callee_id].callee_id == callee_id
if callee_id not in self.functions:
sys.stderr.write('warning: call to undefined function %s from function %s\n' % (str(callee_id), function.name))
del function.calls[callee_id]
def find_cycles(self):
"""Find cycles using Tarjan's strongly connected components algorithm."""
# Apply the Tarjan's algorithm successively until all functions are visited
visited = set()
for function in self.functions.itervalues():
if function not in visited:
self._tarjan(function, 0, [], {}, {}, visited)
cycles = []
for function in self.functions.itervalues():
if function.cycle is not None and function.cycle not in cycles:
cycles.append(function.cycle)
self.cycles = cycles
if 0:
for cycle in cycles:
sys.stderr.write("Cycle:\n")
for member in cycle.functions:
sys.stderr.write("\t%s\n" % member.name)
def _tarjan(self, function, order, stack, orders, lowlinks, visited):
"""Tarjan's strongly connected components algorithm.
See also:
- http://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm
"""
visited.add(function)
orders[function] = order
lowlinks[function] = order
order += 1
pos = len(stack)
stack.append(function)
for call in function.calls.itervalues():
callee = self.functions[call.callee_id]
# TODO: use a set to optimize lookup
if callee not in orders:
order = self._tarjan(callee, order, stack, orders, lowlinks, visited)
lowlinks[function] = min(lowlinks[function], lowlinks[callee])
elif callee in stack:
lowlinks[function] = min(lowlinks[function], orders[callee])
if lowlinks[function] == orders[function]:
# Strongly connected component found
members = stack[pos:]
del stack[pos:]
if len(members) > 1:
cycle = Cycle()
for member in members:
cycle.add_function(member)
return order
def cal
|
QTB-HHU/ModelHeatShock
|
HSM_ODEsSystem10or9eqs.py
|
Python
|
gpl-3.0
| 5,759
| 0.009029
|
from HSM_Reactions import *
########## RIGHT MEMBERS OF ODEs, rewritten with only 10 equations to isolate those that are independent ##############
def f10eqs(t, y, ksetDict, TparamSet, REACparamSet, DirectControlnuPp, IC_PplusPp, IC_SplusSs):
#P = y[0]
Ph = y[0]
#S = y[2]
Ss = y[1]
F = y[2]
Fs = y[3]
G = y[4]
FsG = y[5]
FG = y[6]
RF = y[7]
RHP = y[8]
HP = y[9]
kP0 = ksetDict["kP0"]
kP0p = ksetDict["kP0p"]
kS = ksetDict["kS"]
kSp0 = ksetDict["kSp0"]
kFp0 = ksetDict["kFp0"]
kF0 = ksetDict["kF0"]
kFpi0 = ksetDict["kFpi0"]
kFGp = ksetDict["kFGp"]
kFG = ksetDict["kFG"]
ketaF = ksetDict["ketaF"]
kFsG = ksetDict["kFsG"]
kFsGp = ksetDict["kFsGp"]
kFsp = ksetDict["kFsp"]
kFs = ksetDict["kFs"]
kpiRF = ksetDict["kpiRF"]
kpiRH = ksetDict["kpiRH"]
kpiHP = ksetDict["kpiHP"]
ketaHP = ksetDict["ketaHP"]
ketaRF = ksetDict["ketaRF"]
ketaRHP = ksetDict["ketaRHP"]
n1 = REACparamSet["n1"]
n2 = REACparamSet["n2"]
P0const = REACparamSet["P0const"]
I = REACparamSet["I"]
T0const = REACparamSet["T0const"]
piRFconst = REACparamSet["piRFconst"]
piRHPconst = REACparamSet["piRHPconst"]
PplusPpCONST = IC_PplusPp # (microM) Initial Condition protein P
SplusSsCONST = IC_SplusSs # (microM) Initial Condition stresskinease S
system = [
#nuP(Ph, HP, kP0) - nuPp(P, t, kP0p, n1, T0const, TparamSet, DirectControlnuPp), # P
- nuP(Ph, HP, kP0) + nuPp(PplusPpCONST - Ph, t, kP0p, n1, T0const, TparamSet, DirectControlnuPp), # Ph
#nuS(Ss, kS) - nuSp(S, Ph, kSp0, n2, P0const), # S
- nuS(Ss, kS) + nuSp(SplusSsCONST - Ss, Ph, kSp0, n2, P0const), # Ss
nuF(I, Fs, kF0) + piF(RF, kFpi0) + nuFGp(FG, kFGp) - nuFG(G, F, kFG) - nuFp(F, Ss, kFp0) - etaF(F, ketaF), # F
- nuF(I, Fs, kF0) + nuFp(F, Ss, kFp0) + nuFsGp(FsG, kFsGp) - nuFsG(G, Fs, kFsG), # Fs
nuFsGp(FsG, kFsGp) + nuFGp(FG, kFGp) - nuFG(G, F, kFG) - nuFsG(G, Fs, kFsG), # G
nuFsG(G, Fs, kFsG) + nuFs(FG, kFs) - nuFsp(FsG, I, kFsp) - nuFsGp(FsG, kFsGp), # FsG
nuFsp(FsG, I, kFsp) + nuFG(G, F, kFG) - nuFGp(FG, kFGp) - nuFs(FG, kFs), # FG
piRF(FsG, kpiRF) + piRFAddConst(piRFconst) - etaRF(RF, ketaRF), # RF Added const to Alex model
        piRHP(FsG, kpiRH) + piRHPAddConst(piRHPconst) - etaRHP(RHP, ketaRHP), # RHP Added const to Alex model
piHP(RHP, kpiHP) - etaHP(HP, ketaHP)] # HP
# Notice presence of nuFG() in line of F, presence of nuFsG() in that of Fs, absence of pi in that of FsG.
return system
########## RIGHT MEMBERS OF ODEs, rewritten with only 9 equations to isolate those that are independent ##############
def f9eqs(t, y, ksetDict, TparamSet, REACparamSet, DirectControlnuPp, IC_PplusPp, IC_SplusSs, IC_GplusFsGplusFG):
#P = y[0]
Ph = y[0]
#S = y[2]
Ss = y[1]
F = y[2]
Fs = y[3]
#G = y[4]
FsG = y[4]
FG = y[5]
RF = y[6]
RHP = y[7]
HP = y[8]
kP0 = ksetDict["kP0"]
kP0p = ksetDict["kP0p"]
kS = ksetDict["kS"]
kSp0 = ksetDict["kSp0"]
kFp0 = ksetDict["kFp0"]
kF0 = ksetDict["kF0"]
kFpi0 = ksetDict["kFpi0"]
kFGp = ksetDict["kFGp"]
kFG = ksetDict["kFG"]
ketaF = ksetDict["ketaF"]
kFsG = ksetDict["kFsG"]
kFsGp = ksetDict["kFsGp"]
kFsp = ksetDict["kFsp"]
kFs = ksetDict["kFs"]
kpiRF = ksetDict["kpiRF"]
kpiRH = ksetDict["kpiRH"]
kpiHP = ksetDict["kpiHP"]
ketaHP = ksetDict["ketaHP"]
ketaRF = ksetDict["ketaRF"]
ketaRHP = ksetDict["ketaRHP"]
n1 = REACparamSet["n1"]
n2 = REACparamSet["n2"]
P0const = REACparamSet["P0const"]
I = REACparamSet["I"]
T0const = REACparamSet["T0const"]
piRFconst = REACparamSet["piRFconst"]
piRHPconst = REACparamSet["piRHPconst"]
PplusPpCONST = IC_PplusPp # (microM) Initial Condition protein P
SplusSsCONST = IC_SplusSs # (microM) Initial Condition stresskinease S
GplusFsGplusFG = IC_GplusFsGplusFG # (microM) Initial Condition gene G
G = GplusFsGplusFG - FsG - FG
system = [
#nuP(Ph, HP, kP0) - nuPp(P, t, kP0p, n1, T0const, TparamSet, DirectControlnuPp), # P
- nuP(Ph, HP, kP0) + nuPp(PplusPpCONST - Ph, t, kP0p, n1, T0const, TparamSet, DirectControlnuPp), # Ph
#nuS(Ss, kS) - nuSp(S, Ph, kSp0, n2, P0const), # S
        - nuS(Ss, kS) + nuSp(SplusSsCONST - Ss, Ph, kSp0, n2, P0const), # Ss
        nuF(I, Fs, kF0) + piF(RF, kFpi0) + nuFGp(FG, kFGp) - nuFG(G, F, kFG) - nuFp(F, Ss, kFp0) - etaF(F, ketaF), # F
- nuF(I, Fs, kF0) + nuFp(F, Ss, kFp0) + nuFsGp(FsG, kFsGp) - nuFsG(G, Fs, kFsG), # Fs
#nuFsGp(FsG, kFsGp) + nuFGp(FG, kFGp) - nuFG(G, F, kFG) - nuFsG(G, Fs, kFsG), # G
nuFsG(G, Fs, kFsG) + nuFs(FG, kFs) - nuFsp(FsG, I, kFsp) - nuFsGp(FsG, kFsGp), # FsG
nuFsp(FsG, I, kFsp) + nuFG(G, F, kFG) - nuFGp(FG, kFGp) - nuFs(FG, kFs), # FG
piRF(FsG, kpiRF) + piRFAddConst(piRFconst) - etaRF(RF, ketaRF), # RF Added const to Alex model
        piRHP(FsG, kpiRH) + piRHPAddConst(piRHPconst) - etaRHP(RHP, ketaRHP), # RHP Added const to Alex model
piHP(RHP, kpiHP) - etaHP(HP, ketaHP)] # HP
# Notice presence of nuFG() in line of F, presence of nuFsG() in that of Fs, absence of pi in that of FsG.
return system
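# Added explanatory note (not in the original source): the reduction from the full
# system to 10 and then 9 equations relies on the conservation relations
# P + Ph = const, S + Ss = const and G + FsG + FG = const, which is why the
# commented-out right-hand sides above can be recovered algebraically from the
# remaining state variables (e.g. G = GplusFsGplusFG - FsG - FG in f9eqs).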
|
LingyuGitHub/codingofly
|
python/threading/mthreading.py
|
Python
|
gpl-3.0
| 699
| 0.013413
|
#!/usr/bin/env python3
#########################################################################
# File Name: mthreading.py
# Author: ly
# Created Time: Wed 05 Jul 2017 08:46:57 PM CST
# Description:
#########################################################################
# -*- coding: utf-8 -*-
import time
import threading
def play(name,count):
for i in range(1,count):
print('%s %d in %d' %(name, i, count))
time.sleep(1)
return
if __name__=='__main__':
t1=threading.Thread(target=play, args=('t1',10))
    # run t1 as a daemon thread
t1.setDaemon(True)
t1.start()
print("main")
    # wait for the child thread to finish
t1.join()
exit(1)
|
pymedusa/SickRage
|
medusa/tv/base.py
|
Python
|
gpl-3.0
| 2,303
| 0.001303
|
# coding=utf-8
"""TV base class."""
from __future__ import unicode_literals
import threading
from builtins import object
from medusa.indexers.config import INDEXER_TVDBV2
class Identifier(object):
"""Base identifier class."""
def __bool__(self):
"""Magic method."""
raise NotImplementedError
def __ne__(self, other):
"""Magic method."""
return not self == other
class TV(object):
"""Base class for Series and Episode."""
def __init__(self, indexer, indexerid, ignored_properties):
"""Initialize class.
:param indexer:
:type indexer: int
:param indexerid:
:type indexerid: int
:param ignored_properties:
:type ignored_properties: set(str)
"""
self.__dirty = True
self.__ignored_properties = ignored_properties | {'lock'}
        self.indexer = int(indexer)
self.indexerid = int(indexerid)
self.lock = threading.Lock()
@property
def series_id(self):
"""To make a clear distinction between an indexer and the id for the series. You can now also use series_id."""
return self.indexerid
def __setattr__(self, key, value):
"""Set the corresponding attribute and use the dirty flag if the new value is different from the old value.
:param key:
:type key: str
:param value:
"""
if key == '_location' or (not key.startswith('_') and key not in self.__ignored_properties):
self.__dirty |= self.__dict__.get(key) != value
super(TV, self).__setattr__(key, value)
@property
def dirty(self):
"""Return the dirty flag.
:return:
:rtype: bool
"""
return self.__dirty
def reset_dirty(self):
"""Reset the dirty flag."""
self.__dirty = False
@property
def tvdb_id(self):
"""Get the item's tvdb_id."""
if self.indexerid and self.indexer == INDEXER_TVDBV2:
return self.indexerid
def __getstate__(self):
"""Make object serializable."""
d = dict(self.__dict__)
del d['lock']
return d
def __setstate__(self, d):
"""Un-serialize the object."""
d['lock'] = threading.Lock()
self.__dict__.update(d)
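

# Hedged illustration (added, not part of Medusa): a toy subclass showing how the
# dirty flag defined above reacts to tracked and ignored attributes.
class _ExampleSeries(TV):
    def __init__(self, indexer, indexerid):
        super(_ExampleSeries, self).__init__(indexer, indexerid, {'cache'})


def _demo_dirty_flag():
    show = _ExampleSeries(INDEXER_TVDBV2, 70851)  # 70851 is an arbitrary example id
    show.reset_dirty()
    show.cache = 'ignored value'   # listed in ignored_properties, flag stays False
    assert not show.dirty
    show.name = 'Example Show'     # tracked attribute, flag flips to True
    assert show.dirty
    return show.tvdb_id            # equals indexerid because indexer is INDEXER_TVDBV2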
|
OSSOS/MOP
|
src/ossos/core/ossos/match.py
|
Python
|
gpl-3.0
| 13,649
| 0.005495
|
from astropy.io import ascii
from astropy.table import MaskedColumn, Table, Column
import logging
import math
import numpy
import os
from .downloads.cutouts.downloader import ImageDownloader
from . import util
from .downloads.cutouts.source import SourceCutout
from astropy.time import Time
from .astrom import Observation
from . import storage
BRIGHT_LIMIT = 23.0
OBJECT_PLANTED = "Object.planted"
MINIMUM_BRIGHT_DETECTIONS = 5
MINIMUM_BRIGHT_FRACTION = 0.5
def match_mopfiles(mopfile1, mopfile2):
"""
Given an input list of 'real' detections and candidate detections provide a result file that contains
the measured values from candidate detections with a flag indicating if they are real or false.
@rtype MOPFile
@return mopfile2 with a new column containing index of matching entry in mopfile1
"""
pos1 = pos2 = numpy.array([])
if len(mopfile1.data) > 0:
X_COL = "X_{}".format(mopfile1.header.file_ids[0])
Y_COL = "Y_{}".format(mopfile1.header.file_ids[0])
pos1 = numpy.array([mopfile1.data[X_COL].data, mopfile1.data[Y_COL].data]).transpose()
if len(mopfile2.data) > 0:
X_COL = "X_{}".format(mopfile2.header.file_ids[0])
Y_COL = "Y_{}".format(mopfile2.header.file_ids[0])
pos2 = numpy.array([mopfile2.data[X_COL].data, mopfile2.data[Y_COL].data]).transpose()
# match_idx is an order list. The list is in the order of the first list of positions and each entry
# is the index of the matching position from the second list.
match_idx1, match_idx2 = util.match_lists(pos1, pos2)
mopfile1.data.add_column(Column(data=match_idx1.filled(-1), name="real", length=len(mopfile1.data)))
idx = 0
for file_id in mopfile1.header.file_ids:
idx += 1
mopfile1.data.add_column(Column(data=[file_id]*len(mopfile1.data), name="ID_{}".format(idx)))
return mopfile1
def measure_mags(measures):
"""
Given a list of readings compute the magnitudes for all sources in each reading.
@param measures: list of readings
@return: None
"""
from . import daophot
image_downloader = ImageDownloader()
observations = {}
for measure in measures:
for reading in measure:
if reading.obs not in observations:
observations[reading.obs] = {'x': [],
'y': [],
'source': image_downloader.download(reading, needs_apcor=True)}
assert isinstance(reading.obs, Observation)
observations[reading.obs]['x'].append(reading.x)
observations[reading.obs]['y'].append(reading.y)
for observation in observations:
source = observations[observation]['source']
assert isinstance(source, SourceCutout)
hdulist_index = source.get_hdulist_idx(observation.ccdnum)
        #source.update_pixel_location((observations[observation]['x'],
# observations[observation]['y']), hdulist_index)
observations[observation]['mags'] = daophot.phot(source._hdu_on_disk(hdulist_index),
observations[observation]['x'],
observations[observation]['y'],
aperture=source.apcor.aperture,
sky=source.apcor.sky,
swidth=source.apcor.swidth,
apcor=source.apcor.apcor,
zmag=source.zmag,
maxcount=30000,
extno=0)
return observations
def match_planted(fk_candidate_observations, match_filename, bright_limit=BRIGHT_LIMIT, object_planted=OBJECT_PLANTED,
minimum_bright_detections=MINIMUM_BRIGHT_DETECTIONS, bright_fraction=MINIMUM_BRIGHT_FRACTION):
"""
Using the fk_candidate_observations as input get the Object.planted file from VOSpace and match
planted sources with found sources.
The Object.planted list is pulled from VOSpace based on the standard file-layout and name of the
first exposure as read from the .astrom file.
:param fk_candidate_observations: name of the fk*reals.astrom file to check against Object.planted
:param match_filename: a file that will contain a list of all planted sources and the matched found source
@param minimum_bright_detections: if there are too few bright detections we raise an error.
"""
found_pos = []
detections = fk_candidate_observations.get_sources()
for detection in detections:
reading = detection.get_reading(0)
# create a list of positions, to be used later by match_lists
found_pos.append([reading.x, reading.y])
# Now get the Object.planted file, either from the local FS or from VOSpace.
objects_planted_uri = object_planted
if not os.access(objects_planted_uri, os.F_OK):
objects_planted_uri = fk_candidate_observations.observations[0].get_object_planted_uri()
try:
lines = storage.open_vos_or_local(objects_planted_uri)
lines = lines.read().decode('utf-8')
except Exception as ex:
logging.critical(f'{ex}')
print(lines)
raise ex
# we are changing the format of the Object.planted header to be compatible with astropy.io.ascii but
# there are some old Object.planted files out there so we do these string/replace calls to reset those.
new_lines = lines.replace("pix rate", "pix_rate")
new_lines = new_lines.replace("""''/h rate""", "sky_rate")
planted_objects_table = ascii.read(new_lines, header_start=-1, data_start=0)
planted_objects_table.meta = None
# The match_list method expects a list that contains a position, not an x and a y vector, so we transpose.
planted_pos = numpy.transpose([planted_objects_table['x'].data, planted_objects_table['y'].data])
# match_idx is an order list. The list is in the order of the first list of positions and each entry
# is the index of the matching position from the second list.
(match_idx, match_fnd) = util.match_lists(numpy.array(planted_pos), numpy.array(found_pos))
assert isinstance(match_idx, numpy.ma.MaskedArray)
assert isinstance(match_fnd, numpy.ma.MaskedArray)
false_positives_table = Table()
# Once we've matched the two lists we'll need some new columns to store the information in.
# these are masked columns so that object.planted entries that have no detected match are left 'blank'.
new_columns = [MaskedColumn(name="measure_x", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_y", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_rate", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_angle", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_mag1", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_merr1", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_mag2", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_merr2", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_mag3", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_merr3", length=len(planted_objects_table), mask=True)]
planted_objects_table.add_columns(new_columns)
tlength = 0
new_columns = [MaskedColumn(name="measure_x", length=tlength, mask=True),
MaskedColumn(name="measure_y", length=tlength, mask=True),
MaskedColumn(name="measure_rate", length=0, mask=True),
MaskedColumn(name="measure_angle", length=0, mask=True),
MaskedCol
|
sdlBasic/sdlbrt
|
win32/mingw/opt/lib/python2.7/idlelib/EditorWindow.py
|
Python
|
lgpl-2.1
| 66,626
| 0.001816
|
import sys
import os
import platform
import re
import imp
from Tkinter import *
import tkSimpleDialog
import tkMessageBox
import webbrowser
from idlelib.MultiCall import MultiCallCreator
from idlelib import idlever
from idlelib import WindowList
from idlelib import SearchDialog
from idlelib import GrepDialog
from idlelib import ReplaceDialog
from idlelib import PyParse
from idlelib.configHandler import idleConf
from idlelib import aboutDialog, textView, configDialog
from idlelib import macosxSupport
# The default tab setting for a Text widget, in average-width characters.
TK_TABWIDTH_DEFAULT = 8
_py_version = ' (%s)' % platform.python_version()
def _sphinx_version():
"Format sys.version_info to produce the Sphinx version string used to install the chm docs"
major, minor, micro, level, serial = sys.version_info
release = '%s%s' % (major, minor)
if micro:
release += '%s' % (micro,)
if level == 'candidate':
release += 'rc%s' % (serial,)
elif level != 'final':
release += '%s%s' % (level[0], serial)
return release
def _find_module(fullname, path=None):
"""Version of imp.find_module() that handles hierarchical module names"""
file = None
for tgt in fullname.split('.'):
if file is not None:
file.close() # close intermediate files
(file, filename, descr) = imp.find_module(tgt, path)
if descr[2] == imp.PY_SOURCE:
break # find but not load the source file
module = imp.load_module(tgt, file, filename, descr)
try:
path = module.__path__
except AttributeError:
raise ImportError, 'No source for module ' + module.__name__
if descr[2] != imp.PY_SOURCE:
# If all of the above fails and didn't raise an exception,fallback
# to a straight import which can find __init__.py in a package.
m = __import__(fullname)
try:
filename = m.__file__
except AttributeError:
pass
else:
file = None
base, ext = os.path.splitext(filename)
if ext == '.pyc':
ext = '.py'
filename = base + ext
descr = filename, None, imp.PY_SOURCE
return file, filename, descr
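# Added illustration (not in the original IDLE source): for a dotted name such as
# _find_module('idlelib.PyParse') the loop above locates 'idlelib' first, loads it
# to obtain its package __path__, then searches that path for 'PyParse' and returns
# the (file, filename, descr) tuple of the final component.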
class HelpDialog(object):
def __init__(self):
self.parent = None # parent of help window
self.dlg = None # the help window iteself
def display(self, parent, near=None):
""" Display the help dialog.
parent - parent widget for the help window
near - a Toplevel widget (e.g. EditorWindow or PyShell)
to use as a reference for placing the help window
"""
if self.dlg is None:
self.show_dialog(parent)
if near:
self.nearwindow(near)
def show_dialog(self, parent):
self.parent = parent
fn=os.path.join(os.path.abspath(os.path.dirname(__file__)),'help.txt')
self.dlg = dlg = textView.view_file(parent,'Help',fn, modal=False)
dlg.bind('<Destroy>', self.destroy, '+')
def nearwindow(self, near):
# Place the help dialog near the window specified by parent.
# Note - this may not reposition the window in Metacity
# if "/apps/metacity/general/disable_workarounds" is enabled
dlg = self.dlg
geom = (near.winfo_rootx() + 10, near.winfo_rooty() + 10)
dlg.withdraw()
dlg.geometry("=+%d+%d" % geom)
dlg.deiconify()
dlg.lift()
def destroy(self, ev=None):
self.dlg = None
self.parent = None
helpDialog = HelpDialog() # singleton instance
def _help_dialog(parent): # wrapper for htest
helpDialog.show_dialog(parent)
class EditorWindow(object):
from idlelib.Percolator import Percolator
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
    from idlelib.IOBinding import IOBinding, filesystemencoding, encoding
from idlelib import Bindings
from Tkinter import Toplevel
from idlelib.MultiStatusBar import MultiStatusBar
help_url = None
    def __init__(self, flist=None, filename=None, key=None, root=None):
if EditorWindow.help_url is None:
dochome = os.path.join(sys.prefix, 'Doc', 'index.html')
if sys.platform.count('linux'):
# look for html docs in a couple of standard places
pyver = 'python-docs-' + '%s.%s.%s' % sys.version_info[:3]
if os.path.isdir('/var/www/html/python/'): # "python2" rpm
dochome = '/var/www/html/python/index.html'
else:
basepath = '/usr/share/doc/' # standard location
dochome = os.path.join(basepath, pyver,
'Doc', 'index.html')
elif sys.platform[:3] == 'win':
chmfile = os.path.join(sys.prefix, 'Doc',
'Python%s.chm' % _sphinx_version())
if os.path.isfile(chmfile):
dochome = chmfile
elif sys.platform == 'darwin':
# documentation may be stored inside a python framework
dochome = os.path.join(sys.prefix,
'Resources/English.lproj/Documentation/index.html')
dochome = os.path.normpath(dochome)
if os.path.isfile(dochome):
EditorWindow.help_url = dochome
if sys.platform == 'darwin':
# Safari requires real file:-URLs
EditorWindow.help_url = 'file://' + EditorWindow.help_url
else:
EditorWindow.help_url = "https://docs.python.org/%d.%d/" % sys.version_info[:2]
currentTheme=idleConf.CurrentTheme()
self.flist = flist
root = root or flist.root
self.root = root
try:
sys.ps1
except AttributeError:
sys.ps1 = '>>> '
self.menubar = Menu(root)
self.top = top = WindowList.ListedToplevel(root, menu=self.menubar)
if flist:
self.tkinter_vars = flist.vars
#self.top.instance_dict makes flist.inversedict available to
#configDialog.py so it can access all EditorWindow instances
self.top.instance_dict = flist.inversedict
else:
self.tkinter_vars = {} # keys: Tkinter event names
# values: Tkinter variable instances
self.top.instance_dict = {}
self.recent_files_path = os.path.join(idleConf.GetUserCfgDir(),
'recent-files.lst')
self.text_frame = text_frame = Frame(top)
self.vbar = vbar = Scrollbar(text_frame, name='vbar')
self.width = idleConf.GetOption('main','EditorWindow','width', type='int')
text_options = {
'name': 'text',
'padx': 5,
'wrap': 'none',
'width': self.width,
'height': idleConf.GetOption('main', 'EditorWindow', 'height', type='int')}
if TkVersion >= 8.5:
# Starting with tk 8.5 we have to set the new tabstyle option
# to 'wordprocessor' to achieve the same display of tabs as in
# older tk versions.
text_options['tabstyle'] = 'wordprocessor'
self.text = text = MultiCallCreator(Text)(text_frame, **text_options)
self.top.focused_widget = self.text
self.createmenubar()
self.apply_bindings()
self.top.protocol("WM_DELETE_WINDOW", self.close)
self.top.bind("<<close-window>>", self.close_event)
if macosxSupport.isAquaTk():
# Command-W on editorwindows doesn't work without this.
text.bind('<<close-window>>', self.close_event)
# Some OS X systems have only one mouse button,
# so use control-click for pulldown menus there.
# (Note, AquaTk defines <2> as the right button if
# present and the Tk Text widget already binds <2>.)
text.bind("<C
|
dhylands/python_lcd
|
lcd/esp8266_i2c_lcd_test.py
|
Python
|
mit
| 1,476
| 0.002033
|
"""Implements a HD44780 character LCD connected via PCF8574 on I2C.
This was tested with: https://www.wemos.cc/product/d1-mini.html"""
from time import sleep_ms, ticks_ms
from machine import I2C, Pin
from esp8266_i2c_lcd import I2cLcd
# The PCF8574 has a jumper selectable address: 0x20 - 0x27
DEFAULT_I2C_ADDR = 0x27
def test_main():
"""Test function for verifying basic functionality."""
print("Running test_main")
    i2c = I2C(scl=Pin(5), sda=Pin(4), freq=100000)
lcd = I2cLcd(i2c, DEFAULT_I2C_ADDR, 2, 16)
lcd.putstr("It Works!\nSecond Line")
sleep_ms(3000)
lcd.clear()
count = 0
while True:
lcd.move_to(0, 0)
lcd.putstr("%7d" % (ticks_ms() // 1000))
sleep_ms(1000)
count += 1
if count % 10 == 3:
print("Turning backlight off")
lcd.backlight_off()
if count % 10 == 4:
print("Turning backlight on")
lcd.backlight_on()
if count % 10 == 5:
print("Turning display off")
lcd.display_off()
if count % 10 == 6:
print("Turning display on")
lcd.display_on()
if count % 10 == 7:
print("Turning display & backlight off")
lcd.backlight_off()
lcd.display_off()
if count % 10 == 8:
print("Turning display & backlight on")
lcd.backlight_on()
lcd.display_on()
#if __name__ == "__main__":
test_main()
|
mikkqu/rc-chrysalis
|
scapp/moment.py
|
Python
|
bsd-2-clause
| 500
| 0.006
|
from jinja2 import Markup
class momentjs(object):
def __init__(self, timestamp):
self.timestamp = timestamp
def render(self, format):
        return Markup("<script>\ndocument.write(moment(\"%s\").%s);\n</script>" % (self.timestamp.strftime("%Y-%m-%dT%H:%M:%SZ"), format))
def format(self, fmt):
        return self.render("format(\"%s\")" % fmt)
def calendar(self):
return self.render("calendar()")
def fromNow(self):
return self.render("fromNow()")
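

if __name__ == "__main__":
    # Hedged usage sketch (added, not part of the original module): render the
    # markup directly instead of going through a Jinja2 template.
    from datetime import datetime
    stamp = momentjs(datetime.utcnow())
    print(stamp.format("YYYY-MM-DD"))  # a <script> block calling moment(...).format("YYYY-MM-DD")
    print(stamp.fromNow())             # a <script> block calling moment(...).fromNow()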
|
dc3-plaso/plaso
|
tests/parsers/test_lib.py
|
Python
|
apache-2.0
| 8,486
| 0.004949
|
# -*- coding: utf-8 -*-
"""Parser related functions and classes for testing."""
import heapq
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver as path_spec_resolver
from plaso.containers import sessions
from plaso.engine import knowledge_base
from plaso.formatters import manager as formatters_manager
from plaso.formatters import mediator as formatters_mediator
from plaso.parsers import interface
from plaso.parsers import mediator
from plaso.storage import fake_storage
from tests import test_lib as shared_test_lib
class _EventsHeap(object):
"""Events heap."""
def __init__(self):
"""Initializes an events heap."""
super(_EventsHeap, self).__init__()
self._heap = []
def PopEvent(self):
"""Pops an event from the heap.
Returns:
EventObject: event.
"""
try:
_, _, _, event = heapq.heappop(self._heap)
return event
except IndexError:
return None
def PopEvents(self):
"""Pops events from the heap.
Yields:
EventObject: event.
"""
event = self.PopEvent()
while event:
yield event
event = self.PopEvent()
def PushEvent(self, event):
"""Pushes an event onto the heap.
Args:
event (EventObject): event.
"""
# TODO: replace this work-around for an event "comparable".
event_values = event.CopyToDict()
attributes = []
for attribute_name, attribute_value in sorted(event_values.items()):
if isinstance(attribute_value, dict):
attribute_value = sorted(attribute_value.items())
comparable = u'{0:s}: {1!s}'.format(attribute_name, attribute_value)
attributes.append(comparable)
comparable = u', '.join(attributes)
event_values = sorted(event.CopyToDict().items())
heap_values = (event.timestamp, event.timestamp_desc, comparable, event)
heapq.heappush(self._heap, heap_values)
def PushEvents(self, events):
"""Pushes events onto the heap.
Args:
      events (list[EventObject]): events.
"""
for event in events:
self.PushEvent(event)
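# Note added for clarity (not in the original): regardless of insertion order,
# PopEvents() yields events sorted by (timestamp, timestamp_desc, attribute
# string), which is what keeps the comparisons in the tests below deterministic.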
class ParserTestCase(shared_test_lib.BaseTestCase):
"""Parser test case."""
def _CreateParserMediator(
self, storage_writer, file_entry=None, knowledge_base_values=None,
parser_chain=None, timezone=u'UTC'):
"""Creates a parser mediator.
Args:
storage_writer (StorageWriter): storage writer.
file_entry (Optional[dfvfs.FileEntry]): file entry object being parsed.
knowledge_base_values (Optional[dict]): knowledge base values.
parser_chain (Optional[str]): parsing chain up to this point.
timezone (str): timezone.
Returns:
ParserMediator: parser mediator.
"""
knowledge_base_object = knowledge_base.KnowledgeBase()
if knowledge_base_values:
for identifier, value in iter(knowledge_base_values.items()):
knowledge_base_object.SetValue(identifier, value)
knowledge_base_object.SetTimezone(timezone)
parser_mediator = mediator.ParserMediator(
storage_writer, knowledge_base_object)
if file_entry:
parser_mediator.SetFileEntry(file_entry)
if parser_chain:
parser_mediator.parser_chain = parser_chain
return parser_mediator
def _CreateStorageWriter(self):
"""Creates a storage writer object.
Returns:
FakeStorageWriter: storage writer.
"""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
return storage_writer
def _GetSortedEvents(self, events):
"""Retrieves events sorted in a deterministic order.
Args:
events (list[EventObject]): events.
Returns:
|
list[EventObject]: sorted events.
"""
events_heap = _EventsHeap()
events_heap.PushEvents(events)
return list(events_heap.PopEvents())
def _GetShortMessage(self, message_string):
"""Shortens a message string to a maximum of 80 character width.
Args:
me
|
ssage_string (str): message string.
Returns:
str: short message string, if it is longer than 80 characters it will
           be shortened to its first 77 characters followed by a "...".
"""
if len(message_string) > 80:
return u'{0:s}...'.format(message_string[0:77])
return message_string
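  # Illustrative example (not in the original): a 100-character message comes
  # back as its first 77 characters plus '...', e.g. ('a' * 100) maps to
  # ('a' * 77) + '...'.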
def _ParseFile(
self, path_segments, parser, knowledge_base_values=None,
timezone=u'UTC'):
"""Parses a file with a parser and writes results to a storage writer.
Args:
path_segments (list[str]): path segments inside the test data directory.
parser (BaseParser): parser.
knowledge_base_values (Optional[dict]): knowledge base values.
timezone (str): timezone.
Returns:
FakeStorageWriter: storage writer.
"""
path = self._GetTestFilePath(path_segments)
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=path)
return self._ParseFileByPathSpec(
path_spec, parser, knowledge_base_values=knowledge_base_values,
timezone=timezone)
def _ParseFileByPathSpec(
self, path_spec, parser, knowledge_base_values=None, timezone=u'UTC'):
"""Parses a file with a parser and writes results to a storage writer.
Args:
path_spec (dfvfs.PathSpec): path specification.
parser (BaseParser): parser.
knowledge_base_values (Optional[dict]): knowledge base values.
timezone (str): timezone.
Returns:
FakeStorageWriter: storage writer.
"""
storage_writer = self._CreateStorageWriter()
file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec)
parser_mediator = self._CreateParserMediator(
storage_writer,
file_entry=file_entry,
knowledge_base_values=knowledge_base_values,
timezone=timezone)
if isinstance(parser, interface.FileEntryParser):
parser.Parse(parser_mediator)
elif isinstance(parser, interface.FileObjectParser):
file_object = file_entry.GetFileObject()
try:
parser.Parse(parser_mediator, file_object)
finally:
file_object.close()
else:
self.fail(u'Got unsupported parser type: {0:s}'.format(type(parser)))
return storage_writer
def _TestGetMessageStrings(
self, event, expected_message, expected_message_short):
"""Tests the formatting of the message strings.
This function invokes the GetMessageStrings function of the event
formatter on the event object and compares the resulting messages
strings with those expected.
Args:
event (EventObject): event.
expected_message (str): expected message string.
expected_message_short (str): expected short message string.
"""
formatter_mediator = formatters_mediator.FormatterMediator(
data_location=self._DATA_PATH)
message, message_short = (
formatters_manager.FormattersManager.GetMessageStrings(
formatter_mediator, event))
self.assertEqual(message, expected_message)
self.assertEqual(message_short, expected_message_short)
def _TestGetSourceStrings(
self, event, expected_source, expected_source_short):
"""Tests the formatting of the source strings.
This function invokes the GetSourceStrings function of the event
formatter on the event object and compares the resulting source
strings with those expected.
Args:
event (EventObject): event.
expected_source (str): expected source string.
expected_source_short (str): expected short source string.
"""
# TODO: change this to return the long variant first so it is consistent
# with GetMessageStrings.
source_short, source = (
formatters_manager.FormattersManager.GetSourceStrings(event))
self.assertEqual(source, expected_source)
self.assertEqual(source_short, expected_source_short)
def assertDictContains(self, received, expected):
"""Asserts if a dictionary contains every key-value pair as expected.
Recieved can contain new keys. If any value is a dict, this function is
called recursively.
Args:
received (di
|
biomodels/MODEL1201230000
|
setup.py
|
Python
|
cc0-1.0
| 377
| 0.005305
|
from setuptools import setup, find_packages
setup(name='MODEL1201230000',
|
version=20140916,
description='MODEL1201230000 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/MODEL1201230000',
maintainer='Stanley Gu',
main
|
tainer_url='[email protected]',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
)
|
wakatime/wakatime
|
wakatime/packages/py27/pygments/lexers/ncl.py
|
Python
|
bsd-3-clause
| 63,986
| 0.004095
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.ncl
~~~~~~~~~~~~~~~~~~~
Lexers for NCAR Command Language.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['NCLLexer']
class NCLLexer(RegexLexer):
"""
Lexer for NCL code.
.. versionadded:: 2.2
"""
name = 'NCL'
aliases = ['ncl']
filenames = ['*.ncl']
mimetypes = ['text/ncl']
flags = re.MULTILINE
tokens = {
'root': [
(r';.*\n', Comment),
include('strings'),
include('core'),
(r'[a-zA-Z_]\w*', Name),
include('nums'),
(r'[\s]+', Text),
],
'core': [
# Statements
(words((
'begin', 'break', 'continue', 'create', 'defaultapp', 'do',
'else', 'end', 'external', 'exit', 'True', 'False', 'file', 'function',
'getvalues', 'graphic', 'group', 'if', 'list', 'load', 'local',
'new', '_Missing', 'Missing', 'noparent', 'procedure',
'quit', 'QUIT', 'Quit', 'record', 'return', 'setvalues', 'stop',
'then', 'while'), prefix=r'\b', suffix=r'\s*\b'),
Keyword),
# Data Types
(words((
'ubyte', 'uint', 'uint64', 'ulong', 'string', 'byte',
'character', 'double', 'float', 'integer', 'int64', 'logical',
'long', 'short', 'ushort', 'enumeric', 'numeric', 'snumeric'),
prefix=r'\b', suffix=r'\s*\b'),
Keyword.Type),
# Operators
(r'[\%^*+\-/<>]', Operator),
# punctuation:
(r'[\[\]():@$!&|.,\\{}]', Punctuation),
(r'[=:]', Punctuation),
# Intrinsics
(words((
'abs', 'acos', 'addfile', 'addfiles', 'all', 'angmom_atm', 'any',
'area_conserve_remap', 'area_hi2lores', 'area_poly_sphere',
'asciiread', 'asciiwrite', 'asin', 'atan', 'atan2', 'attsetvalues',
'avg', 'betainc', 'bin_avg', 'bin_sum', 'bw_bandpass_filter',
'cancor', 'cbinread', 'cbinwrite', 'cd_calendar', 'cd_inv_calendar',
'cdfbin_p', 'cdfbin_pr', 'cdfbin_s', 'cdfbin_xn', 'cdfchi_p',
'cdfchi_x', 'cdfgam_p', 'cdfgam_x', 'cdfnor_p', 'cdfnor_x',
'cdft_p', 'cdft_t', 'ceil', 'center_finite_diff',
'center_finite_diff_n', 'cfftb', 'cfftf', 'cfftf_frq_reorder',
'charactertodouble', 'charactertofloat', 'charactertointeger',
'charactertolong', 'charactertoshort', 'charactertostring',
'chartodouble', 'chartofloat', 'chartoint', 'chartointeger',
'chartolong', 'chartoshort', 'chartostring', 'chiinv', 'clear',
'color_index_to_rgba', 'conform', 'conform_dims', 'cos', 'cosh',
'count_unique_values', 'covcorm', 'covcorm_xy', 'craybinnumrec',
'craybinrecread', 'create_graphic', 'csa1', 'csa1d', 'csa1s',
'csa1x', 'csa1xd', 'csa1xs', 'csa2', 'csa2d', 'csa2l', 'csa2ld',
'csa2ls', 'csa2lx', 'csa2lxd', 'csa2lxs', 'csa2s', 'csa2x',
'csa2xd', 'csa2xs', 'csa3', 'csa3d', 'csa3l', 'csa3ld', 'csa3ls',
'csa3lx', 'csa3lxd', 'csa3lxs', 'csa3s', 'csa3x', 'csa3xd',
'csa3xs', 'csc2s', 'csgetp', 'css2c', 'cssetp', 'cssgrid', 'csstri',
'csvoro', 'cumsum', 'cz2ccm', 'datatondc', 'day_of_week',
'day_of_year', 'days_in_month', 'default_fillvalue', 'delete',
'depth_to_pres', 'destroy', 'determinant', 'dewtemp_trh',
'dgeevx_lapack', 'dim_acumrun_n', 'dim_avg', 'dim_avg_n',
'dim_avg_wgt', 'dim_avg_wgt_n', 'dim_cumsum', 'dim_cumsum_n',
'dim_gamfit_n', 'dim_gbits', 'dim_max', 'dim_max_n', 'dim_median',
'dim_median_n', 'dim_min', 'dim_min_n', 'dim_num', 'dim_num_n',
'dim_numrun_n', 'dim_pqsort', 'dim_pqsort_n', 'dim_product',
'dim_product_n', 'dim_rmsd', 'dim_rmsd_n', 'dim_rmvmean',
'dim_rmvmean_n', 'dim_rmvmed', 'dim_rmvmed_n', 'dim_spi_n',
'dim_standardize', 'dim_standardize_n', 'dim_stat4', 'dim_stat4_n',
'dim_stddev', 'dim_stddev_n', 'dim_sum', 'dim_sum_n', 'dim_sum_wgt',
'dim_sum_wgt_n', 'dim_variance', 'dim_variance_n', 'dimsizes',
'doubletobyte', 'doubletochar', 'doubletocharacter',
'doubletofloat', 'doubletoint', 'doubletointeger', 'doubletolong',
'doubletoshort', 'dpres_hybrid_ccm', 'dpres_plevel', 'draw',
'draw_color_palette', 'dsgetp', 'dsgrid2', 'dsgrid2d', 'dsgrid2s',
'dsgrid3', 'dsgrid3d', 'dsgrid3s', 'dspnt2', 'dspnt2d', 'dspnt2s',
'dspnt3', 'dspnt3d', 'dspnt3s', 'dssetp', 'dtrend', 'dtrend_msg',
'dtrend_msg_n', 'dtrend_n', 'dtrend_quadratic',
'dtrend_quadratic_msg_n', 'dv2uvf', 'dv2uvg', 'dz_height
|
',
'echo_off', 'echo_on', 'eof2data', 'eof_varimax', 'eofcor',
'
|
eofcor_pcmsg', 'eofcor_ts', 'eofcov', 'eofcov_pcmsg', 'eofcov_ts',
'eofunc', 'eofunc_ts', 'eofunc_varimax', 'equiv_sample_size', 'erf',
'erfc', 'esacr', 'esacv', 'esccr', 'esccv', 'escorc', 'escorc_n',
'escovc', 'exit', 'exp', 'exp_tapersh', 'exp_tapersh_wgts',
'exp_tapershC', 'ezfftb', 'ezfftb_n', 'ezfftf', 'ezfftf_n',
'f2fosh', 'f2foshv', 'f2fsh', 'f2fshv', 'f2gsh', 'f2gshv', 'fabs',
'fbindirread', 'fbindirwrite', 'fbinnumrec', 'fbinread',
'fbinrecread', 'fbinrecwrite', 'fbinwrite', 'fft2db', 'fft2df',
'fftshift', 'fileattdef', 'filechunkdimdef', 'filedimdef',
'fileexists', 'filegrpdef', 'filevarattdef', 'filevarchunkdef',
'filevarcompressleveldef', 'filevardef', 'filevardimsizes',
'filwgts_lancos', 'filwgts_lanczos', 'filwgts_normal',
'floattobyte', 'floattochar', 'floattocharacter', 'floattoint',
'floattointeger', 'floattolong', 'floattoshort', 'floor',
'fluxEddy', 'fo2fsh', 'fo2fshv', 'fourier_info', 'frame', 'fspan',
'ftcurv', 'ftcurvd', 'ftcurvi', 'ftcurvp', 'ftcurvpi', 'ftcurvps',
'ftcurvs', 'ftest', 'ftgetp', 'ftkurv', 'ftkurvd', 'ftkurvp',
'ftkurvpd', 'ftsetp', 'ftsurf', 'g2fsh', 'g2fshv', 'g2gsh',
'g2gshv', 'gamma', 'gammainc', 'gaus', 'gaus_lobat',
'gaus_lobat_wgt', 'gc_aangle', 'gc_clkwise', 'gc_dangle',
'gc_inout', 'gc_latlon', 'gc_onarc', 'gc_pnt2gc', 'gc_qarea',
'gc_tarea', 'generate_2d_array', 'get_color_index',
'get_color_rgba', 'get_cpu_time', 'get_isolines', 'get_ncl_version',
'get_script_name', 'get_script_prefix_name', 'get_sphere_radius',
'get_unique_values', 'getbitsone', 'getenv', 'getfiledimsizes',
'getfilegrpnames', 'getfilepath', 'getfilevaratts',
'getfilevarchunkdimsizes', 'getfilevardims', 'getfilevardimsizes',
'getfilevarnames', 'getfilevartypes', 'getvaratts', 'getvardims',
'gradsf', 'gradsg', 'greg2jul', 'grid2triple', 'hlsrgb', 'hsvrgb',
'hydro', 'hyi2hyo', 'idsfft', 'igradsf', 'igradsg', 'ilapsf',
'ilapsg', 'ilapvf', 'ilapvg', 'ind', 'ind_resolve', 'int2p',
'int2p_n', 'integertobyte', 'integertochar', 'integertocharacter',
'integertoshort', 'inttobyte', 'inttochar', 'inttoshort',
'inverse_matrix', 'isatt', 'isbigendian', 'isbyte', 'ischar',
'iscoord', 'isdefined', 'isdim', 'isdimnamed', 'isdouble',
'isenumeric', 'isfile', 'isfilepresent', 'isfilevar',
|
AustereCuriosity/astropy
|
astropy/tests/helper.py
|
Python
|
bsd-3-clause
| 18,299
| 0
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides the tools used to internally run the astropy test suite
from the installed astropy. It makes use of the `pytest` testing framework.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import functools
import os
import sys
import types
import warnings
import pytest
from ..extern import six
from ..extern.six.moves import cPickle as pickle
try:
# Import pkg_resources to prevent it from issuing warnings upon being
# imported from within py.test. See
# https://github.com/astropy/astropy/pull/537 for a detailed explanation.
import pkg_resources # pylint: disable=W0611
except ImportError:
pass
from ..utils.exceptions import (AstropyDeprecationWarning,
AstropyPendingDeprecationWarning)
# For backward-compatibility with affiliated packages
from .runner import TestRunner # pylint: disable=W0611
__all__ = ['raises', 'enable_deprecations_as_exceptions', 'remote_data',
'treat_deprecations_as_exceptions', 'catch_warnings',
'assert_follows_unicode_guidelines', 'quantity_allclose',
'assert_quantity_allclose', 'check_pickling_recovery',
'pickle_protocol', 'generic_recursive_equality_test']
# pytest marker to mark tests which get data from the web
remote_data = pytest.mark.remote_data
# This is for Python 2.x and 3.x compatibility. distutils expects
# options to all be byte strings on Python 2 and Unicode strings on
# Python 3.
def _fix_user_options(options):
def to_str_or_none(x):
if x is None:
return None
return str(x)
return [tuple(to_str_or_none(x) for x in y) for y in options]
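# Illustrative example (not part of the original): _fix_user_options(
# [('with-foo', None, 'description')]) keeps the structure intact but passes
# every non-None element through str(), which is the form distutils expects on
# both Python 2 and Python 3.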
def _save_coverage(cov, result, rootdir, testing_path):
"""
This method is called after the tests have been run in coverage mode
to cleanup and then save the coverage data and report.
"""
from ..utils.console import color_print
if result != 0:
return
# The coverage report includes the full path to the
|
temporary
# directory, so we replace all the paths with the true source
# path. Note that this will not work properly for packages that still
# rely on 2to3.
try:
# Coverage 4.0: _harvest_data has been renamed to get_data, the
# lines dict is private
cov.get_data()
except AttributeError:
# Coverage < 4.0
cov._harvest_data()
lines = cov.data.lines
else:
lines = cov.data._lines
for key in list(
|
lines.keys()):
new_path = os.path.relpath(
os.path.realpath(key),
os.path.realpath(testing_path))
new_path = os.path.abspath(
os.path.join(rootdir, new_path))
lines[new_path] = lines.pop(key)
color_print('Saving coverage data in .coverage...', 'green')
cov.save()
color_print('Saving HTML coverage report in htmlcov...', 'green')
cov.html_report(directory=os.path.join(rootdir, 'htmlcov'))
class raises(object):
"""
A decorator to mark that a test should raise a given exception.
Use as follows::
@raises(ZeroDivisionError)
def test_foo():
x = 1/0
    This can also be used as a context manager, in which case it is just
    an alias for the ``pytest.raises`` context manager (because the
    two have the same name this helps avoid confusion by being
    flexible).
"""
# pep-8 naming exception -- this is a decorator class
def __init__(self, exc):
self._exc = exc
self._ctx = None
def __call__(self, func):
@functools.wraps(func)
def run_raises_test(*args, **kwargs):
pytest.raises(self._exc, func, *args, **kwargs)
return run_raises_test
def __enter__(self):
self._ctx = pytest.raises(self._exc)
return self._ctx.__enter__()
def __exit__(self, *exc_info):
return self._ctx.__exit__(*exc_info)
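# Illustrative usage of the context-manager form described in the docstring
# above (equivalent to ``pytest.raises``); this sketch is not part of the
# original module:
#
#     def test_division_by_zero():
#         with raises(ZeroDivisionError):
#             1 / 0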
_deprecations_as_exceptions = False
_include_astropy_deprecations = True
_modules_to_ignore_on_import = set([
'compiler', # A deprecated stdlib module used by py.test
'scipy',
'pygments',
'ipykernel',
'setuptools'])
_warnings_to_ignore_entire_module = set([])
_warnings_to_ignore_by_pyver = {
(3, 4): set([
# py.test reads files with the 'U' flag, which is now
# deprecated in Python 3.4.
r"'U' mode is deprecated",
        # BeautifulSoup4 triggers a warning in stdlib's html module.
r"The strict argument and mode are deprecated\.",
r"The value of convert_charrefs will become True in 3\.5\. "
r"You are encouraged to set the value explicitly\."]),
(3, 5): set([
# py.test raised this warning in inspect on Python 3.5.
# See https://github.com/pytest-dev/pytest/pull/1009
# Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec()
r"inspect\.getargspec\(\) is deprecated, use "
r"inspect\.signature\(\) instead"]),
(3, 6): set([
# inspect raises this slightly different warning on Python 3.6.
# Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec()
r"inspect\.getargspec\(\) is deprecated, use "
r"inspect\.signature\(\) or inspect\.getfullargspec\(\)"])}
def enable_deprecations_as_exceptions(include_astropy_deprecations=True,
modules_to_ignore_on_import=[],
warnings_to_ignore_entire_module=[],
warnings_to_ignore_by_pyver={}):
"""
Turn on the feature that turns deprecations into exceptions.
Parameters
----------
include_astropy_deprecations : bool
If set to `True`, ``AstropyDeprecationWarning`` and
``AstropyPendingDeprecationWarning`` are also turned into exceptions.
modules_to_ignore_on_import : list of str
List of additional modules that generate deprecation warnings
on import, which are to be ignored. By default, these are already
included: ``compiler``, ``scipy``, ``pygments``, ``ipykernel``, and
``setuptools``.
warnings_to_ignore_entire_module : list of str
List of modules with deprecation warnings to ignore completely,
not just during import. If ``include_astropy_deprecations=True``
is given, ``AstropyDeprecationWarning`` and
``AstropyPendingDeprecationWarning`` are also ignored for the modules.
warnings_to_ignore_by_pyver : dict
Dictionary mapping tuple of ``(major, minor)`` Python version to
a list of deprecation warning messages to ignore. This is in
        addition to those already ignored by default
(see ``_warnings_to_ignore_by_pyver`` values).
"""
global _deprecations_as_exceptions
_deprecations_as_exceptions = True
global _include_astropy_deprecations
_include_astropy_deprecations = include_astropy_deprecations
global _modules_to_ignore_on_import
_modules_to_ignore_on_import.update(modules_to_ignore_on_import)
global _warnings_to_ignore_entire_module
_warnings_to_ignore_entire_module.update(warnings_to_ignore_entire_module)
global _warnings_to_ignore_by_pyver
for key, val in six.iteritems(warnings_to_ignore_by_pyver):
if key in _warnings_to_ignore_by_pyver:
_warnings_to_ignore_by_pyver[key].update(val)
else:
_warnings_to_ignore_by_pyver[key] = set(val)
def treat_deprecations_as_exceptions():
"""
Turn all DeprecationWarnings (which indicate deprecated uses of
Python itself or Numpy, but not within Astropy, where we use our
own deprecation warning class) into exceptions so that we find
out about them early.
This completely resets the warning filters and any "already seen"
warning state.
"""
# First, totally reset the warning state. The modules may change during
# this iteration thus we copy the original state to a list to iterate
# on. See https://github.com/astropy/astropy/pull/5513.
for module in list(six.iter
|
Stackato-Apps/py3kwsgitest
|
tables.py
|
Python
|
mit
| 647
| 0.006182
|
import sqlalchemy
metadata = sqlalchemy.MetaData()
log_table = sqlalchemy.Table('log', metadata,
sqlalchemy.Column('id', sqlalchemy.Inte
|
ger, primary_key=True),
sqlalchemy.Column('filename', sqlalchemy.Unicode),
sqlalchemy.Column('digest', sqlalchemy.Unicode),
sqlalchemy.Column('comment', sqlalchemy.Unicode),
sqlalchemy.Column('user_agent', sqlal
|
chemy.Unicode),
sqlalchemy.Column('traceback', sqlalchemy.Unicode))
def init(engine):
metadata.create_all(bind=engine)
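# A minimal, self-contained sketch of exercising this module (assumption: an
# in-memory SQLite database is acceptable for a quick check); not part of the
# original file.
if __name__ == '__main__':
    engine = sqlalchemy.create_engine('sqlite://')
    init(engine)
    # The inspector should now report the single 'log' table defined above.
    print(sqlalchemy.inspect(engine).get_table_names())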
|
pashinin-com/pashinin.com
|
src/core/migrations/0002_auto_20161030_1553.py
|
Python
|
gpl-3.0
| 478
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-30 12:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migra
|
tion):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='added',
),
migrations.RemoveField(
|
model_name='user',
name='changed',
),
]
|
Yarrick13/hwasp
|
tests/wasp1/AllAnswerSets/edbidb_3.test.py
|
Python
|
apache-2.0
| 199
| 0.005025
|
input = """
g(1).
g(2).
g(3).
f(a,b).
f(A,B):- g(A), g(B).
f(a,a).
"""
output
|
= """
{f(
|
1,1), f(1,2), f(1,3), f(2,1), f(2,2), f(2,3), f(3,1), f(3,2), f(3,3), f(a,a), f(a,b), g(1), g(2), g(3)}
"""
|
kcompher/velox-modelserver
|
bin/cluster/fabfile.py
|
Python
|
apache-2.0
| 27
| 0
|
from velox_deploy
|
im
|
port *
|
Azulinho/ansible
|
lib/ansible/plugins/lookup/redis.py
|
Python
|
gpl-3.0
| 3,113
| 0.002891
|
# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: redis
author:
- Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
- Ansible Core
version_added: "2.5"
short_description: fetch data from Redis
description:
        - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it
requirements:
- redis (python library https://github.com/andymccurdy/redis-py/)
options:
_terms:
description: list of keys to query
host:
description: location of Redis host
default: '127.0.0.1'
env:
- name: ANSIBLE_REDIS_HOST
ini:
- section: lookup_redis
key: host
      port:
        description: port on which Redis is listening on
        default: 6379
        type: int
env:
- name: ANSIBLE_REDIS_PORT
ini:
- section: lookup_redis
key: port
socket:
description: path to socket on which to query Redis, this option overrides host and port options when set.
type: path
env:
- name: ANSIBLE_REDIS_SOCKET
ini:
- section: lookup_redis
key: socket
"""
EXAMPLES = """
- name: query redis for somekey (default or configured settings used)
debug: msg="{{ lookup('redis', 'somekey'}}"
- name: query redis for list of keys and non-default host and port
debug: msg="{{ lookup('redis', item, host='myredis.internal.com', port=2121) }}"
loop: '{{list_of_redis_keys}}'
- name: use list directly
debug: msg="{{ lookup('redis', 'key1', 'key2', 'key3') }}"
- name: use list directly with a socket
debug: msg="{{ lookup('redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}"
"""
RETURN = """
_raw:
description: value(s) stored in Redis
"""
import os
HAVE_REDIS = False
try:
import redis
HAVE_REDIS = True
except ImportError:
pass
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs
|
):
if not HAVE_REDIS:
raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
# get options
self.set_options(direct=kwargs)
# setup connection
host
|
= self.get_option('host')
port = self.get_option('port')
socket = self.get_option('socket')
if socket is None:
conn = redis.Redis(host=host, port=port)
else:
conn = redis.Redis(unix_socket_path=socket)
ret = []
for term in terms:
try:
res = conn.get(term)
if res is None:
res = ""
ret.append(res)
except Exception:
ret.append("") # connection failed or key not found
return ret
|
tschmorleiz/amcat
|
amcat/scripts/article_upload/controller.py
|
Python
|
agpl-3.0
| 3,148
| 0.0054
|
from __future__ import absolute_import
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
"""
Module for running scrapers
"""
import logging;log = logging.getLogger(__name__)
from collections import namedtuple
from amcat.models import Article, Project
ScrapeError = namedtuple("ScrapeError", ["i", "unit", "error"])
class Controller(object):
def __init__(self):
self.errors = []
self.articles = []
def run(self, scraper):
try:
units = list(scraper._get_units())
except Exception as e:
self.errors.append(ScrapeError(None,None,e))
log.exception("scraper._get_units failed")
return self.articles
for i, unit in enumerate(units):
try:
articles = list(scraper._scrape_unit(unit))
except Exception as e:
log.exception("scraper._scrape_unit failed")
self.errors.append(ScrapeError(i,unit,e))
continue
self.articles += articles
for article in self.articles:
_set_default(article, 'project', scrape
|
r.project)
try:
articles, errors = Article.create_articles(self.articles, scraper.articleset)
self.saved_article_ids = {getattr(a, "duplicate_of", a.id) for a in self.articles}
for e in errors:
self.errors.append(ScrapeError(None,None,e))
except Exception as e
|
:
self.errors.append(ScrapeError(None,None,e))
log.exception("scraper._get_units failed")
return self.saved_article_ids
def _set_default(obj, attr, val):
try:
if getattr(obj, attr, None) is not None: return
except Project.DoesNotExist:
pass # django throws DNE on x.y if y is not set and not nullable
setattr(obj, attr, val)
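# Behaviour in short (note added for clarity, not in the original): the
# scraper's project is only filled in when article.project is unset (None or
# raising Project.DoesNotExist); an article that already carries a project is
# left untouched.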
|
dfunckt/django
|
tests/urlpatterns_reverse/tests.py
|
Python
|
bsd-3-clause
| 50,749
| 0.003074
|
# -*- coding: utf-8 -*-
"""
Unit tests for reverse URL lookups.
"""
from __future__ import unicode_literals
import sys
import threading
import unittest
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.conf.urls import include, url
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import (
HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.shortcuts import redirect
from django.test import (
SimpleTestCase, TestCase, ignore_warnings, override_settings,
)
from django.test.utils import override_script_prefix
from django.urls import (
NoReverseMatch, RegexURLPattern, RegexURLResolver, Resolver404,
ResolverMatch, get_callable, get_resolver, resolve, reverse, reverse_lazy,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from . import middleware, urlconf_outer, views
from .utils import URLObject
from .views import empty_view
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', '', '', 'normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
(
'/view_class/42/37/', 'view-class', '', '', 'view-class', views.view_class_instance, tuple(),
{'arg1': '42', 'arg2': '37'}
),
(
'/included/normal/42/37/', 'inc-normal-view', '', '', 'inc-normal-view', views.empty_view, tuple(),
{'arg1': '42', 'arg2': '37'}
),
(
'/included/view_class/42/37/', 'inc-view-class', '', '', 'inc-view-class', views.view_class_instance, tuple(),
{'arg1': '42', 'arg2': '37'}
),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', '', '', 'mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
(
'/included/mixed_args/42/37/', 'inc-mixed-args', '', '', 'inc-mixed-args', views.empty_view, tuple(),
{'arg2': '37'}
),
(
|
'/included/12/mixed_args/42/37/', 'inc-mixed-args', '', '', 'inc-mixed-args', views.empty_view, tuple(),
|
{'arg2': '37'}
),
# Unnamed views should have None as the url_name. Regression data for #21157.
(
'/unnamed/normal/42/37/', None, '', '', 'urlpatterns_reverse.views.empty_view', views.empty_view, tuple(),
{'arg1': '42', 'arg2': '37'}
),
(
'/unnamed/view_class/42/37/', None, '', '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance,
tuple(), {'arg1': '42', 'arg2': '37'}
),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', '', '', 'no-kwargs', views.empty_view, ('42', '37'), {}),
('/included/no_kwargs/42/37/', 'inc-no-kwargs', '', '', 'inc-no-kwargs', views.empty_view, ('42', '37'), {}),
(
'/included/12/no_kwargs/42/37/', 'inc-no-kwargs', '', '', 'inc-no-kwargs', views.empty_view,
('12', '42', '37'), {}
),
# Namespaces
(
'/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'test-ns3:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/normal/42/37/', 'inc-normal-view', '', 'inc-ns1', 'inc-ns1:inc-normal-view', views.empty_view,
tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'test-ns3:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view', views.empty_view,
tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
# Nested namespaces
(
'/ns-included1/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:test-ns3',
'inc-ns1:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'testapp',
'inc-ns1:inc-ns4:inc-ns2:test-ns3', 'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view', views.empty_view,
tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/test3/inner/42/37/', 'urlobject-view', 'inc-app:testapp', 'inc-app:test-ns3',
'inc-app:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'inc-app:testapp',
'inc-app:inc-ns4:inc-ns2:test-ns3', 'inc-app:inc-ns4:inc-ns2:test-ns3:urlobject-view', views.empty_view,
tuple(), {'arg1': '42', 'arg2': '37'}
),
# Namespaces capturing variables
('/inc70/', 'inner-nothing', '', 'inc-ns5', 'inc-ns5:inner-nothing', views.empty_view, tuple(), {'outer': '70'}),
(
'/inc78/extra/foobar/', 'inner-extra', '', 'inc-ns5', 'inc-ns5:inner-extra', views.empty_view, tuple(),
{'outer': '78', 'extra': 'foobar'}
),
)
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('named_optional', '/optional/1/', [1], {}),
('named_optional', '/optional/1/', [], {'arg1': 1}),
('named_optional', '/optional/1/2/', [1, 2], {}),
('named_optional', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('named_optional_terminated', '/optional/1/2/', [1, 2], {}),
('named_optional_terminated', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)),
(
'windows', r'/windows_path/C:
|
ftomassetti/worldengine
|
worldengine/views/WatermapView.py
|
Python
|
mit
| 880
| 0
|
from worldengine.simulations.basic import *
import random
from worldengine.views.basic import color_prop
from PyQt4 import QtGui
class WatermapView(object):
def is_applicable(self, world):
return world.has_watermap()
def draw(self, world, canvas):
width = world.width
height = world.height
th = world.watermap['thresholds']['river']
for y in range(0, height):
|
for x in range(0, width):
if world.is_ocean((x, y)):
r = g = 0
b = 255
else:
w = world.watermap['data'][y][x]
|
if w > th:
r = g = 0
b = 255
else:
r = g = b = 0
col = QtGui.QColor(r, g, b)
canvas.setPixel(x, y, col.rgb())
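        # Summary of the drawing rule above (comment added for clarity): ocean
        # cells, and land cells whose watermap value exceeds the 'river'
        # threshold, are painted blue; all other land cells stay black.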
|
NuGrid/NuGridPy
|
nugridpy/version.py
|
Python
|
bsd-3-clause
| 54
| 0
|
"""NuGridPy package ve
|
rsion"""
__
|
version__ = '0.7.6'
|
pegler/django-thumbs
|
setup.py
|
Python
|
bsd-2-clause
| 691
| 0.004342
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# -*- mode: python -*-
# vi: set ft=python :
import os
from setuptools
|
import setup, find_packages
README_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README')
DESCRIPTION = 'Easy image thumbnails in Django.'
if os.path.exists(README_PATH): LONG_DESCRIPTION = open(README_PATH).read()
else: LONG_DESCRIPTION = DESCRIPTION
setup(
name='django-thumbs',
ve
|
rsion='1.0.4',
install_requires=['django'],
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author='Matt Pegler',
author_email='[email protected]',
url='https://github.com/pegler/django-thumbs/',
packages=['thumbs'],
)
|
sublee/lets
|
lets/transparentlet.py
|
Python
|
bsd-3-clause
| 600
| 0
|
# -*- coding: utf-8 -*-
"""
lets.transparentlet
~~~~~~~~~~~~~~~~~~~
Deprecated. gevent-1.1 keeps a traceback exactly.
If you just want to prevent the hub from printing an exception, use
:mod:`lets.quietlet` inste
|
ad.
:copyright: (c) 2013-2018 by Heungsub Lee
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from gevent.pool import Group as TransparentGroup
from lets.quietlet import quiet as no_error_handling
from lets.quietlet import Quietlet as Transparentlet
__all__
|
= ['Transparentlet', 'TransparentGroup', 'no_error_handling']
|
facetothefate/contrail-controller
|
src/opserver/partition_handler.py
|
Python
|
apache-2.0
| 23,447
| 0.00917
|
#!/usr/bin/python
from gevent import monkey
monkey.patch_all()
import logging
import gevent
from gevent.coros import BoundedSemaphore
from kafka import KafkaClient, KeyedProducer, SimpleConsumer, common
from uveserver import UVEServer
import os
import json
import copy
import traceback
import uuid
import struct
import socket
import discoveryclient.client as client
from sandesh_common.vns.constants import ALARM_PARTITION_SERVICE_NAME
from pysandesh.util import UTCTimestampUsec
import select
import redis
from collections import namedtuple
PartInfo = namedtuple("PartInfo",["ip_address","instance_id","acq_time","port"])
def sse_pack(d):
"""Pack data in SSE format"""
buffer = ''
for k in ['event','data']:
if k in d.keys():
buffer += '%s: %s\n' % (k, d[k])
return buffer + '\n'
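# Illustrative example (not in the original): sse_pack({'event': 'update',
# 'data': '{"k": 1}'}) returns 'event: update\ndata: {"k": 1}\n\n', i.e. one
# Server-Sent Events frame terminated by a blank line.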
class UveStreamPart(gevent.Greenlet):
def __init__(self, partno, logger, q, pi, rpass):
gevent.Greenlet.__init__(self)
self._logger = logger
self._q = q
self._pi = pi
self._partno = partno
self._rpass = rpass
def syncpart(self, redish):
inst = self._pi.instance_id
part = self._partno
keys = list(redish.smembers("AGPARTKEYS:%s:%d" % (inst, part)))
ppe = redish.pipeline()
for key in keys:
ppe.hgetall("AGPARTVALUES:%s:%d:%s" % (inst, part, key))
pperes = ppe.execute()
idx=0
for res in pperes:
for tk,tv in res.iteritems():
msg = {'event': 'sync', 'data':\
json.dumps({'partition':self._partno,
'key':keys[idx], 'type':tk, 'value':tv})}
self._q.put(sse_pack(msg))
idx += 1
def _run(self):
lredis = None
pb = None
while True:
try:
lredis = redis.StrictRedis(
host=self._pi.ip_address,
port=self._pi.port,
password=self._rpass,
db=2)
pb = lredis.pubsub()
inst = self._pi.instance_id
part = self._partno
pb.subscribe('AGPARTPUB:%s:%d' % (inst, part))
self.syncpart(lredis)
for message in pb.listen():
if message["type"] != "message":
continue
dataline = message["data"]
try:
elems = json.loads(dataline)
except:
self._logger.error("AggUVE Parsing failed: %s" % str(message))
continue
else:
self._logger.error("AggUVE loading: %s" % str(elems))
ppe = lredis.pipeline()
for elem in elems:
# This UVE was deleted
if elem["type"] is None:
ppe.exists("AGPARTVALUES:%s:%d:%s" % \
(inst, part, elem["key"]))
else:
ppe.hget("AGPARTVALUES:%s:%d:%s" % \
(inst, part, elem["key"]), elem["type"])
pperes = ppe.execute()
idx = 0
for elem in elems:
if elem["type"] is None:
msg = {'event': 'update', 'data':\
json.dumps({'partition':part,
'key':elem["key"], 'type':None})}
else:
vjson = pperes[idx]
if vjson is None:
vdata = None
else:
vdata = json.loads(vjson)
msg = {'event': 'update', 'data':\
json.dumps({'partition':part,
'key':elem["key"], 'type':elem["type"],
'value':vdata})}
self._q.put(sse_pack(msg))
idx += 1
except gevent.GreenletExit:
break
except Exception as ex:
template = "Exception {0} in uve stream proc. Arguments:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.error("%s : traceback %s" % \
(messag, tr
|
aceback.format_exc()))
lredis = None
if pb is not None:
|
pb.close()
pb = None
gevent.sleep(2)
return None
class UveStreamer(gevent.Greenlet):
def __init__(self, logger, q, rfile, agp_cb, partitions, rpass):
gevent.Greenlet.__init__(self)
self._logger = logger
self._q = q
self._rfile = rfile
self._agp_cb = agp_cb
self._agp = {}
self._parts = {}
self._partitions = partitions
self._rpass = rpass
def _run(self):
inputs = [ self._rfile ]
outputs = [ ]
msg = {'event': 'init', 'data':\
json.dumps({'partitions':self._partitions})}
self._q.put(sse_pack(msg))
while True:
readable, writable, exceptional = select.select(inputs, outputs, inputs, 1)
if (readable or writable or exceptional):
break
newagp = self._agp_cb()
set_new, set_old = set(newagp.keys()), set(self._agp.keys())
intersect = set_new.intersection(set_old)
# deleted parts
for elem in set_old - intersect:
self.partition_stop(elem)
# new parts
for elem in set_new - intersect:
self.partition_start(elem, newagp[elem])
# changed parts
for elem in intersect:
if self._agp[elem] != newagp[elem]:
self.partition_stop(elem)
self.partition_start(elem, newagp[elem])
self._agp = newagp
for part, pi in self._agp.iteritems():
self.partition_stop(part)
def partition_start(self, partno, pi):
self._logger.error("Starting agguve part %d using %s" %( partno, pi))
msg = {'event': 'clear', 'data':\
json.dumps({'partition':partno, 'acq_time':pi.acq_time})}
self._q.put(sse_pack(msg))
self._parts[partno] = UveStreamPart(partno, self._logger,
self._q, pi, self._rpass)
self._parts[partno].start()
def partition_stop(self, partno):
self._logger.error("Stopping agguve part %d" % partno)
self._parts[partno].kill()
self._parts[partno].get()
del self._parts[partno]
class PartitionHandler(gevent.Greenlet):
def __init__(self, brokers, group, topic, logger, limit):
gevent.Greenlet.__init__(self)
self._brokers = brokers
self._group = group
self._topic = topic
self._logger = logger
self._limit = limit
self._uvedb = {}
self._partoffset = 0
self._kfk = None
def msg_handler(self, mlist):
self._logger.info("%s Reading %s" % (self._topic, str(mlist)))
return True
def _run(self):
pcount = 0
while True:
try:
self._logger.error("New KafkaClient %s" % self._topic)
self._kfk = KafkaClient(self._brokers , "kc-" + self._topic)
try:
consumer = SimpleConsumer(self._kfk, self._group, self._topic, buffer_size = 4096*4, max_buffer_size=4096*32)
#except:
except Exception as ex:
template = "Consumer Failure {0} occured. Arguments:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.info("%s" % messag)
raise RuntimeError(messag)
self._logger.error("Starting %s" % self._topic)
# Find the offset of the last
|
Phyks/Flatisfy
|
modules/seloger/pages.py
|
Python
|
mit
| 9,785
| 0.002146
|
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of a woob module.
#
# This woob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This woob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this woob module. If not, see <http://www.gnu.org/licenses/>.
from woob.browser.pages import JsonPage, pagination, HTMLPage
from woob.browser.elements import ItemElement, DictElement, method
from woob.browser.filters.json import Dict
from woob.browser.filters.html import XPath
from woob.browser.filters.standard import (CleanText, CleanDecimal, Currency,
Env, Regexp, Field, BrowserURL)
from woob.capabilities.base import NotAvailable, NotLoaded
from woob.capabilities.housing import (Housing, HousingPhoto, City,
UTILITIES, ENERGY_CLASS, POSTS_TYPES,
ADVERT_TYPES)
from woob.capabilities.address import PostalAddress
from woob.tools.capabilities.housing.housing import PricePerMeterFilter
from woob.tools.json import json
from woob.exceptions import ActionNeeded
from .constants import TYPES, RET
import codecs
import decimal
class ErrorPage(HTMLPage):
def on_load(self):
raise ActionNeeded("Please resolve the captcha")
class CitiesPage(JsonPage):
@method
class iter_cities(DictElement):
ignore_duplicate = True
class item(ItemElement):
klass = City
obj_id = Dict('Params/ci')
obj_name = Dict('Display')
class SearchResultsPage(HTMLPage):
def __init__(self, *args, **kwargs):
HTMLPage.__init__(self, *args, **kwargs)
json_content = Regexp(CleanText('//script'),
r"window\[\"initialData\"\] = JSON.parse\(\"({.*})\"\);window\[\"tags\"\]")(self.doc)
json_content = codecs.unicode_escape_decode(json_content)[0]
json_content = json_content.encode('utf-8', 'surrogatepass').decode('utf-8')
self.doc = json.loads(json_content)
@pagination
@method
class iter_housings(DictElement):
item_xpath = 'cards/list'
# Prevent DataError on same ids
ignore_duplicate = True
def next_page(self):
page_nb = Dict('navigation/pagination/page')(self)
max_results = Dict('navigation/counts/count')(self)
results_per_page = Dict('navigation/pagination/resultsPerPage')(self)
if int(max_results) / int(results_per_page) > int(page_nb):
return BrowserURL('search', query=Env('query'), page_number=int(page_nb) + 1)(self)
# TODO handle bellesdemeures
class item(ItemElement):
klass = Housing
def condition(self):
return (
Dict('cardType')(self) not in ['advertising', 'ali', 'localExpert']
and Dict('id', default=False)(self)
and Dict('classifiedURL', default=False)(self)
)
obj_id = Dict('id')
def obj_type(self):
idType = int(Env('query_type')(self))
type = next(k for k, v in TYPES.items() if v == idType)
if type == POSTS_TYPES.FURNISHED_RENT:
# SeLoger does not let us discriminate between furnished and not furnished.
return POSTS_TYPES.RENT
return type
def obj_title(self):
return "{} - {} - {}".format(Dict('estateType')(self),
" / ".join(Dict('tags')(self)),
Field('location')(self))
def obj_advert_type(self):
is_agency = Dict('contact/agencyId', default=False)(self)
if is_agency:
return ADVERT_TYPES.PROFESSIONAL
else:
return ADVERT_TYPES.PERSONAL
obj_utilities = UTILITIES.EXCLUDED
def obj_photos(self):
photos = []
for photo in Dict('photos')(self):
photos.append(HousingPhoto(photo))
return photos
def obj_location(self):
quartier = Dict('districtLabel')(self)
quartier = quartier if quartier else ''
ville = Dict('cityLabel')(self)
ville = ville if ville else ''
cp = Dict('zipCode')(self)
cp = cp if cp else ''
return u'%s %s (%s)' % (quartier, ville, cp)
obj_url = Dict('classifiedURL')
obj_text = Dict('description')
obj_cost = CleanDecimal(Dict('pricing/price', default=NotLoaded), default=NotLoaded)
obj_currency = Currency(Dict('pricing/price', default=NotLoaded), default=NotLoaded)
obj_price_per_meter = CleanDecimal(Dict('pricing/squareMeterPrice'), default=PricePerMeterFilter)
class HousingPage(HTMLPage):
def __init__(self, *args, **kwargs):
HTMLPage.__init__(self, *args, **kwargs)
json_content = Regexp(
CleanText('//script'),
r"window\[\"initialData\"\] = JSON.parse\(\"({.*})\"\);"
)(self.doc)
json_content = codecs.unicode_escape_decode(json_content)[0]
json_content = json_content.encode('utf-8', 'surrogatepass').decode('utf-8')
self.doc = {
"advert": json.loads(json_content).get('advert', {}).get('mainAdvert', {}),
"agency": json.loads(json_content).get('agency', {})
}
@method
class get_housing(ItemElement):
klass = Housing
def parse(self, el):
|
self.agency_doc = el['agency']
self.el = el['advert']
obj_id = Dict('id')
def obj_house_type(self):
naturebien = Dict('propertyNatureId')(self)
try:
return
|
next(k for k, v in RET.items() if v == naturebien)
except StopIteration:
return NotLoaded
def obj_type(self):
idType = Dict('idTransactionType')(self)
try:
type = next(k for k, v in TYPES.items() if v == idType)
if type == POSTS_TYPES.FURNISHED_RENT:
# SeLoger does not let us discriminate between furnished and not furnished.
return POSTS_TYPES.RENT
return type
except StopIteration:
return NotAvailable
def obj_advert_type(self):
if 'Agences' in self.agency_doc['type']:
return ADVERT_TYPES.PROFESSIONAL
else:
return ADVERT_TYPES.PERSONAL
def obj_photos(self):
photos = []
for photo in Dict('photoList')(self):
photos.append(HousingPhoto(photo['fullscreenUrl']))
return photos
obj_title = Dict('title')
def obj_location(self):
address = Dict('address')(self)
return u'%s %s (%s)' % (address['neighbourhood'], address['city'],
address['zipCode'])
def obj_address(self):
address = Dict('address')(self)
p = PostalAddress()
p.street = address['street']
p.postal_code = address['zipCode']
p.city = address['city']
p.full_address = Field('location')(self)
return p
obj_text = Dict('description')
def obj_cost(self):
propertyPrice = Dict('propertyPrice')(self)
return decimal.Decimal(propertyPrice['prix'])
def obj_currency(self):
propertyPrice =
|
droidzone/Supernova-Kernel
|
tools/tools/perf/scripts/python/check-perf-trace.py
|
Python
|
gpl-2.0
| 2,501
| 0.02479
|
# perf trace event handlers, generated by perf trace -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_n
|
ame, common_cpu, common_secs, common_nsecs,
co
|
mmon_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
richo/flysight-manager
|
flysight_manager/config.py
|
Python
|
mit
| 6,142
| 0.002279
|
# flake8: noqa
import sys
import toml
import log
from .uploader import DropboxUploader
from .file_manager import DirectoryPoller, VolumePoller
SECT = 'flysight-manager'
class ConfigError(Exception):
pass
class FlysightConfig(object):
pass
class DropboxConfig(object):
pass
class VimeoConfig(object):
pass
class YoutubeConfig(object):
pass
class SendgridConfig(object):
pass
class PushoverConfig(object):
pass
class CameraConfig(object):
def __init__(self, name, cfg):
self._name = name
self._mountpoint = cfg["mountpoint"]
self._uuid = cfg["uuid"]
@property
def mountpoint(self):
return self._mountpoint
@property
def uuid(self):
return self._uuid
class GoProConfig(object):
def __init__(self):
self._cameras = {}
def add_camera(self, name, config):
self._cameras[name] = CameraConfig(name, config)
def cameras(self):
return self._cameras
class GswoopConfig(object):
pass
def get_poller(ty):
if ty == 'flysight':
get_sect = lambda cfg: cfg.flysight_cfg
elif ty == 'gopro':
get_sect = lambda cfg: cfg
else:
raise "Unknown ty: %s" % (repr(ty))
platform = sys.platform
if platform.startswith('linux'):
return lambda name, cfg: VolumePoller(name, get_sect(cfg).uuid, ty)
elif platform == 'darwin':
return lambda name, cfg: DirectoryPoller(name, get_sect(cfg).mountpoint, ty)
else:
        raise ValueError('Unknown platform: %s' % repr(platform))
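# Illustrative use (note added for clarity, not in the original): calling
# get_poller('flysight') returns a factory; on Linux the factory builds a
# VolumePoller keyed on the flysight section's uuid, while on macOS it builds
# a DirectoryPoller watching the flysight section's mountpoint.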
@log.make_loggable
class Configuration(object):
"""Stub class to be replaced by a real configuration system"""
CONFIG_FILE = 'flysight-manager.ini'
def __init__(self):
self.flysight_enabled = False
self.gopro_enabled = False
self.gswoop_enabled = False
self.vimeo_enabled = False
self.youtube_enabled = False
self.sendgrid_enabled = False
self.noop = False
self.preserve = False
self.processors = []
self.info("Loading config from %s" % self.CONFIG_FILE)
cfg = toml.load(open(self.CONFIG_FILE, 'rb'))
self.load_config(cfg)
self._uploader = None
if self.gswoop_enabled:
self.info("Enabling gswoop processor")
self.processors.append("gswoop")
def load_config(self, cfg):
"""Validate the configuration"""
get = lambda x: cfg[SECT][x]
# TODO: Confirm how this handles bools
enabled = lambda x: cfg[x]["enabled"]
backend = get('storage_backend')
if backend == 'dropbox':
self.storage_backend = 'dropbox'
self.dropbox_cfg = self.load_dropbox_opts(cfg)
else:
raise ConfigError("Unknown storage_backend: %s" % backend)
if enabled("flysight"):
self.flysight_enabled = True
self.flysight_cfg = self.load_flysight_opts(cfg)
if enabled("gopro"):
self.gopro_enabled = True
self.gopro_cfg = self.load_gopro_opts(cfg)
if enabled("gswoop"):
self.gswoop_enabled = True
self.gswoop_cfg = self.load_gswoop_opts(cfg)
if enabled("vimeo"):
self.vimeo_enabled = True
self.vimeo_cfg = self.load_vimeo_opts(cfg)
if enabled("youtube"):
self.youtube_enabled = True
self.youtube_cfg = self.load_youtube_opts(cfg)
if enabled("sendgrid"):
self.sendgrid_enabled = True
self.sendgrid_cfg = self.load_sendgrid_opts(cfg)
if enabled("pushover"):
self.pushover_enabled = True
self.pushover_cfg = self.load_pushover_opts(cfg)
def load_dropbox_opts(self, cfg):
get = lambda x: cfg["dropbox"][x]
_cfg = DropboxConfig()
_cfg.token = get("token")
return _cfg
def load_vimeo_opts(self, cfg):
get = lambda x: cfg["vimeo"][x]
_cfg = VimeoConfig()
_cfg.token = get("token")
return _cfg
def load_sendgrid_opts(self, cfg):
get = lambda x: cfg["sendgrid"][x]
_cfg = SendgridConfig()
_cfg.token = get("token")
_cfg.from
|
_addr = get("from")
_cfg.to_addr = get("to")
_cfg.subject = get("subj
|
ect")
return _cfg
def load_pushover_opts(self, cfg):
get = lambda x: cfg["pushover"][x]
_cfg = PushoverConfig()
_cfg.token = get("token")
_cfg.user = get("user")
return _cfg
def load_youtube_opts(self, cfg):
get = lambda x: cfg["youtube"][x]
_cfg = YoutubeConfig()
_cfg.access_token = get("access_token")
_cfg.client_id = get("client_id")
_cfg.client_secret = get("client_secret")
_cfg.refresh_token = get("refresh_token")
_cfg.token_uri = get("token_uri")
return _cfg
def load_gopro_opts(self, cfg):
_cfg = GoProConfig()
# Extract the enabled key, then pray that anything else is a camera
for k, v in cfg["gopro"].items():
if isinstance(v, dict):
_cfg.add_camera(k, v)
return _cfg
def load_flysight_opts(self, cfg):
get = lambda x: cfg["flysight"][x]
_cfg = FlysightConfig()
_cfg.mountpoint = get("mountpoint")
_cfg.uuid = get("uuid")
return _cfg
def load_gswoop_opts(self, cfg):
get = lambda x: cfg["gswoop"][x]
_cfg = GswoopConfig()
_cfg.binary = get("binary")
return _cfg
@property
def uploader(self):
if not self._uploader:
if self.storage_backend == 'dropbox':
self._uploader = DropboxUploader(self.dropbox_cfg.token, self.noop)
else:
raise ConfigError('Unknown storage backend: %s' % self.storage_backend)
return self._uploader
def update_with_args(self, args):
if args.noop:
self.debug("Setting noop flag")
self.noop = args.noop
if args.preserve:
self.debug("Setting preserve flag")
self.preserve = args.preserve
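# Illustrative sketch (added; not part of the original source): the rough shape of
# flysight-manager.ini that load_config() above appears to expect. Section and key
# names are inferred from the load_*_opts() helpers; treat them as assumptions.
#
#   storage_backend = "dropbox"      # read from the main section
#
#   [dropbox]
#   enabled = true
#   token = "<dropbox token>"
#
#   [flysight]
#   enabled = true
#   mountpoint = "/mnt/flysight"
#   uuid = "<volume uuid>"
#
#   [gswoop]
#   enabled = false
#   binary = "/usr/local/bin/gswoop"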
|
OpenNingia/l5r-character-manager-3
|
l5r/dialogs/newrankdlg.py
|
Python
|
gpl-3.0
| 3,473
| 0.001152
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2022 Daniele Simonetti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from PyQt5 import QtCore, QtGui, QtWidgets
import l5r.widgets as widgets
import l5r.api as api
import l5r.api.character.rankadv
class NextRankDlg(QtWidgets.QDialog):
def __init__(self, pc, parent=None):
super(NextRankDlg, self).__init__(parent)
self.pc = pc
self.build_ui()
self.connect_signals()
# self.setWindowFlags(QtCore.Qt.Tool)
self.setWindowTitle(self.tr("L5R: CM - Advance Rank"))
def build_ui(self):
vbox = QtWidgets.QVBoxLayout(self)
vbox.addWidget(QtWidgets.QLabel(self.tr("""\
You can now advance your Rank,
what would you want to do?
""")))
self.bt_go_on = QtWidgets.QPushButton(
self.tr("Advance in my current school")
)
self.bt_new_school = QtWidgets.QPushButton(
self.tr("Join a new school"))
for bt in [self.bt_go_on, self.bt_new_school]:
bt.setMinimumSize(QtCore.QSize(0, 38))
vbox.addWidget(self.bt_go_on)
vbox.addWidget(self.bt_new_school)
vbox.setSpacing(12)
is_path = api.data.schools.is_path(
api.character.schools.get_current()
)
former_school_adv = api.character.rankadv.get_former_school()
former_school = api.data.schools.get(former_school_adv.school) if former_school_adv else None
# check if the PC is following an alternate path
if is_path:
# offer to going back
if former_school:
self.bt_go_on.setText(self.tr("Continue ") + former_school.name)
else:
self.bt_go_on.setText(self.tr("Go back to your old school"))
self.bt_go_on.setEnabled(former_school is not None)
def connect_signals(self):
self.bt_go_on.clicked.connect(self.simply_go_on)
self.bt_new_school.clicked.connect(self.join_new_school)
def join_new_school(self):
dlg = widgets.SchoolChooserDialog(self)
if dlg.exec_() == QtWidgets.QDialog.Rejected:
return
self.accept()
def simply_go_on(self):
is_path = api.data.schools.is_path(
api.character.schools.get_current()
)
# check if the PC is following an alternate path
if is_path:
# the PC want to go back to the old school.
# find the first school that is not a path
api.character.rankadv.leave_path()
else:
api.character.rankadv.advance_rank()
self.accept()
def test():
import sys
app = QtWidgets.QApplication(sys.argv)
dlg = NextRankDlg(None, None)
dlg.show()
sys.exit(app.exec_())
if __name__ == '__main__':
test()
|
ricardogsilva/django-mapserver
|
djangomapserver/migrations/0001_initial.py
|
Python
|
bsd-2-clause
| 8,539
| 0.00445
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ClassObj',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('expression', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DataStoreBase',
|
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LayerObj',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('layer_type', models.SmallIntegerField(choices=[(3, b'raster'), (2, b'vector polygon'), (1, b'vector line'), (0, b'vector point')])),
('projection', models.CharField(default=b'init=epsg:4326', help_text=b'PROJ4 definition of the layer projection', max_length=255)),
('data', models.CharField(help_text=b'Full filename of the spatial data to process.', max_length=255)),
('class_item', models.CharField(help_text=b'Item name in attribute table to use for class lookups.', max_length=255, blank=True)),
('ows_abstract', models.TextField(blank=True)),
('ows_enable_request', models.CharField(default=b'*', max_length=255)),
('ows_include_items', models.CharField(default=b'all', max_length=50, blank=True)),
('gml_include_items', models.CharField(default=b'all', max_length=50, blank=True)),
('ows_opaque', models.SmallIntegerField(null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MapLayer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.SmallIntegerField(choices=[(0, b'off'), (1, b'on'), (2, b'default')])),
('layer_obj', models.ForeignKey(to='djangomapserver.LayerObj')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MapObj',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text=b'Unique identifier.', max_length=255)),
('status', models.SmallIntegerField(choices=[(0, b'off'), (1, b'on'), (2, b'default')])),
('projection', models.CharField(default=b'init=epsg:4326', help_text=b'PROJ4 definition of the map projection', max_length=255)),
('units', models.SmallIntegerField(blank=True, choices=[(5, b'Decimal degrees')])),
('size', models.CommaSeparatedIntegerField(help_text=b'Map size in pixel units', max_length=10)),
('cell_size', models.FloatField(help_text=b'Pixel size in map units.', null=True, blank=True)),
('image_type', models.CharField(max_length=10, choices=[(b'png', b'png')])),
('ows_sld_enabled', models.BooleanField(default=True)),
('ows_abstract', models.TextField(blank=True)),
('ows_enable_request', models.CharField(default=b'*', max_length=255)),
('ows_encoding', models.CharField(default=b'utf-8', max_length=20)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MapServerColor',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('red', models.IntegerField(null=True, blank=True)),
('green', models.IntegerField(null=True, blank=True)),
('blue', models.IntegerField(null=True, blank=True)),
('hex_string', models.CharField(max_length=9, blank=True)),
('attribute', models.CharField(max_length=255, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RectObj',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('max_x', models.FloatField()),
('max_y', models.FloatField()),
('min_x', models.FloatField()),
('min_y', models.FloatField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ShapefileDataStore',
fields=[
('datastorebase_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='djangomapserver.DataStoreBase')),
('path', models.CharField(help_text=b'Path to the directory holding shapefiles.', max_length=255)),
],
options={
},
bases=('djangomapserver.datastorebase',),
),
migrations.CreateModel(
name='SpatialiteDataStore',
fields=[
('datastorebase_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='djangomapserver.DataStoreBase')),
('path', models.CharField(help_text=b'Path to the Spatialite database file.', max_length=255)),
],
options={
},
bases=('djangomapserver.datastorebase',),
),
migrations.CreateModel(
name='StyleObj',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('class_obj', models.ForeignKey(to='djangomapserver.ClassObj')),
('color', models.ForeignKey(to='djangomapserver.MapServerColor')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='mapobj',
name='extent',
field=models.ForeignKey(help_text=b"Map's spatial extent.", to='djangomapserver.RectObj'),
preserve_default=True,
),
migrations.AddField(
model_name='mapobj',
name='image_color',
field=models.ForeignKey(blank=True, to='djangomapserver.MapServerColor', help_text=b'Initial map background color.', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='mapobj',
name='layers',
field=models.ManyToManyField(to='djangomapserver.LayerObj', null=True, through='djangomapserver.MapLayer', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='maplayer',
name='map_obj',
field=models.ForeignKey(to='djangomapserver.MapObj'),
preserve_default=True,
),
migrations.AddField(
model_name='maplayer',
name='style',
field=models.ForeignKey(blank=True, to='djangomapserver.StyleObj', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='layerobj',
name='data_store',
field=models.ForeignKey(to='djangomapserver.DataStoreBase'),
preserve_default=True,
),
migrations.AddField(
model_name='layerobj',
name='extent',
|
antoinecarme/pyaf
|
tests/artificial/transf_None/trend_MovingAverage/cycle_30/ar_12/test_artificial_32_None_MovingAverage_30_12_20.py
|
Python
|
bsd-3-clause
| 264
| 0.087121
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 30, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 12);
|
dshlai/oyprojectmanager
|
oyProjectManager/db/__init__.py
|
Python
|
bsd-2-clause
| 5,475
| 0.009315
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Erkan Ozgur Yilmaz
#
# This module is part of oyProjectManager and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
"""
Database Module
===============
This is where all the magic happens.
.. versionadded:: 0.2.0
SQLite3 Database:
To hold the information about all the data created
:class:`~oyProjectManager.models.project.Project`\ s,
:class:`~oyProjectManager.models.sequence.Sequence`\ s,
:class:`~oyProjectManager.models.shot.Shot`\ s,
:class:`~oyProjectManager.models.asset.Asset`\ s and
:class:`~oyProjectManager.models.version.VersionType`\ s
, there is a ".metadata.db" file in the repository root. This SQLite3
database has all the information about everything.
With this new extension it is much faster to query any data needed.
Querying data is very simple and fun. To get any kind of data from the
database, just call ``db.setup()`` and then use ``db.query`` to get the
data.
For a simple example, let's get all the shots for a Sequence called
"TEST_SEQ" in the "TEST_PROJECT"::
from oyProjectManager import db
from oyProjectManager import Project, Sequence, Shot
# setup the database session
db.setup()
all_shots = Shot.query().join(Sequence).\
filter(Sequence.project.name=="TEST_PROJECT").\
filter(Shot.sequence.name=="TEST_SEQ").all()
that's it.
"""
import os
import logging
import sqlalchemy
import oyProjectManager
from oyProjectManager.db.declarative import Base
# SQLAlchemy database engine
engine = None
# SQLAlchemy session manager
session = None
query = None
# SQLAlchemy metadata
metadata = None
database_url = None
# create a logger
logger = logging.getLogger(__name__)
#logger.setLevel(logging.WARNING)
logger.setLevel(logging.DEBUG)
def setup(database_url_in=None):
"""Utility function that helps to connect the system to the given database.
Returns the created session
:param database_url_in: The database address, default is None. If the
database_url is skipped or given as None, the default database url
from the :mod:`oyProjectManager.config` will be used. This is good,
just call ``db.setup()`` and then use ``db.session`` and ``db.query``
to get the data.
:returns: sqlalchemy.orm.session
"""
global engine
global session
global query
global metadata
global database_url
# create engine
# TODO: create tests for this
if database_url_in is None:
logger.debug("using the default database_url from the config file")
# use the default database
conf = oyProjectManager.conf
database_url_in = conf.database_url
# expand user and env variables if any
# TODO: because the dialect part and the address part are now coming from
# from one source, it is not possible to expand any variables in the path,
# try to use SQLAlchemy to separate the dialect and the address part and
# expand any data and then merge it again
#database_url_in = os.path.expanduser(
# os.path.expandvars(
# os.path.expandvars(
# database_url_in
# )
# )
#)
while "$" in database_url_in or "~" in database_url_in:
database_url_in = os.path.expanduser(
os.path.expandvars(
database_url_in
)
)
database_url = database_url_in
logger.debug("setting up database in %s" % database_url)
engine = sqlalchemy.create_engine(database_url, echo=False)
# create the tables
metadata = Base.metadata
metadata.create_all(engine)
# create the Session class
Session = sqlalchemy.orm.sessionmaker(bind=engine)
# create and save session object to session
session = Session()
query = session.query
# initialize the db
__init_db__()
# TODO: create a test to check if the returned session is session
return session
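# Usage sketch (added; illustrative only): connect with the default database_url
# from the config, then query through ``db.query``. The Project import mirrors the
# module docstring above.
#
#   from oyProjectManager import db
#   from oyProjectManager import Project
#   db.setup()                                # or db.setup("sqlite:////tmp/test.db")
#   all_projects = db.query(Project).all()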
def __init_db__():
"""initializes the just setup
|
database
It adds:
- Users
- VersionTypes
to the database.
"""
logger.debug("db is newly created, initializing the db")
global query
global session
# get the users from the config
from oyProjectManager import conf
# ------------------------------------------------------
# create the users
from oyProjectManager.models.auth import User
# get all users from db
users_from_db = query(User).all()
for user_data in conf.users_data:
name = user_data.get("name")
initials = user_data.get("initials")
email = user_data.get("email")
user_from_config = User(name, initials, email)
if user_from_config not in users_from_db:
session.add(user_from_config)
# ------------------------------------------------------
# add the VersionTypes
from oyProjectManager.models.version import VersionType
version_types_from_db = query(VersionType).all()
for version_type in conf.version_types:
version_type_from_conf = VersionType(**version_type)
if version_type_from_conf not in version_types_from_db:
session.add(version_type_from_conf)
session.commit()
logger.debug("finished initialization of the db")
|
ezequielpereira/Time-Line
|
libs64/wx/lib/agw/cubecolourdialog.py
|
Python
|
gpl-3.0
| 139,714
| 0.003285
|
# --------------------------------------------------------------------------- #
# CUBECOLOURDIALOG Widget wxPython IMPLEMENTATION
#
# Python Code By:
#
# Andrea Gavana, @ 16 Aug 2007
# Latest Revision: 14 Apr 2010, 12.00 GMT
#
#
# TODO List
#
# 1. Find A Way To Reduce Flickering On The 2 ColourPanels;
#
# 2. See Why wx.GCDC Doesn't Work As I Thought (!). It Looks Slow As A Turtle,
# But Probably I Am Doing Something Wrong While Painting The Alpha Textures.
#
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# [email protected]
# [email protected]
#
# Or, Obviously, To The wxPython Mailing List!!!
#
#
# End Of Comments
# --------------------------------------------------------------------------- #
"""
CubeColourDialog is an alternative implementation of `wx.ColourDialog`.
Description
===========
The CubeColourDialog is an alternative implementation of `wx.ColourDialog`, and it
offers different functionalities with respect to the default wxPython one. It
can be used as a replacement of `wx.ColourDialog` with exactly the same syntax and
methods.
Some features:
- RGB components may be controlled using spin controls or with mouse gestures
on a 3D RGB cube, with the 3 components laying on the X, Y, Z axes;
- HSB components may be controlled using spin controls or with mouse gestures
on a 2D colour wheel;
- Brightness has its own vertical slider to play with;
- The colour alpha channel can be controlled using another vertical slider, or
via spin control;
- The colour alpha channel controls can be completely hidden at startup or the
choice to use the alpha channel can be left to the user while playing with the
dialog, via a simple `wx.CheckBox`;
- The "old colour" and "new colour" are displayed in two small custom panel,
which support alpha transparency and texture;
- CubeColourDialog displays also the HTML colour code in hexadecimal format;
- When available, a corresponding "Web Safe" colour is generated using a 500
web colours "database" (a dictionary inside the widget source code). Web Safe
colours are recognized by all the browsers;
- When available, a corresponding "HTML name" for the selected colour is displayed,
by using the same 500 web colours "database";
- When available, a corresponding "Microsoft Access Code" for the selected colour
is displayed, by using the same 500 web colours "database".
And much more.
Window Styles
=============
This class supports the following window styles:
================== =========== ==================================================
Window Styles Hex Value Description
================== =========== ==================================================
``CCD_SHOW_ALPHA`` 0x1 Show the widget used to control colour alpha channels in `CubeColourDialog`.
================== =========== ==================================================
Events Processing
=================
`No custom events are available for this class.`
License And Version
===================
CubeColourDialog is distributed under the wxPython license.
Latest Revision: Andrea Gavana @ 14 Apr 2010, 12.00 GMT
Version 0.3.
"""
__docformat__ = "epytext"
#----------------------------------------------------------------------
# Beginning Of CUBECOLOURDIALOG wxPython Code
#----------------------------------------------------------------------
import wx
import colorsys
from math import pi, sin, cos, sqrt, atan2
from wx.lib.embeddedimage import PyEmbeddedImage
# Define a translation string
_ = wx.GetTranslation
# Show the alpha control in the dialog
CCD_SHOW_ALPHA = 1
""" Show the widget used to control colour alpha channels in `CubeColourDialog`. """
# Radius of the HSB colour wheel
RADIUS = 100
""" Radius of the HSB colour wheel. """
# Width of the mouse-controlled colour pointer
RECT_WIDTH = 5
""" Width of the mouse-controlled colour pointer. """
# Dictionary keys for the RGB colour cube
RED, GREEN, BLUE = 0, 1, 2
""" Dictionary keys for the RGB colour cube. """
Vertex = wx.Point(95, 109)
Top = wx.Point(95, 10)
Left = wx.Point(16, 148)
Right = wx.Point(174, 148)
colourAttributes = ["r", "g", "b", "h", "s", "v"]
colourMaxValues = [255, 255, 255, 359, 255, 255]
checkColour = wx.Colour(200, 200, 200)
HTMLCodes = {'#B0171F': ['Indian red', '2037680', ''],
'#DC143C': ['Crimson', '3937500', '#CC0033'],
'#FFB6C1': ['Lightpink', '12695295', '#FFCCCC'],
'#FFAEB9': ['Lightpink 1', '12168959', ''],
'#EEA2AD': ['Lightpink 2', '11379438', ''],
'#CD8C95': ['Lightpink 3', '9800909', ''],
'#8B5F65': ['Lightpink 4', '6643595', ''],
'#FFC0CB': ['Pink', '13353215', '#FFCCCC'],
'#FFB5C5': ['Pink 1', '12957183', ''],
'#EEA9B8': ['Pink 2', '12102126', ''],
'#CD919E': ['Pink 3', '10392013', ''],
'#8B636C': ['Pink 4', '7103371', ''],
'#DB7093': ['Palevioletred', '9662683', '#CC6699'],
'#FF82AB': ['Palevioletred 1', '11240191', ''],
'#EE799F': ['Palevioletred 2', '10451438', ''],
'#CD6889': ['Palevioletred 3', '9005261', ''],
'#8B475D': ['Palevioletred 4', '6113163', ''],
'#FFF0F5': ['Lavenderblush 1 (lavenderblush)', '16118015', '#FFFFFF'],
'#EEE0E5': ['Lavenderblush 2', '15065326', ''],
'#CDC1C5': ['Lavenderblush 3', '12960205', ''],
'#8B8386': ['Lavenderblush 4', '8815499', ''],
'#FF3E96': ['Violetred 1', '9846527', ''],
'#EE3A8C': ['Violetred 2', '9190126', ''],
'#CD3278': ['Violetred 3', '7877325', ''],
'#8B2252': ['Violetred 4', '5382795', ''],
'#FF69B4': ['Hotpink', '11823615', '#FF66CC'],
'#FF6EB4': ['Hotpink 1', '11824895', ''],
'#EE6AA7': ['Hotpink 2', '10971886', ''],
'#CD6090': ['Hotpink 3', '9461965', ''],
'#8B3A62': ['Hotpink 4', '6437515', ''],
'#872657': ['Raspberry', '5711495', ''],
'#FF1493': ['Deeppink 1 (deeppink)', '9639167', '#FF0099'],
'#EE1289': ['Deeppink 2', '8983278', ''],
'#CD1076': ['Deeppink 3', '7737549', ''],
'#8B0A50': ['Deeppink 4', '5245579', ''],
'#FF34B3': ['Maroon 1', '11744511', ''],
'#EE30A7': ['Maroon 2', '10957038', ''],
'#CD2990': ['Maroon 3', '9447885', ''],
'#8B1C62': ['Maroon 4', '6429835', ''],
'#C71585': ['Mediumvioletred', '8721863', '#CC0066'],
'#D02090': ['Violetred', '9445584', ''],
'#DA70D6': ['Orchid', '14053594', '#CC66CC'],
'#FF83FA': ['Orchid 1', '16417791', ''],
'#EE7AE9': ['Orchid 2', '15301358', ''],
'#CD69C9': ['Orchid 3', '13199821', ''],
'#8B4789': ['Orchid 4', '8996747', ''],
'#D8BFD8': ['Thistle', '14204888', '#CCCCCC'],
'#FFE1FF': ['Thistle 1', '16769535', ''],
'#EED2EE': ['Thistle 2', '15651566', ''],
'#CDB5CD': ['Thistle 3', '13481421', ''],
'#8B7B8B': ['Thistle 4', '9141131', ''],
'#FFBBFF': ['Plum 1', '16759807', ''],
'#EEAEEE': ['Plum 2', '15642350', ''],
'#CD96CD': ['Plum 3', '13473485', ''],
'#8B668B': ['Plum 4', '9135755', ''],
'#DDA0DD': ['Plum', '14524637', '#CC99CC'],
'#EE82EE': ['Violet', '15631086', '#FF99FF'],
'#FF00FF': ['Magenta (fuchsia)', '16711935', '#FF00FF'],
'#EE00EE': ['Magenta 2', '15597806', ''],
'#CD00CD': ['Magenta 3', '13435085', ''],
'#8B008B': ['Magenta 4 (darkmagenta)', '9109643', '#990099'],
'#800080': ['Purple', '8388736', '#990099'],
'#BA55D3': ['Mediumorchid', '13850042', '#CC66CC'],
'#E066FF': ['Mediumorchid 1', '16738016', ''],
'#D15FEE': ['Mediumorchid 2', '15622097', ''],
'#B452CD': ['Mediumorchid 3', '13456052', ''],
'#7A378B': ['Mediumorchid 4', '9123706', ''],
'#9400D3': ['Darkvio
|
Buggaboo/gimp-plugin-export-layers
|
export_layers/pygimplib/pgitemdata.py
|
Python
|
gpl-3.0
| 14,487
| 0.015669
|
#-------------------------------------------------------------------------------
#
# This file is part of pygimplib.
#
# Copyright (C) 2014, 2015 khalim19 <[email protected]>
#
# pygimplib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygimplib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygimplib. If not, see <http://www.gnu.org/licenses/>.
#
#-------------------------------------------------------------------------------
"""
This module defines the following classes:
* `ItemData` - an associative container that stores all GIMP items and item
groups of a certain type
* subclasses of `ItemData`:
* `LayerData` for layers
* `ChannelData` for channels
* `PathData` for paths
* `_ItemDataElement` - wrapper for `gimp.Item` objects containing custom
attributes derived from the original `gimp.Item` attributes
"""
#===============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
str = unicode
#===============================================================================
import os
import abc
from collections import OrderedDict
from collections import namedtuple
import gimp
from . import pgpath
from . import objectfilter
#===============================================================================
pdb = gimp.pdb
#===============================================================================
class ItemData(object):
"""
This class is an interface to store all items (and item groups) of a certain
type (e.g. layers, channels or paths) of a GIMP image in an ordered
dictionary, allowing one to access the items via their names and get various
custom attributes derived from the existing item attributes.
Use one of the subclasses for items of a certain type:
* `LayerData` for layers,
* `ChannelData` for channels,
* `PathData` for paths (vectors).
For custom item attributes, see the documentation for the `_ItemDataElement`
class. `_ItemDataElement` is common for all `ItemData` subclasses.
Attributes:
* `image` - GIMP image to get item data from.
* `is_filtered` - If True, ignore items that do not match the filter
(`ObjectFilter`) in this object when iterating.
* `filter` (read-only) - `ObjectFilter` instance where you can add or remove
filter rules or subfilters to filter items.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, image, is_filtered=False, filter_match_type=objectfilter.ObjectFilter.MATCH_ALL):
self.image = image
self.is_filtered = is_filtered
# Filters applied to all items in self._itemdata
self._filter = objectfilter.ObjectFilter(filter_match_type)
# Contains all items (including item groups) in the item tree.
# key: `_ItemDataElement.orig_name` (derived from `gimp.Item.name`, which is unique)
# value: `_ItemDataElement` object
self._itemdata = OrderedDict()
# key `_ItemDataElement` object (parent) or None (root of the item tree)
# value: set of `_ItemDataElement` objects
self._uniquified_itemdata = {}
self._fill_item_data()
@property
def filter(self):
return self._filter
def __getitem__(self, name):
"""
Access an `_ItemDataElement` object by its `orig_name` attribute.
"""
return self._itemdata[name]
def __contains__(self, name):
"""
Return True if an `_ItemDataElement` object, specified by its `orig_name`
attribute, is in the item data. Otherwise return False.
"""
return name in self._itemdata
def __len__(self):
"""
Return the number of all item data elements - that is, all immediate
children of the image and all nested children.
"""
return len([item_elem for item_elem in self])
def __iter__(self):
"""
If `is_filtered` is False, iterate over all items. If `is_filtered` is True,
iterate only over items that match the filter in this object.
Yields:
* `item_elem` - The current `_ItemDataElement` object.
"""
if not self.is_filtered:
for item_elem in self._itemdata.values():
yield item_elem
else:
for item_elem in self._itemdata.values():
if self._filter.is_match(item_elem):
yield item_elem
def _items(self):
"""
Yield current (`gimp.Item.name`, `_ItemDataElement` object) tuple.
"""
if not self.is_filtered:
for name, item_elem in self._itemdata.items():
yield name, item_elem
else:
for name, item_elem in self._itemdata.items():
if self._filter.is_match(item_elem):
yield name, item_elem
def uniquify_name(self, item_elem, include_item_path=True,
uniquifier_position=None, uniquifier_position_parents=None):
"""
Make the `name` attribute in the specified `_ItemDataElement` object
unique among all other, already uniquified `_ItemDataElement` objects.
To achieve uniquification, a string ("uniquifier") in the form of
" (<number>)" is inserted at the end of the item names.
Parameters:
* `item_elem` - `_ItemDataElement` object whose `name` attribute
will be uniquified.
* `include_item_path` - If True, take the item path into account when
uniquifying.
* `uniquifier_position` - Position (index) where the uniquifier is inserted
into the current item. If the position is None, insert the uniquifier at
the end of the item name (i.e. append it).
* `uniquifier_position_parents` - Position (index) where the uniquifier is
inserted into the parents of the current item. If the position is None,
insert the uniquifier at the end of the name of each parent. This
parameter has no effect if `include_item_path` is False.
"""
if include_item_path:
for elem in item_elem.parents + [item_elem]:
parent = elem.parent
if parent not in self._uniquified_itemdata:
self._uniquified_itemdata[parent] = set()
if elem not in self._uniquified_itemdata[parent]:
item_names = set([elem_.name for elem_ in self._uniquified_itemdata[parent]])
if elem.name not in item_names:
self._uniquified_itemdata[parent].add(elem)
else:
if elem == item_elem:
position = uniquifier_position
else:
position = uniquifier_position_parents
elem.name = pgpath.uniquify_string(elem.name, item_names, position)
self._uniquified_itemdata[parent].add(elem)
else:
# Use None as the root of the item tree.
parent = None
if parent not in self._uniquified_itemdata:
self._uniquified_itemdata[parent] = set()
item_elem.name = pgpath.uniquify_string(
item_elem.name, self._uniquified_itemdata[parent], uniquifier_position)
self._uniquified_itemdata[parent].add(item_elem.name)
def _fill_item_data(self):
"""
Fill the _itemdata dictionary, containing
<gimp.Item.name, _ItemDataElement> pairs.
"""
_ItemTreeNode = namedtuple('_ItemTreeNode', ['children', 'parents'])
item_tree = [_ItemTreeNode(self._get_children_from_image(self.image), [])]
while item_tree:
node = item_tree.pop(0)
index = 0
for item in node.children:
parents = list(node.parents)
item_elem = _ItemDataElement(item, parents)
if pdb.gimp_item_is_group(item):
item_tree.insert(index, _ItemTreeNode(s
|
dnlcrl/PyFunt
|
pyfunt/spatial_up_sampling_nearest.py
|
Python
|
mit
| 2,179
| 0.001377
|
#!/usr/bin/env python
# coding: utf-8
from module import Module
import numpy as np
try:
from im2col_cyt import im2col_cython, col2im_cython
except ImportError:
print('Installation broken, please reinstall PyFunt')
from numpy.lib.stride_tricks import as_strided
def tile_array(a, b1, b2):
r, c = a.shape
rs, cs = a.strides
x = as_strided(a, (r, b1, c, b2), (rs, 0, cs, 0))
return x.reshape(r*b1, c*b2)
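# Example (added; illustrative): tile_array repeats every element of a 2-D array into
# a b1 x b2 block, i.e. nearest-neighbour upsampling of a single feature map:
#
#   a = np.array([[1, 2],
#                 [3, 4]])
#   tile_array(a, 2, 2)
#   # -> [[1, 1, 2, 2],
#   #     [1, 1, 2, 2],
#   #     [3, 3, 4, 4],
#   #     [3, 3, 4, 4]]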
class SpatialUpSamplingNearest(Module):
def __init__(self, scale):
super(SpatialUpSamplingNearest, self).__init__()
self.scale_factor = scale
if self.scale_factor < 1:
raise Exception('scale_factor must be greater than 1')
if np.floor(self.scale_factor) != self.scale_factor:
raise Exception('scale_factor must be integer')
def update_output(self, x):
out_size = list(x.shape)  # copy the shape to a mutable list; a tuple cannot be scaled in place
out_size[x.ndim - 1] *= self.scale_factor
out_size[x.ndim - 2] *= self.scale_factor
N, C, H, W = out_size
stride = self.scale_factor
pool_height = pool_width = stride
x_reshaped = x.transpose(2, 3, 0, 1).flatten()
out_cols = np.zeros(out_size)
out_cols[:, np.arange(out_cols.shape[1])] = x_reshaped
out = col2im_cython(out_cols, N * C, 1, H, W, pool_height, pool_width,
padding=0, stride=stride)
out = out.reshape(out_size)
self.output = out
return self.output
def update_grad_input(self, x, grad_output, scale=1):
N, C, H, W = grad_output.shape
pool_height = pool_width = self.scale_factor
stride = self.scale_factor
out_height = (H - pool_height) / stride + 1
out_width = (W - pool_width) / stride + 1
grad_output_split = grad_output.reshape(N * C, 1, H, W)
grad_output_cols = im2col_cython(
grad_output_split, pool_height, pool_width, padding=0, stride=stride)
grad_intput_cols = grad_output_cols[0, np.arange(grad_output_cols.shape[1])]
grad_input = grad_intput_cols.reshape(
out_height, out_width, N, C).transpose(2, 3, 0, 1)
self.grad_input = grad_input
|
darren-wang/op
|
oslo_policy/_parser.py
|
Python
|
apache-2.0
| 8,552
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import six
from oslo_policy import _checks
from oslo_policy._i18n import _LE
LOG = logging.getLogger(__name__)
def reducer(*tokens):
"""Decorator for reduction methods.
Arguments are a sequence of tokens, in order, which should trigger running
this reduction method.
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
class ParseStateMeta(type):
"""Metaclass for the :class:`.ParseState` class.
Facilitates identifying reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""Create the class.
Injects the 'reducers' list, a list of tuples matching token sequences
to the names of the corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
@six.add_metaclass(ParseStateMeta)
class ParseState(object):
"""Implement the core of parsing the policy language.
Uses a greedy reduction algorithm to reduce a sequence of tokens into
a single terminal, the value of which will be the root of the
:class:`Check` tree.
.. note::
Error reporting is rather lacking. The best we can get with this
parser formulation is an overall "parse failed" error. Fortunately, the
policy language is simple enough that this shouldn't be that big a
problem.
"""
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""Perform a greedy reduction of the token stream.
If a reducer method matches, it will be executed, then the
:meth:`reduce` method will be called recursively to search for any more
possible reductions.
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
|
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
|
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state.
Calls :meth:`reduce`.
"""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
@property
def result(self):
"""Obtain the final result of the parse.
:raises ValueError: If the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError('Could not parse rule')
return self.values[0]
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expressions into a 'check' token."""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'.
Join two checks by the 'and' operator.
"""
return [('and_expr', _checks.AndCheck([check1, check2]))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding one more check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'.
Join two checks by the 'or' operator.
"""
return [('or_expr', _checks.OrCheck([check1, check2]))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding one more check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of another check."""
return [('check', _checks.NotCheck(check))]
def _parse_check(rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special checks
if rule == '!':
return _checks.FalseCheck()
elif rule == '@':
return _checks.TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_LE('Failed to understand rule %s') % rule)
# If the rule is invalid, we'll fail closed
return _checks.FalseCheck()
# Find what implements the check
if kind in _checks.registered_checks:
return _checks.registered_checks[kind](kind, match)
elif None in _checks.registered_checks:
return _checks.registered_checks[None](kind, match)
else:
LOG.error(_LE('No handler for matches of kind %s') % kind)
return _checks.FalseCheck()
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
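# Illustration (added for clarity): tokenizing the rule
#   "role:admin and (is_owner:True or rule:secret)"
# yields a 'check' token for role:admin, the keyword 'and', an opening '(', two more
# 'check' tokens joined by the keyword 'or', and a closing ')'.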
def parse_rule(rule):
"""Parses policy to the tree.
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return
|
gahlberg/pynet_class_work
|
class2/ex2a_telnet.py
|
Python
|
apache-2.0
| 1,588
| 0.003149
|
#!/usr/bin/env python
import telnetlib
import time
import socket
import sys
import getpass
TELNET_PORT = 23
TELNET_TIMEOUT = 6
def send_command(remote_conn, cmd):
'''
Send a command down the established Telnet channel and return the output
'''
cmd = cmd.rstrip()
remote_conn.write(cmd + '\n')
time.sleep(1)
return remote_conn.read_very_eager()
def login(remote_conn, username, password):
'''
Login to pynet-rtr1
'''
output = remote_conn.read_until("sername:", TELNET_TIMEOUT)
remote_conn.write(username + '\n')
output += remote_conn.read_until("ssword:", TELNET_TIMEOUT)
remote_conn.write(password + '\n')
return output
def no_more(remote_conn, paging_cmd='terminal length 0'):
'''
No paging of Output
'''
return send_command(remote_conn, paging_cmd)
def telnet_connect(ip_addr):
'''
Establish the Telnet Connection
'''
try:
return telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
except socket.timeout:
|
sys.exit("Connection timed-out")
def main():
'''
Connect to pynet-rtr1, login, and issue 'show ip int brief'
'''
ip_addr = raw_input("IP address: ")
ip_addr = ip_addr.strip()
username = 'pyclass'
password = getpass.getpass()
remote_conn = telnet_connect(ip_addr)
output = login(remote_conn, username, password)
time.sleep(1)
remote_conn.read_very_eager()
no_more(remote_conn)
output = send_command(remote_conn, 'show ip int brief')
print "\n\n"
print output
print "\n\n"
remote_conn.close()
if __name__ == "__main__":
main()
|
csdevsc/colcat_crowdsourcing_application
|
manage/views.py
|
Python
|
mit
| 7,968
| 0.005522
|
from django.shortcuts import render, render_to_response
from django.shortcuts import redirect
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.conf import settings
from manage.forms import *
from manage.models import *
from tasks.models import *
import os
import csv
from django.http import HttpResponse, HttpRequest
# Views
def login(request):
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
if (username == settings.MANAGE_USERNAME and password == settings.MANAGE_PASS):
return redirect('manage.views.main')
return render(request, 'manage/login.html', {})
def main(request):
# Make sure no direct access to main page
try:
referer = request.META['HTTP_REFERER']
except:
return redirect('manage.views.login')
if referer.startswith('http://colcat.calit2.uci.edu:8003'):
return render(request, 'manage/main.html', {})
return redirect('manage.views.login')
# LANGUAGES
def new_language(request):
if request.method == "POST":
form = LanguageForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.save()
return HttpResponseRedirect(reverse('manage.views.view_languages'))
else:
form = LanguageForm()
return render(request, 'manage/new-language.html', {'form': form})
def view_languages(request):
language_list = Language.objects.all()
context_dict = {'languages': language_list}
return render(request, 'manage/view-languages.html', context_dict)
# IMAGES
def new_image(request):
# Handle file upload
if request.method == 'POST':
form = ImageForm(request.POST, request.FILES)
if form.is_valid():
print request.FILES['image_filepath'].name
print request.FILES['image_filepath']
image_name = os.path.splitext(request.FILES['image_filepath'].name)[0]
newimg = Image_Data(image_filepath = request.FILES['image_filepath'], image_id = image_name, language_name = request.POST.get('language_name'), task_type_id = request.POST.get('task_type_id'))
newimg.save()
# Redirect to the document list after POST
return HttpResponseRedirect(reverse('manage.views.view_images'))
else:
form = ImageForm()  # An empty, unbound form
return render(request, 'manage/new-image.html', {'form': form})
def view_images(request):
image_list = Image_Data.objects.all()
context_dict = {'images': image_list}
return render(request, 'manage/view-images.html', context_dict)
# DATA MODELS
def new_data_model(request):
if request.method == "POST":
form = DataModelForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.save()
return HttpResponseRedirect(reverse('manage.views.view_data_models'))
else:
form = DataModelForm()
return render(request, 'manage/new-data-model.html', {'form': form})
def view_data_models(request):
model_list = Data_Model.objects.all()
context_dict = {'models': model_list}
return render(request, 'manage/view-data-models.html', context_dict)
# TASKS
def new_task(request):
if request.method == "POST":
form = TaskForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.language_id = request.POST.get('language_id')
post.task_type_id = request.POST.get('task_type_id')
post.image_id = request.POST.get('image_id')
post.task_name = request.POST.get('language_id') + '_' + request.POST.get('task_type_id') + '_' + request.POST.get('image_id')
post.task_url = '/tasks/'+request.POST.get('language_id')+'/'+request.POST.get('task_type_id') + '/'+request.POST.get('image_id')
post.save()
return HttpResponseRedirect(reverse('manage.views.view_tasks'))
else:
form = TaskForm()
return render(request, 'manage/new-task.html', {'form': form})
def view_tasks(request):
if request.method == "POST":
if 'create_batch_file' in request.POST:
print "Creating batch file..."
task_choices = request.POST.getlist('task_choices')
|
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="batch.csv"'
writer = csv.writer(response)
headers = ['task_language_id', 'task_type_id', 'task_img_id']
writer.writerow(headers)
for tid in task_choices:
task = Task.objects.get(task_id=tid)
task_info = [task.language_id, task.task_type_id, task.image_id]
writer.writerow(task_info)
print 'Finished writing batch file'
return response
elif 'mark_tasks_complete' in request.POST:
print "Marking tasks complete..."
tasks_complete = request.POST.getlist('tasks_complete')
print tasks_complete
for tid in tasks_complete:
task = Task.objects.get(task_id=tid)
task.complete = True
task.save()
task_list = Task.objects.all()
context_dict = {'tasks': task_list}
return render(request, 'manage/view-tasks.html', context_dict)
def new_task_type(request):
if request.method == "POST":
form = TaskTypeForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.save()
return HttpResponseRedirect(reverse('manage.views.view_task_types'))
else:
form = TaskTypeForm()
return render(request, 'manage/new-task-type.html', {'form': form})
def view_task_types(request):
task_type_list = Task_Type.objects.all()
context_dict = {'task_types': task_type_list}
return render(request, 'manage/view-task-types.html', context_dict)
def new_task_template(request):
if request.method == "POST":
form = TaskTemplateForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.save()
return HttpResponseRedirect(reverse('manage.views.view_task_templates'))
else:
form = TaskTemplateForm()
return render(request, 'manage/new-task-template.html', {'form': form})
def view_task_templates(request):
template_list = Task_Template.objects.all()
context_dict = {'templates': template_list}
return render(request, 'manage/view-task-templates.html', context_dict)
# RESPONSES
def download_responses(request):
response_lists = []
# Add objects for each response type
try:
response_list_foci_001 = Task_Foci_001.objects.all()
response_lists.append(response_list_foci_001)
except:
pass
try:
response_list_naming_001 = Task_Naming_001.objects.all()
response_lists.append(response_list_naming_001)
except:
pass
context_dict = {'response_lists': [r.model.__name__ for r in response_lists]}
for rlist in response_lists:
write_responses_to_csv(rlist, 'uploads/responses/'+rlist.model.__name__+'.csv')
return render(request, 'manage/download-responses.html', context_dict)
import csv
from django.db.models.loading import get_model
def write_responses_to_csv(qs, outfile_path):
model = qs.model
writer = csv.writer(open(outfile_path, 'w'))
headers = []
for field in model._meta.fields:
headers.append(field.name)
writer.writerow(headers)
for obj in qs:
row = []
for field in headers:
val = getattr(obj, field)
if callable(val):
val = val()
if type(val) == unicode:
val = val.encode("utf-8")
row.append(val)
writer.writerow(row)
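# Example (added; illustrative): export one response model to disk, mirroring the
# call made in download_responses() above.
#   write_responses_to_csv(Task_Naming_001.objects.all(),
#                          'uploads/responses/Task_Naming_001.csv')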
|
smjhnits/Praktikum_TU_D_16-17
|
Fortgeschrittenenpraktikum/Protokolle/V27_Zeeman-Effekt/Python/blau_s.py
|
Python
|
mit
| 2,866
| 0.010479
|
import numpy as np
from scipy.stats import sem
import scipy.constants as const
from uncertainties import ufloat
import uncertainties.unumpy as unp
from uncertainties.unumpy import (nominal_values as noms, std_devs as stds)
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from PIL import Image
import scipy.misc
from pint import UnitRegistry
u = UnitRegistry()
Q_ = u.Quantity
## Wavelengths in nm
lambda_b = Q_(480.0, 'nanometer')
n_b = 1.4635
h = Q_(const.h, 'joule * second')
e_0 = Q_(const.e, 'coulomb')
mu_bohr = Q_(const.physical_constants['Bohr magneton'][0], 'joule/tesla')
c = Q_(const.c, 'meter / second')
d = Q_(4, 'millimeter')
dispsgebiet_b = lambda_b**2 / (2 * d) * np.sqrt(1 / (n_b**2 - 1))
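# Evaluation outline (added comment): the Lande factor is obtained further below from
# the measured splitting via
#   del_lambda = 1/2 * dispersion_region * del_S / delta_S
#   delta_E    = h * c / lambda**2 * del_lambda
#   g          = delta_E / (mu_B * B)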
## Hysteresis, B in mT
def poly(x, a, b, c, d):
return a * x**3 + b * x**2 + c * x + d
B_auf = np.array([4, 87, 112,174, 230, 290, 352, 419,
476, 540, 600, 662, 714, 775, 823,872, 916, 959, 987,
1015, 1046, 1072])
B_ab = np.array([7, 57, 120, 180, 251, 306, 361, 428,
480, 550, 612, 654, 715, 780, 830, 878, 924, 962,
993, 1020, 1050, 1072])
I = np.linspace(0, 21, 22)
params_B_auf, covariance_B_auf = curve_fit(poly, I, B_auf)
params_B_ab, covariance_B_ab = curve_fit(poly, I, B_ab)
### BLUE ###
## Image one, timestamp 10:33
## Image two, I = 5.6 A, polarisation = +-1
## Distances measured from one line to its two neighbouring
## lines +-> |*| |*| (this is how measurement 1 was taken)
## two holds the distances between the peaks of a split line
## Pixel widths of the 3 + 13 line
pixel_01_b = np.array([(1405 + 1244) / 2, (1690 + 1541) / 2, (1952
+ 1852) / 2, (2170 + 2055) / 2, (2399 + 2278) / 2, (2596 + 2481) / 2, (2781 +
2673) / 2, (2961 + 2861) / 2, (3130 + 3033) / 2, (3294 + 3202) / 2])
pixel_02_b_1 = np.array([(1419 + 1060) / 2, (1728 + 1419) / 2, (1973
+ 1728) / 2, (1973 + 1728) / 2, (2215 + 1973) / 2, (2435 + 2215) / 2, (2638 +
2435) / 2, (2816 + 2638) / 2, (3013 + 2816) / 2, (3176 + 3010) / 2, (3342 +
3176) / 2])
pixel_02_b_2 = np.array([(1494 -1339), (1776 - 1657), (2035 - 1910), (2273 - 2154), (2478 - 2377),
(2677 - 2582), (2873 - 2769), (3045 - 2959), 3217 - 3135, 3383 - 3303])
delta_S_b = np.zeros(len(pixel_01_b) - 1)
for i in range(0, len(pixel_01_b) - 1, 1):
delta_S_b[i] = pixel_01_b[i + 1] - pixel_01_b[i]
#print(delta_S_b)
del_S_b = pixel_02_b_2[1:10]#np.zeros(9)
#for i in range(0, len(pixel_02_b_2) - 1, 1):
# del_S_b[i] = pixel_02_b_2[i + 1] - pixel_02_b_2[i]
del_lambda_b = (1 / 2 * dispsgebiet_b * del_S_b / delta_S_b)
delta_E_b = (h * c / lambda_b**2 * del_lambda_b).to('eV')
g_b = (delta_E_b / (mu_bohr * Q_(poly(5.6, *params_B_auf), 'millitesla'))).to('dimensionless')
g_b_best = ufloat(np.mean(g_b), np.std(g_b, ddof=1))
print(g_b,'##', g_b_best)
print(del_S_b, '##', delta_S_b)
print('Hysterese 5.6 A', poly(5.6, *params_B_auf))
print((2 + 3/2) / 2)
|
peterdemin/mutant
|
src/mutant_django_json/__init__.py
|
Python
|
isc
| 341
| 0
|
from mutant_django.generator import DjangoBase
def register(app):
app.extend_generator('django', django_json_field)
def django_json_field(gen):
gen.field_generators['JSON'] = JSONField
class JSONField(DjangoBase):
DJANGO_FIELD = 'JSONField'
def render_imports(self):
return ['from jsonfield import JSONField']
| |
tecnovert/particl-core
|
test/functional/wallet_descriptor.py
|
Python
|
mit
| 10,725
| 0.00317
|
#!/usr/bin/env python3
# Copyright (c) 2019-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descriptor wallet function."""
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error
)
class WalletDescriptorTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-keypool=100']]
self.wallet_names = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_sqlite()
def run_test(self):
if self.is_bdb_compiled():
# Make a legacy wallet and check it is BDB
self.nodes[0].createwallet(wallet_name="legacy1", descriptors=False)
wallet_info = self.nodes[0].getwalletinfo()
assert_equal(wallet_info['format'], 'bdb')
self.nodes[0].unloadwallet("le
|
gacy1")
else:
self.log.warning("Skipping BDB test")
# Make a descriptor wallet
self.log.info("Making a descriptor wallet")
self.nodes[0].createwallet(wallet_name="desc1", descriptors=True)
# A descriptor wallet should have 100 addresses * 4 types = 400 keys
self.log.info("Checking wallet info")
wallet_info = self.nodes[0].getwalletinfo()
assert_equal(wallet_info['format'], 'sqlite')
assert_equal(wallet_info['keypoolsize'], 400)
assert_equal(wallet_info['keypoolsize_hd_internal'], 400)
assert 'keypoololdest' not in wallet_info
# Check that getnewaddress works
self.log.info("Test that getnewaddress and getrawchangeaddress work")
addr = self.nodes[0].getnewaddress("", "legacy")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('pkh(')
assert_equal(addr_info['hdkeypath'], 'm/44\'/1\'/0\'/0/0')
addr = self.nodes[0].getnewaddress("", "p2sh-segwit")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('sh(wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/49\'/1\'/0\'/0/0')
addr = self.nodes[0].getnewaddress("", "bech32")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/84\'/1\'/0\'/0/0')
# Check that getrawchangeaddress works
addr = self.nodes[0].getrawchangeaddress("legacy")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('pkh(')
assert_equal(addr_info['hdkeypath'], 'm/44\'/1\'/0\'/1/0')
addr = self.nodes[0].getrawchangeaddress("p2sh-segwit")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('sh(wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/49\'/1\'/0\'/1/0')
addr = self.nodes[0].getrawchangeaddress("bech32")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/84\'/1\'/0\'/1/0')
# Make a wallet to receive coins at
self.nodes[0].createwallet(wallet_name="desc2", descriptors=True)
recv_wrpc = self.nodes[0].get_wallet_rpc("desc2")
send_wrpc = self.nodes[0].get_wallet_rpc("desc1")
# Generate some coins
self.generatetoaddress(self.nodes[0], COINBASE_MATURITY + 1, send_wrpc.getnewaddress())
# Make transactions
self.log.info("Test sending and receiving")
addr = recv_wrpc.getnewaddress()
send_wrpc.sendtoaddress(addr, 10)
# Make sure things are disabled
self.log.info("Test disabled RPCs")
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importprivkey, "cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW")
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importpubkey, send_wrpc.getaddressinfo(send_wrpc.getnewaddress()))
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importaddress, recv_wrpc.getnewaddress())
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importmulti, [])
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.addmultisigaddress, 1, [recv_wrpc.getnewaddress()])
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.dumpprivkey, recv_wrpc.getnewaddress())
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.dumpwallet, 'wallet.dump')
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importwallet, 'wallet.dump')
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.sethdseed)
self.log.info("Test encryption")
# Get the master fingerprint before encrypt
info1 = send_wrpc.getaddressinfo(send_wrpc.getnewaddress())
# Encrypt wallet 0
send_wrpc.encryptwallet('pass')
send_wrpc.walletpassphrase('pass', 10)
addr = send_wrpc.getnewaddress()
info2 = send_wrpc.getaddressinfo(addr)
assert info1['hdmasterfingerprint'] != info2['hdmasterfingerprint']
send_wrpc.walletlock()
assert 'hdmasterfingerprint' in send_wrpc.getaddressinfo(send_wrpc.getnewaddress())
info3 = send_wrpc.getaddressinfo(addr)
assert_equal(info2['desc'], info3['desc'])
self.log.info("Test that getnewaddress still works after keypool is exhausted in an encrypted wallet")
for _ in range(500):
send_wrpc.getnewaddress()
self.log.info("Test that unlock is needed when deriving only hardened keys in an encrypted wallet")
send_wrpc.walletpassphrase('pass', 10)
send_wrpc.importdescriptors([{
"desc": "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/*h)#y4dfsj7n",
"timestamp": "now",
"range": [0,10],
"active": True
}])
send_wrpc.walletlock()
# Exhaust keypool of 100
for _ in range(100):
#send_wrpc.getnewaddress(address_type='bech32')
send_wrpc.getnewaddress('', 'bech32')
# This should now error
assert_raises_rpc_error(-12, "Keypool ran out, please call keypoolrefill first", send_wrpc.getnewaddress, '', 'bech32')
self.log.info("Test born encrypted wallets")
self.nodes[0].createwallet('desc_enc', False, False, 'pass', False, True)
enc_rpc = self.nodes[0].get_wallet_rpc('desc_enc')
enc_rpc.getnewaddress() # Makes sure that we can get a new address from a born encrypted wallet
self.log.info("Test blank descriptor wallets")
self.nodes[0].createwallet(wallet_name='desc_blank', blank=True, descriptors=True)
blank_rpc = self.nodes[0].get_wallet_rpc('desc_blank')
assert_raises_rpc_error(-4, 'This wallet has no available keys', blank_rpc.getnewaddress)
self.log.info("Test descriptor wallet with disabled private keys")
self.nodes[0].createwallet(wallet_name='desc_no_priv', disable_private_keys=True, descriptors=True)
nopriv_rpc = self.nodes[0].get_wallet_rpc('desc_no_priv')
assert_raises_rpc_error(-4, 'This wallet has no available keys', nopriv_rpc.getnewaddress)
self.log.info("Test descriptor exports")
self.nodes[0].createwallet(wallet_name='desc_export', descriptors=True)
exp_rpc = self.nodes[0].get_wallet_rpc('desc_export')
self.nodes[0].createwallet(wallet_name='desc_import', disable_private_keys=True, descriptors=True)
imp_rpc = se
|
Cynary/distro6.01
|
arch/6.01Soft/lib601-F13-4/soar/worlds/oneDdiff.py
|
Python
|
mit
| 148
| 0.027027
|
dimensions(8
|
,2)
wall((0, 2), (8, 2))
wall((1, 1.5),(1.5, 1.5))
wall((2, 1.6),(2.8, 1.6))
wall((3.1, 1.4),(3.5, 1.4))
initialRobotLoc(1
|
.0, 1.0)
|
tridvaodin/Assignments-Valya-Maskaliova
|
LPTHW/projects/gothonweb/bin/app.py
|
Python
|
gpl-2.0
| 488
| 0.020492
|
import web
urls = (
'/hello','Index'
)
app = web.ap
|
plication(urls,globals())
render = web.template.render('/usr/local/LPTH
|
W/ex51/gothonweb/templates/',base="layout")
class Index(object):
def GET(self):
return render.hello_form()
def POST(self):
form = web.input(name="Nobody",greet="Hello")
greeting = "%s,%s" % (form.greet,form.name)
return render.index(greeting = greeting)
if __name__ == '__main__':
app.run()
|
quimaguirre/diana
|
scripts/old_scripts/run_experiment_cluster.py
|
Python
|
mit
| 5,102
| 0.010584
|
import os, sys, re
import ConfigParser
import optparse
import shutil
import subprocess
import difflib
import collections
#import numpy as np
# Alberto Meseguer file; 18/11/2016
# Modified by Quim Aguirre; 13/03/2017
# This file is the master coordinator of the DI
|
ANA project. It i
|
s used to run multiple DIANA commands in parallel in the cluster
#-------------#
# Functions #
#-------------#
#-------------#
# Options #
#-------------#
def parse_options():
'''
This function parses the command line arguments and returns an optparse object.
'''
parser = optparse.OptionParser("pddi.py [--dummy=DUMMY_DIR] -i INPUT_FILE [-o OUTPUT_DIR] [-v]")
# Directory arguments
parser.add_option("-i", action="store", type="string", dest="input_file", help="Input crossings file", metavar="INPUT_FILE")
parser.add_option("-s", action="store", type="string", dest="sif_file", help="Input SIF file")
parser.add_option("-t", action="store", type="string", dest="type_of_analysis", help="Type of analysis: 'profile_creation' or 'comparison'")
parser.add_option("--dummy_dir", default="dummy/", action="store", type="string", dest="dummy_dir", help="Dummy directory (default = ./)", metavar="DUMMY_DIR")
    parser.add_option('-w', '--workspace', dest='workspace', action='store', default=os.path.join(os.path.dirname(__file__), 'workspace'),
help = """Define the workspace directory where the data directory and the results directory will be created""")
(options, args) = parser.parse_args()
if options.input_file is None or options.sif_file is None or options.type_of_analysis is None:
parser.error("missing arguments: type option \"-h\" for help")
return options
#-------------#
# Main #
#-------------#
# Add "." to sys.path #
src_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(src_path)
# Read configuration file #
config = ConfigParser.ConfigParser()
config_file = os.path.join(src_path, "config_marvin.ini")
config.read(config_file)
import hashlib
# Imports my functions #
import functions
# Define which python to be used #
python = os.path.join(config.get("Paths", "python_path"), "python")
# Arguments & Options #
options = parse_options()
# Directory arguments
input_file = os.path.abspath(options.input_file)
dummy_dir = os.path.abspath(options.dummy_dir)
# Create directories if necessary
logs_dir = src_path + "/logs"
if not os.path.exists(logs_dir):
os.mkdir(logs_dir)
f = open(input_file, "r")
# Depending on the type of analysis, we will submit different commands
if options.type_of_analysis == 'profile_creation':
analysis = '-prof'
all_drugs = set()
for line in f:
(drug1, drug2) = line.strip().split('---')
all_drugs.add(drug1)
all_drugs.add(drug2)
f.close()
for drug in all_drugs:
# Check if the p-value file is already created. If so, skip
pvalue_file = data_dir + "/" + drug + "/guild_results_using_sif/output_scores.sif.netcombo.pval"
if os.path.exists(pvalue_file):
continue
guild_path = '/gpfs42/robbyfs/homes/users/qaguirre/guild/scoreN'
command = 'python {}/diana_cluster/scripts/generate_profiles.py -d {} -pt geneid -sif {} -gu {}'.format( src_path, drug, options.sif_file, guild_path )
print(command)
# python /home/quim/project/diana_cluster/scripts/generate_profiles.py -d 'DCC0303' -pt 'geneid' -sif /home/quim/project/diana_cluster/workspace/sif/human_eAFF_geneid_2017.sif -gu /home/quim/project/diana_cluster/diana/toolbox/scoreN
# To run the command at the local machine
#os.system(command)
#To run in the cluster submitting files to queues
functions.submit_command_to_queue(command, max_jobs_in_queue=int(config.get("Cluster", "max_jobs_in_queue")), queue_file="command_queues_marvin.txt", dummy_dir=dummy_dir)
elif options.type_of_analysis == 'comparison':
analysis = '-comp'
for line in f:
(drug1, drug2) = line.strip().split('---')
# Check if the results are already done
comp_results_dir = res_dir + "/results_" + drug1 + "_" + drug2
table_file = comp_results_dir + '/table_results_' + drug1 + '_' + drug2 + '.txt'
if os.path.exists(table_file):
continue
command = 'python {}/diana_cluster/scripts/compare_profiles.py -d1 {} -d2 {} -pt geneid'.format( src_path, drug1, drug2 )
print(command)
# python /home/quim/project/diana_cluster/scripts/compare_profiles.py -d1 'DCC0303' -d2 'DCC1743' -pt 'geneid'
# To run the command at the local machine
#os.system(command)
#To run in the cluster submitting files to queues
functions.submit_command_to_queue(command, max_jobs_in_queue=int(config.get("Cluster", "max_jobs_in_queue")), queue_file="command_queues_marvin.txt", dummy_dir=dummy_dir)
f.close()
else:
print('The type of analysis has been wrongly defined. Introduce \'profile_creation\' or \'comparison\'')
sys.exit(10)
|
novalabs/core-tools
|
novalabs/core/CoreWorkspace.py
|
Python
|
gpl-3.0
| 16,247
| 0.000923
|
# COPYRIGHT (c) 2016-2018 Nova Labs SRL
#
# All rights reserved. All use of this software and documentation is
# subject to the License Agreement located in the file LICENSE.
from .Core import *
from .ModuleTarget import *
from .ParametersTarget import *
from abc import abstractmethod
class CoreWorkspaceBase:
def __init__(self):
self.so
|
urces = None
self.generated = None
self.build = None
@abstractmethod
def getCorePackage(self, name):
pass
@abstractmethod
|
def getCoreModule(self, name):
pass
@abstractmethod
def getCoreConfiguration(self, package, name):
pass
@abstractmethod
def getCoreMessage(self, package, name):
pass
@abstractmethod
def getRoot(self, cwd=None):
pass
@abstractmethod
def isValid(self):
pass
def getRoot(self, cwd=None):
if self.root is None: # Check for cached value
self.root = findFileGoingUp("WORKSPACE.json", cwd)
if self.root is not None:
CoreConsole.ok("CoreWorkspace::getRoot: Workspace found in " + CoreConsole.highlightFilename(self.root))
else:
self.reason = "CoreWorkspace::getRoot: Not inside a Workspace"
CoreConsole.fail(self.reason)
return self.root
def getSourcesPath(self):
if self.sources is None: # Check for cached value
if self.getRoot() is not None:
tmp = os.path.join(self.getRoot(), "src")
if os.path.isdir(tmp):
self.sources = tmp
else:
raise CoreError("'src' directory not found inside Workspace", context="CoreWorkspaceBase::getSourcesPath")
else:
self.sources = None
return self.sources
def getGeneratedPath(self):
if self.generated is None: # Check for cached value
if self.getRoot() is not None:
tmp = os.path.join(self.getRoot(), "generated")
if not os.path.isdir(tmp):
try:
os.makedirs(tmp)
except OSError as e:
raise CoreError("I/0 Error: " + str(e.strerror), e.filename, context="CoreWorkspaceBase::getGeneratedPath")
self.generated = tmp
else:
self.generated = None
return self.generated
def getBuildPath(self):
if self.build is None: # Check for cached value
if self.getRoot() is not None:
tmp = os.path.join(self.getRoot(), "build")
if not os.path.isdir(tmp):
try:
os.makedirs(tmp)
except OSError as e:
raise CoreError("I/0 Error: " + str(e.strerror), e.filename, context="CoreWorkspaceBase::getBuildPath")
self.build = tmp
else:
self.build = None
return self.build
def getPackagesRoot(self):
if not self.isValid():
raise CoreError("invalid", context="CoreWorkspaceBase::getPackagesRoot")
return os.path.join(self.getSourcesPath(), "packages")
def getModulesRoot(self):
if not self.isValid():
raise CoreError("invalid", context="CoreWorkspaceBase::getModulesRoot")
return os.path.join(self.getSourcesPath(), "modules")
def getModuleTargetsRoot(self):
if not self.isValid():
raise CoreError("invalid", context="CoreWorkspaceBase::getModuleTargetsRoot")
return os.path.join(self.getSourcesPath(), "targets")
def getParametersRoot(self):
if not self.isValid():
raise CoreError("invalid", context="CoreWorkspaceBase::getParametersRoot")
return os.path.join(self.getSourcesPath(), "targets")
def getParametersTargetsRoot(self):
if not self.isValid():
raise CoreError("invalid", context="CoreWorkspaceBase::getParametersTargetsRoot")
return os.path.join(self.getSourcesPath(), "params")
class CoreWorkspace(CoreContainer, CoreWorkspaceBase):
def __init__(self):
CoreContainer.__init__(self)
CoreWorkspaceBase.__init__(self)
self._validModuleTargets = []
self._invalidModuleTargets = []
self._validParameters = []
self._invalidParameters = []
self._validParametersTargets = []
self._invalidParametersTargets = []
self.root = None
self.sources = None
self.generated = None
self.build = None
self.valid = False
self.opened = False
self.reason = ""
def openJSON(self, jsonFile):
CoreConsole.info("WORKSPACE: " + CoreConsole.highlightFilename(jsonFile))
try:
self.valid = True
except CoreError as e:
self.reason = str(e)
CoreConsole.fail("CoreWorkspace::openJSON: " + self.reason)
self.valid = False
return False
return True
def open(self, root=None):
self.valid = False
try:
if root is not None:
self.root = root
else:
self.root = self.getRoot()
if self.root is None:
return False
jsonFile = os.path.join(self.root, "WORKSPACE.json")
if self.openJSON(jsonFile):
self.openPackages()
self.openModules()
self.openModuleTargets()
self.openParameters()
self.openParametersTargets()
return self.valid
except CoreError as e:
self.reason = str(e)
CoreConsole.fail("CoreWorkspace::open: " + self.reason)
return False
def isValid(self):
return self.valid
# --- MODULE TARGET -----------------------------------------------------------
def listModuleTargets(self):
path = self.getModuleTargetsRoot()
dirs = listDirectories(path, fullpath=True)
tmp = []
for x in dirs:
if ModuleTarget.check(x):
tmp.append(x)
if tmp is not None:
tmp.sort()
return tmp
def openModuleTargets(self):
list = self.listModuleTargets()
self._validModuleTargets = []
self._invalidModuleTargets = []
for x in list:
m = ModuleTarget()
if m.open(x):
self._validModuleTargets.append(m)
else:
self._invalidModuleTargets.append(m)
return self._validModuleTargets
def getModuleTargetByName(self, name):
if name is None:
raise CoreError("CoreContainer::getModule() name is None")
for x in self._validModuleTargets:
if x.name == name:
return x
return None
def validModuleTargets(self):
return self._validModuleTargets
def invalidModuleTargets(self):
return self._invalidModuleTargets
# --- PARAMETERS --------------------------------------------------------------
def listParameters(self):
path = self.getParametersRoot()
dirs = listDirectories(path, fullpath=True)
tmp = []
for x in dirs:
if Parameters.check(x):
tmp.append(x)
if tmp is not None:
tmp.sort()
return tmp
def openParameters(self):
list = self.listParameters()
self._validParameters = []
self._invalidParameters = []
for x in list:
m = Parameters()
if m.open(x):
self._validParameters.append(m)
else:
self._invalidParameters.append(m)
return self._validParameters
def getParameterByName(self, name):
if name is None:
raise CoreError("CoreContainer::getModule() name is None")
for x in self._validParameters:
if x.name == name:
return x
return None
def validParameters(self):
return self._validParameters
def invalidParamete
|
rancherio/python-agent
|
cattle/plugins/docker/compute.py
|
Python
|
apache-2.0
| 33,197
| 0.00009
|
import logging
import socket
import re
from os import path, remove, makedirs, rename, environ
from . import docker_client, pull_image
from . import DockerConfig
from . import DockerPool
from cattle import Config
from cattle.compute import BaseComputeDriver
from cattle.agent.handler import KindBasedMixin
from cattle.type_manager import get_type, MARSHALLER
from cattle import utils
from cattle.utils import JsonObject
from docker.errors import APIError, NotFound
from cattle.plugins.host_info.main import HostInfo
from cattle.plugins.docker.util import add_label, is_no_op, remove_container
from cattle.progress import Progress
from cattle.lock import lock
from cattle.plugins.docker.network import setup_ipsec, setup_links, \
setup_mac_and_ip, setup_ports, setup_network_mode, setup_dns
from cattle.plugins.docker.agent import setup_cattle_config_url
log = logging.getLogger('docker')
SYSTEM_LABEL = 'io.rancher.container.system'
UUID_LABEL = 'io.rancher.container.uuid'
CREATE_CONFIG_FIELDS = [
('labels', 'labels'),
('environment', 'environment'),
('directory', 'working_dir'),
('user', 'user'),
('domainName', 'domainname'),
('memory', 'mem_limit'),
('memorySwap', 'memswap_limit'),
('cpuSet', 'cpuset'),
('cpuShares', 'cpu_shares'),
('tty', 'tty'),
('stdinOpen', 'stdin_open'),
('detach', 'detach'),
('workingDir', 'working_dir'),
('entryPoint', 'entrypoint')]
START_CONFIG_FIELDS = [
('capAdd', 'cap_add'),
('capDrop', 'cap_drop'),
('dnsSearch', 'dns_search'),
('dns', 'dns'),
('extraHosts', 'extra_hosts'),
('publishAllPorts', 'publish_all_ports'),
('lxcConf', 'lxc_conf'),
('logConfig', 'log_config'),
('securityOpt', 'security_opt'),
('restartPolicy', 'restart_policy'),
('pidMode', 'pid_mode'),
('devices', 'devices')]
def _is_running(client, container):
if container is None:
return False
inspect = client.inspect_container(container)
try:
return inspect['State']['Running']
except KeyError:
return False
def _is_stopped(client, container):
return not _is_running(client, container)
def _to_upper_case(key):
return key[0].upper() + key[1:]
class DockerCompute(KindBasedMixin, BaseComputeDriver):
def __init__(self):
KindBasedMixin.__init__(self, kind='docker')
BaseComputeDriver.__init__(self)
self.host_info = HostInfo(docker_client())
self.system_images = self.get_agent_images(docker_client())
def get_agent_images(self, client):
images = client.images(filters={'label': SYSTEM_LABEL})
system_images = {}
for i in images:
try:
label_val = i['Labels'][SYSTEM_LABEL]
for l in i['RepoTags']:
system_images[l] = label_val
if l.endswith(':latest'):
alias = l[:-7]
system_images[alias] = label_val
except KeyError:
pass
return system_images
@staticmethod
def get_container_by(client, func):
containers = client.containers(all=True, trunc=False)
containers = filter(func, containers)
if len(containers) > 0:
return containers[0]
return None
@staticmethod
def find_first(containers, func):
containers = filter(func, containers)
if len(containers) > 0:
return containers[0]
return None
def on_ping(self, ping, pong):
if not DockerConfig.docker_enabled():
return
self._add_resources(ping, pong)
self._add_instances(ping, pong)
def _add_instances(self, ping, pong):
if not utils.ping_include_instances(ping):
return
utils.ping_add_resources(pong, {
'type': 'hostUuid',
'uuid': DockerConfig.docker_uuid()
})
containers = []
running, nonrunning = self._get_all_containers_by_state()
for key, container in running.iteritems():
self.add_container('running', container, containers)
for key, container in nonrunning.iteritems():
self.add_container('stopped', container, containers)
utils.ping_add_resources(pong, *containers)
utils.ping_set_option(pong, 'instances', True)
def add_container(self, state, container, containers):
try:
labels = container['Labels']
except KeyError:
labels = []
container_data = {
'type': 'instance',
'uuid': self._get_uuid(container),
'state': state,
'systemContainer': self._get_sys_container(container),
'dockerId': container['Id'],
'image': container['Image'],
'labels': labels,
'created': container['Created'],
}
containers.append(container_data)
def _get_all_containers_by_state(self):
client = docker_client(timeout=2)
nonrunning_containers = {}
for c in client.containers(all=True):
# Blank sta
|
tus only wait to distinguish created from stopped
if c['Status'] != '' and c['Status'] != 'Created':
nonrunning_containers[c['Id']] = c
running_containers = {}
for c in client.containers(all=False):
running_containers[c['Id']] = c
del nonrunning_containers[c['Id']]
return running_containers, nonrunning_containers
def _get_sys_container(self, container):
try:
image = container['Image']
|
if image in self.system_images:
return self.system_images[image]
except (TypeError, KeyError):
pass
try:
return container['Labels']['io.rancher.container.system']
except (TypeError, KeyError):
pass
def _get_uuid(self, container):
try:
uuid = container['Labels'][UUID_LABEL]
if uuid:
return uuid
except (TypeError, KeyError):
pass
names = container['Names']
if not names:
# No name?? Make one up
return 'no-uuid-%s' % container['Id']
if names[0].startswith('/'):
return names[0][1:]
else:
return names[0]
def _determine_state(self, container):
status = container['Status']
if status == '' or (status is not None and
status.lower() == 'created'):
return 'created'
elif 'Up ' in status:
return 'running'
elif 'Exited ' in status:
return 'stopped'
else:
# Unknown. Assume running and state should sync up eventually.
return 'running'
def _get_host_labels(self):
try:
return self.host_info.host_labels()
except:
log.exception("Error getting host labels")
return {}
def _get_host_create_labels(self):
labels = Config.labels()
if labels:
return labels
return {}
def _add_resources(self, ping, pong):
if not utils.ping_include_resources(ping):
return
stats = None
if utils.ping_include_stats(ping):
try:
stats = self.host_info.collect_data()
except:
log.exception("Error getting host info stats")
physical_host = Config.physical_host()
compute = {
'type': 'host',
'kind': 'docker',
'hostname': Config.hostname(),
'createLabels': self._get_host_create_labels(),
'labels': self._get_host_labels(),
'physicalHostUuid': physical_host['uuid'],
'uuid': DockerConfig.docker_uuid(),
'info': stats
}
pool = {
'type': 'storagePool',
'kind': 'docker',
'name': compute['hostname'] + ' Storage Pool',
'hostUuid': compute['uuid'],
'uuid': compute['uuid'] + '-pool'
}
resolved_ip = socket.gethostbyname(D
|
jdddog/einstein_robot
|
einstein_driver/src/einstein_controller.py
|
Python
|
bsd-3-clause
| 1,255
| 0.004781
|
#!/usr/bin/env python
__author__ = 'Jamie Diprose'
import rospy
from sensor_msgs.msg import JointState
from ros_pololu_servo.msg import servo_pololu
import math
class EinsteinController():
def __init__(self):
rospy.init_node('einstein_controller')
rospy.Subscriber("joint_angles", JointState, self.handle_joint_angl
|
es, queue_size=10)
self.pololu_pub = rospy.Publisher("cmd_pololu", servo_pololu)
self.joint_ids = {'neck_yaw': 23,
|
'neck_roll': 2, 'neck_pitch': 3}
def handle_joint_angles(self, msg):
rospy.logdebug("Received a joint angle target")
for i, joint_name in enumerate(msg.name):
servo_msg = servo_pololu()
servo_msg.id = self.joint_ids[joint_name]
servo_msg.angle = msg.position[i]
servo_msg.speed = (msg.velocity * 255.0)
servo_msg.acceleration = msg.effort #TODO: check this
self.pololu_pub.publish(servo_msg)
        # TODO: enforce joint angles
if __name__ == '__main__':
rospy.loginfo("Starting einstein_controller...")
controller = EinsteinController()
controller.start()
rospy.loginfo("einstein_controller started")
rospy.spin()
rospy.loginfo("einstein_controller stopped")
|
KraftSoft/together
|
location/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 767
| 0.002608
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-19 21:08
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
|
operations = [
migrations.CreateModel(
name='Subway',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('coordinates', django.contrib.gis.db.models.fields.PointField(null=True,
|
srid=4326)),
('name', models.CharField(max_length=64)),
],
options={
'abstract': False,
},
),
]
|
mlecours/fake-switches
|
fake_switches/netconf/netconf_protocol.py
|
Python
|
apache-2.0
| 4,337
| 0.003689
|
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from twisted.internet.protocol import Protocol
from lxml import etree
from fake_switches.netconf import dict_2_etree, NS_BASE_1_0, normalize_operation_name, SimpleDatastore, \
Response, OperationNotSupported, NetconfError
from fake_switches.netconf.capabilities import Base1_0
class NetconfProtocol(Protocol):
def __init__(self, datastore=None, capabilities=None, additionnal_namespaces=None, logger=None):
self.logger = logger or logging.getLogger("fake_switches.netconf")
self.input_buffer = ""
self.session_count = 0
self.been_greeted = False
self.datastore = datastore or SimpleDatastore()
caps_class_list = capabilities or []
caps_class_list.insert(0, Base1_0)
self.capabilities = [cap(self.datastore) for cap in caps_class_list]
self.additionnal_namespaces = additionnal_namespaces or {}
def __call__(self, *args, **kwargs):
return self
def connectionMade(self):
self.logger.info("Connected, sending <hello>")
self.session_count += 1
self.say(dict_2_etree({
"hello": [
{"session-id": str(self.session_count)},
{"capabilities": [{"capability": cap.get_url()} for cap in self.capabilities]}
]
}))
def dataReceived(self, data):
self.logger.info("Received : %s" % repr(data))
self.input_buffer += data
if self.input_buffer.rstrip().endswith("]]>]]>"):
self.process(self.input_buffer.rstrip()[0:-6])
self.input_buffer = ""
def process(self, data):
if not self.been_greeted:
self.logger.info("Client's greeting received")
self.been_greeted = True
return
xml_request_root = remove_namespaces(etree.fromstring(data))
message_id = xml_request_root.get("message-id")
operation = xml_request_root[0]
self.logger.info("Operation requested %s" % repr(operation.tag))
handled = False
operation_name = normalize_operation_name(operation)
for capability in self.capabilities:
if hasattr(capability, operation_name):
try:
self.reply(message_id, getattr(capability, operation_name)(operation))
except NetconfError as e:
self.reply(message_id, error_to_response(e))
handled = True
if not handled:
self.reply(message_id, error_to_response(OperationNotSupported(operation_name)))
def reply(self, message_id, response):
reply = etree.Element("rpc-reply", xmlns=NS_BASE_1_0, nsmap=self.additionnal_namespaces)
reply.attrib["message-id"] = message_id
reply.append(response.etree)
self.say(reply)
|
if response.require_disconnect:
self.logger.info("Disconnecting")
self.transport.loseConnection()
def say(self, etree_root):
self.logger.info("Saying : %s" % repr(etree.tostring(etree_
|
root)))
self.transport.write(etree.tostring(etree_root, pretty_print=True) + "]]>]]>\n")
def error_to_response(error):
error_specs = {
"error-message": error.message
}
if error.type: error_specs["error-type"] = error.type
if error.tag: error_specs["error-tag"] = error.tag
if error.severity: error_specs["error-severity"] = error.severity
if error.info: error_specs["error-info"] = error.info
return Response(dict_2_etree({"rpc-error": error_specs}))
def remove_namespaces(xml_root):
xml_root.tag = unqualify(xml_root.tag)
for child in xml_root:
remove_namespaces(child)
return xml_root
def unqualify(tag):
return re.sub("\{[^\}]*\}", "", tag)
|
klis87/django-cloudinary-storage
|
tests/settings.py
|
Python
|
mit
| 2,246
| 0.000445
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = False
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
SECRET_KEY = 'my-key'
ROOT_URLCONF = 'tests.urls'
INSTALLED_APPS = [
'tests',
'cloudinary_storage',
# 'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
],
},
},
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
STATIC_URL = '/static/'
STATICFILES_STORAGE = 'cloudinary_storage.storage.StaticHashedCloudinaryStorage'
MEDIA_URL = '/media/'
DEFAULT_FILE_STORAGE = 'cloudinary_storage.storage.MediaCloudinaryStorage'
CLOUDINARY
|
_STORAGE = {
'CLOUD_NAME': os.getenv('CLOUDINARY_CLOUD_NAME', 'my-cloud-name'),
'API_KEY': os.getenv('CLOUDINARY_API_KEY', 'my-api-key'),
'API_SECRET': os.getenv('CLOUDINARY_API_SECRET', 'my-api-secret')
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'log
|
ging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
},
}
|
TGITS/programming-workouts
|
exercism/python/isbn-verifier/isbn_verifier_test.py
|
Python
|
mit
| 1,989
| 0
|
import unittest
from isbn_verifier import is_valid
# Tests adapted from `problem-specifications//canonical-data.json`
class IsbnVerifierTest(unittest.TestCase):
def test_valid_isbn(self):
self.assertIs(is_valid("3-598-21508-8"), True)
def test_invalid_isbn_check_digit(self):
self.assertIs(is_valid("3-598-21508-9"), False)
def test_valid_isbn_with_a_check_digit_of_10(self):
self.assertIs(is_valid("3-598-21507-X"), True)
def test_check_digit_is_a_character_other_than_x(self):
self.assertIs(is_valid("3-598-21507-A"), False)
def test_invalid_character_in_isbn(self):
self.assertIs(is_valid("3-598-P1581-X"), False)
def test_x_is_only_valid_as_a_check_digit(self):
self.assertIs(is_valid("3-598-2X507-9"), False)
def test_valid_isbn_without_separating_dashes(self):
self.assertIs(is_valid("3598215088"), True)
def test_isbn_without_separating_dashes_and_x_as_check_digit(self):
self.assertIs(is_valid("359821507X"), True)
def test_isbn_without_check_digit_and_dashes(self):
self.assertIs(is_valid("359821507"), False)
def test_too_long_isbn_and_no_dashes(self):
self.assertIs(is_valid("3598215078X"), False)
def test_too_short_isbn(self):
self.assertIs(is_valid("00"), False)
def test_isbn_without_check_digit(self):
self.assertIs(is_valid("3-598-21507"), False)
def test_check_digit_of_x_should_not_be_used_for_0(self):
self.assertIs(is_valid("3-598-21515-X"), False)
def test_empty_isbn(self):
self.assertIs(is_valid(""), False)
def test_input_is_9_characters(self):
self.assertIs(is_valid("134456729"), False)
def test_invalid_characters_are_not_ignored(self):
self.assertIs(is_valid("3132P34035"), False)
def test_input_is_too_long_b
|
ut_contain
|
s_a_valid_isbn(self):
self.assertIs(is_valid("98245726788"), False)
if __name__ == "__main__":
unittest.main()
|
ingenioustechie/zamboni
|
mkt/account/tests/test_serializers.py
|
Python
|
bsd-3-clause
| 3,416
| 0
|
from datetime import datetime
import mock
from nose.tools im
|
port eq_
import mkt
import mkt.site.tests
from mkt.account.serializers import (AccountSerializer, AccountInfoSerializer,
TOSSerializer)
from mkt.users.models import UserProfile
class TestAccountSerializer(mkt.site.tests.TestCase):
def setUp(self):
self.account = UserProfile()
def serializer(self):
return AccountSerializer(instance=self.account)
def test_display_
|
name_returns_name(self):
with mock.patch.object(UserProfile, 'name', 'Account name'):
eq_(self.serializer().data['display_name'], 'Account name')
def test_recommendations(self):
# Test default.
eq_(self.serializer().data['enable_recommendations'], True)
self.account.enable_recommendations = False
eq_(self.serializer().data['enable_recommendations'], False)
class TestAccountInfoSerializer(mkt.site.tests.TestCase):
UNKNOWN = mkt.LOGIN_SOURCE_LOOKUP[mkt.LOGIN_SOURCE_UNKNOWN]
FIREFOX_ACCOUNTS = mkt.LOGIN_SOURCE_LOOKUP[mkt.LOGIN_SOURCE_FXA]
PERSONA = mkt.LOGIN_SOURCE_LOOKUP[mkt.LOGIN_SOURCE_BROWSERID]
def setUp(self):
self.account = UserProfile()
self.account.pk = 25
def serializer(self):
return AccountInfoSerializer(instance=self.account)
def test_source_is_a_slug_default(self):
eq_(self.serializer().data['source'], self.PERSONA)
def test_source_is_unknown(self):
self.account.source = mkt.LOGIN_SOURCE_UNKNOWN
eq_(self.serializer().data['source'], self.PERSONA)
def test_source_is_fxa(self):
self.account.source = mkt.LOGIN_SOURCE_FXA
eq_(self.serializer().data['source'], self.FIREFOX_ACCOUNTS)
def test_source_is_invalid(self):
self.account.source = -1
eq_(self.serializer().data['source'], self.PERSONA)
def test_source_is_unrelated(self):
self.account.source = mkt.LOGIN_SOURCE_BROWSERID
eq_(self.serializer().data['source'], self.PERSONA)
def test_account_has_no_pk(self):
self.account.source = mkt.LOGIN_SOURCE_FXA
self.account.pk = None
eq_(self.serializer().data['source'], self.UNKNOWN)
def test_source_is_read_only(self):
serializer = AccountInfoSerializer(
instance=None,
data={'source': mkt.LOGIN_SOURCE_FXA, 'display_name': 'Hey!'},
partial=True)
eq_(serializer.is_valid(), True)
# This works because the model field is `editable=False`.
eq_(serializer.save().source, mkt.LOGIN_SOURCE_UNKNOWN)
def test_not_verified(self):
self.account.is_verified = False
eq_(self.serializer().data['verified'], False)
def test_verified(self):
self.account.is_verified = True
eq_(self.serializer().data['verified'], True)
class TestTOSSerializer(mkt.site.tests.TestCase):
def setUp(self):
self.account = UserProfile()
def serializer(self):
context = {
'request': mkt.site.tests.req_factory_factory('')
}
context['request'].user = self.account
return TOSSerializer(instance=self.account, context=context)
def test_has_signed(self):
eq_(self.serializer().data['has_signed'], False)
self.account.read_dev_agreement = datetime.now()
eq_(self.serializer().data['has_signed'], True)
|
gantzgraf/vape
|
vase/family_filter.py
|
Python
|
gpl-3.0
| 69,896
| 0.000229
|
from .sample_filter import SampleFilter, GtFilter
from .sv_gt_filter import SvGtFilter
import logging
from collections import OrderedDict, defaultdict
class FamilyFilter(object):
'''
Determine whether variants/alleles fit given inheritance
patterns for families.
'''
def __init__(self, ped, vcf, infer_inheritance=True, g2p=None,
check_g2p_consequence=None, force_inheritance=None,
logging_level=logging.WARNING):
'''
Initialize with Family object from ped_file.py and a
VcfReader object from vcf_reader.py. You may also specify an
inheritance pattern (either 'recessive' or 'dominant'). If
inheritance_pattern is not specified an attempt is made to
infer an appropriate inheritance pattern based on the family
structure and affecteds.
Args:
ped: A PedFile object from ped_file.py. Must contain
at least one affected individual.
vcf: A VcfReader object containing data from at least
some of the affected individuals in the given
family.
infer_inheritance:
If True, infer possible inheritance patterns
for each family in the PedFile. Inferred patterns
are stored in self.inheritance_patterns dict
(keys are families, values are lists of
inheritance patterns).
g2p: G2P object from vase.g2p for filtering on
presence and inheritance requirements from a G2P
file.
check_g2p_consequence:
If using a G2P object for gene filtering, also
filter on consequence type as described for each
gene. Note that the mapping of mutation
consequence to consequence type is quite crude
and should be used with caution (see the
mutation_to_csq dict in vase/g2p.py for the
mappings used).
force_inheritance:
Optionally specify an inheritance pattern to
test for each family - either 'dominant' or
'recessive' is allowed. If infer_inheritance is
True, these patterns will be tested in addition
to inferred patterns.
logging_level:
The level at which logging messages are
displayed. Defaults to logging.WARNING
'''
self.logger = self._get_logger(logging_level)
self.affected = tuple(ped.get_affected())
self.unaffected = tuple(ped.get_unaffected())
self.obligate_carriers = dict()
self.ped = ped
self.vcf = vcf
self.g2p = g2p
self.check_g2p_consequence = check_g2p_consequence
if not self.affected:
raise RuntimeError("No affected individuals found in PED file '{}'"
.format(ped.filename))
self.vcf_affected = list(x for x in self.affected
if x in self.vcf.header.samples)
if not self.vcf_affected:
raise RuntimeError("No affected individuals in PED file '{}'"
.format(ped.filename) + " found in VCF " +
"'{}'".format(vcf.filename))
self.vcf_unaffected
|
= list(x for x in self.unaffected
if x in self.
|
vcf.header.samples)
self.vcf_samples = self.vcf_affected + self.vcf_unaffected
self.inheritance_patterns = defaultdict(list)
if infer_inheritance:
self._infer_inheritance()
if force_inheritance:
if force_inheritance not in ('dominant', 'recessive'):
raise RuntimeError("Unrecognised inheritance pattern " +
"specified with 'force_inheritance' " +
"argument. Valid options are 'dominant' " +
"or 'recessive'.")
for fid in self.ped.families:
self.inheritance_patterns[fid].append(force_inheritance)
def _infer_inheritance(self):
'''
Simplistic method for determining likely relevant
inheritance pattern. For affected individuals in a family
a check is made whether parents or grandparents are also
affected. Currently only dominant or recessive inheritance
is inferred, no attempt to infer X-linked or mitochondrial
inheritance is made and it will not spot pseudodominance.
'''
for fid, fam in self.ped.families.items():
n_affected = 0
no_parents = True
both_pars_unaffected = False
dominant = False
denovo = False
recessive = False
self.logger.info("Assessing inheritance pattern of family {}"
.format(fid))
f_aff = tuple(fam.get_affected())
obligate_carriers = set()
if not f_aff:
continue
for iid in f_aff:
self.logger.info("Checking affected individual {}".format(iid))
n_affected += 1
indv = fam.individuals[iid]
if not indv.parents:
self.logger.info("No parents for affected individual {}"
.format(iid))
continue
no_parents = False
p_unaff = 0
for par in indv.parents:
# is parent affected
if par not in fam.individuals:
if par in self.vcf.header.samples:
self.logger.warn("Family '{}' parent '{}' ".format(
fid, par) + "not specified in " +
"PED, but present in VCF - " +
"assuming unaffected")
self.vcf_samples.append(par)
self.vcf_unaffected.append(par)
p_unaff += 1
continue
parent = fam.individuals[par]
par_to_child = False
gpar_to_child = False
if parent.is_affected():
self.logger.info("Apparent vertical transmission " +
"from {} -> {}" .format(par, iid))
par_to_child = True
else:
p_unaff += 1
for gpar in parent.parents:
if fam.individuals[gpar].is_affected():
gpar_to_child = True
msg = "Apparent vertical transmission "
if par_to_child:
msg += ("from {} -> {} -> {}"
.format(gpar, par, iid))
else:
msg += ("with partial penetrance from " +
"{} -> ({}) -> {}"
.format(gpar, par, iid))
obligate_carriers.add(par)
self.logger.info(msg)
if par_to_child or gpar_to_child:
dominant = True
if p_unaff == 2:
both_pars_unaffected = True
if not dominant:
recessive = True
if no_parents or not both_pars_unaffected:
# missing information on one/both parents - could be dominant
dominant = True
if recessive and n_affected == 1 and not no_parents:
f
|
huangshiyu13/RPNplus
|
train.py
|
Python
|
mit
| 12,215
| 0.00393
|
import inspect
import os
import time
import sys
import numpy as np
import tensorflow as tf
import shutil
import data_engine
VGG_MEAN = [103.939, 116.779, 123.68]
image_height = 720
image_width = 960
feature_height = int(np.ceil(image_height / 16.))
feature_width = int(np.ceil(image_width / 16.))
class RPN:
def __init__(self, vgg16_npy_path=None):
if vgg16_npy_path is None:
path = inspect.getfile(Vgg16)
path = os.path.abspath(os.path.join(path, os.pardir))
path = os.path.join(path, 'vgg16.npy')
vgg16_npy_path = path
print path
self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
print('npy file loaded')
def build(self, rgb, label, label_weight, bbox_target, bbox_loss_weight, learning_rate):
start_time = time.time()
print('build model started')
# Convert RGB to BGR
red, green, blue = tf.split(rgb, 3, 3)
assert red.get_shape().as_list()[1:] == [image_height, image_width, 1]
assert green.get_shape().as_list()[1:] == [image_height, image_width, 1]
assert blue.get_shape().as_list()[1:] == [image_height, image_width, 1]
bgr = tf.concat([
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
],3)
assert bgr.get_shape().as_list()[1:] == [image_height, image_width, 3]
# Conv layer 1
self.conv1_1 = self.conv_layer_const(bgr, 'conv1_1')
self.conv1_2 = self.conv_layer_const(self.conv1_1, 'conv1_2')
self.pool1 = self.max_pool(self.conv1_2, 'pool1')
# Conv layer 2
self.conv2_1 = self.conv_layer_const(self.pool1, 'conv2_1')
self.conv2_2 = self.conv_layer_const(self.conv2_1, 'conv2_2')
self.pool2 = self.max_pool(self.conv2_2, 'pool2')
# Conv layer 3
self.conv3_1, conv3_1_wd = self.conv_layer(self.pool2, 'conv3_1')
self.conv3_2, conv3_2_wd = self.conv_layer(self.conv3_1, 'conv3_2')
self.conv3_3, conv3_3_wd = self.conv_layer(self.conv3_2, 'conv3_3')
self.weight_dacay = conv3_1_wd + conv3_2_wd + conv3_3_wd
self.pool3 = self.max_pool(self.conv3_3, 'pool3')
# Conv layer 4
self.conv4_1, conv4_1_wd = self.conv_layer(self.pool3, 'conv4_1')
self.conv4_2, conv4_2_wd = self.conv_layer(self.conv4_1, 'conv4_2')
self.conv4_3, conv4_3_wd = self.conv_layer(self.conv4_2, 'conv4_3')
self.weight_dacay += conv4_1_wd + conv4_2_wd + conv4_3_wd
self.pool4 = self.max_pool(self.conv4_3,
|
'pool4')
# Conv layer 5
self.conv5_1, conv5_1_wd = self.conv_layer(self.pool4, 'conv5_1')
self.conv5_2, conv5_2_wd = self.conv_layer(self.conv5_1, 'conv5_2')
self.conv5_3, conv5_3_wd = self.conv_layer(self.conv5_2, 'conv5_3')
self.weight_dacay += conv5_1_wd + conv5_2_wd + conv5_3_wd
# RPN_TEST_6(>=7)
normalization_factor = tf.sqrt(tf.reduce_mean(tf.square(self.conv5_3)))
|
self.gamma3 = tf.Variable(np.sqrt(2), dtype=tf.float32, name='gamma3')
self.gamma4 = tf.Variable(1.0, dtype=tf.float32, name='gamma4')
# Pooling to the same size
self.pool3_p = tf.nn.max_pool(self.pool3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME',
name='pool3_proposal')
# L2 Normalization
self.pool3_p = self.pool3_p / (
tf.sqrt(tf.reduce_mean(tf.square(self.pool3_p))) / normalization_factor) * self.gamma3
self.pool4_p = self.pool4 / (
tf.sqrt(tf.reduce_mean(tf.square(self.pool4))) / normalization_factor) * self.gamma4
# Proposal Convolution
self.conv_proposal_3, conv_proposal_3_wd = self.conv_layer_new(self.pool3_p, 'conv_proposal_3',
kernel_size=[5, 2], out_channel=256, stddev=0.01)
self.relu_proposal_3 = tf.nn.relu(self.conv_proposal_3)
self.conv_proposal_4, conv_proposal_4_wd = self.conv_layer_new(self.pool4_p, 'conv_proposal_4',
kernel_size=[5, 2], out_channel=512, stddev=0.01)
self.relu_proposal_4 = tf.nn.relu(self.conv_proposal_4)
self.conv_proposal_5, conv_proposal_5_wd = self.conv_layer_new(self.conv5_3, 'conv_proposal_5',
kernel_size=[5, 2], out_channel=512, stddev=0.01)
self.relu_proposal_5 = tf.nn.relu(self.conv_proposal_5)
self.weight_dacay += conv_proposal_3_wd + conv_proposal_4_wd + conv_proposal_5_wd
# Concatrate
self.relu_proposal_all = tf.concat( [self.relu_proposal_3, self.relu_proposal_4, self.relu_proposal_5],3)
# RPN_TEST_6(>=7)
self.conv_cls_score, conv_cls_wd = self.conv_layer_new(self.relu_proposal_all, 'conv_cls_score',
kernel_size=[1, 1], out_channel=18, stddev=0.01)
self.conv_bbox_pred, conv_bbox_wd = self.conv_layer_new(self.relu_proposal_all, 'conv_bbox_pred',
kernel_size=[1, 1], out_channel=36, stddev=0.01)
self.weight_dacay += conv_cls_wd + conv_bbox_wd
assert self.conv_cls_score.get_shape().as_list()[1:] == [feature_height, feature_width, 18]
assert self.conv_bbox_pred.get_shape().as_list()[1:] == [feature_height, feature_width, 36]
self.cls_score = tf.reshape(self.conv_cls_score, [-1, 2])
self.bbox_pred = tf.reshape(self.conv_bbox_pred, [-1, 4])
self.prob = tf.nn.softmax(self.cls_score, name="prob")
self.cross_entropy = tf.reduce_sum(
tf.nn.softmax_cross_entropy_with_logits(labels=label,
logits=self.cls_score) * label_weight) / tf.reduce_sum(label_weight)
bbox_error = tf.abs(self.bbox_pred - bbox_target)
bbox_loss = 0.5 * bbox_error * bbox_error * tf.cast(bbox_error < 1, tf.float32) + (bbox_error - 0.5) * tf.cast(
bbox_error >= 1, tf.float32)
self.bb_loss = tf.reduce_sum(
tf.reduce_sum(bbox_loss, reduction_indices=[1]) * bbox_loss_weight) / tf.reduce_sum(bbox_loss_weight)
self.loss = self.cross_entropy + 0.0005 * self.weight_dacay + 0.5 * self.bb_loss
self.train_step = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(self.loss)
self.data_dict = None
print('build model finished: %ds' % (time.time() - start_time))
def avg_pool(self, bottom, name):
return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def max_pool(self, bottom, name):
return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def conv_layer(self, bottom, name):
with tf.variable_scope(name):
filt = self.get_conv_filter(name)
conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
conv_biases = self.get_bias(name)
bias = tf.nn.bias_add(conv, conv_biases)
relu = tf.nn.relu(bias)
weight_dacay = tf.nn.l2_loss(filt, name='weight_dacay')
return relu, weight_dacay
def conv_layer_const(self, bottom, name):
with tf.variable_scope(name):
filt = self.get_conv_filter_const(name)
conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
conv_biases = self.get_bias_const(name)
bias = tf.nn.bias_add(conv, conv_biases)
relu = tf.nn.relu(bias)
return relu
def conv_layer_new(self, bottom, name, kernel_size=[3, 3], out_channel=512, stddev=0.01):
with tf.variable_scope(name):
shape = bottom.get_shape().as_list()[-1]
filt = tf.Variable(
tf.random_normal([kernel_size[0], kernel_size[1], shape, out_channel], mean=0.0, stddev=stddev),
name='filter')
conv_biases = tf.Variable(tf.zeros([out_channel]), name='biases')
conv =
|
Cadasta/django-skivvy
|
setup.py
|
Python
|
agpl-3.0
| 3,207
| 0
|
import sys
impor
|
t os
import re
import shutil
from setuptools import setup
name = 'django-skivvy'
package = 'skivvy'
description = ('Write faster integration tests for Django views – with less '
'code.')
url = 'https://github.com/oliverroick/django-skivvy'
author = 'Oliver Roick'
author_email = '[email protected]'
license = 'AGPL'
readme_file = os.path.join(os.path.dirname(__file__), 'README.rst')
with ope
|
n(readme_file, 'r') as f:
long_description = f.readline().strip()
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("^__version__ = ['\"]([^'\"]+)['\"]",
init_py, re.MULTILINE).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
version = get_version(package)
if sys.argv[-1] == 'publish':
if os.system("pip freeze | grep twine"):
print("twine not installed.\nUse `pip install twine`.\nExiting.")
sys.exit()
shutil.rmtree('dist', ignore_errors=True)
shutil.rmtree('build', ignore_errors=True)
os.system("python setup.py sdist")
os.system("python setup.py bdist_wheel")
os.system("twine upload dist/*")
print("You probably want to also tag the version now:")
print(" git tag -a {0} -m 'version {0}'".format(version))
print(" git push --tags")
sys.exit()
setup(
name=name,
version=version,
url=url,
license=license,
description=description,
long_description=long_description,
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
install_requires=[],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.1',
'Framework :: Django :: 2.2',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: OS Independent',
'Natural Language :: English',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Testing :: Mocking',
]
)
|
sidartaoliveira/ansible
|
lib/ansible/modules/system/parted.py
|
Python
|
gpl-3.0
| 22,160
| 0.000632
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Fabrizio Colonna <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- "Fabrizio Colonna (@ColOfAbRiX)"
module: parted
short_description: Configure block device partitions
version_added: "2.3"
description:
- This module allows configuring block device partition using the C(parted)
command line tool. For a full description of the fields and the options
check the GNU parted manual.
notes:
- When fetching information about a new disk and when the version of parted
installed on the system is before version 3.1, the module queries the kernel
through C(/sys/) to obtain disk information. In this case the units CHS and
CYL are not supported.
requirements:
- This module requires parted version 1.8.3 and above.
- If the version of parted is below 3.1, it requires a Linux version running
the sysfs file system C(/sys/).
options:
device:
description: The block device (disk) where to operate.
required: True
align:
description: Set alignment for newly created partitions.
choices: ['none', 'cylinder', 'minimal', 'optimal']
default: optimal
number:
description:
- The number of the partition to work with or the number of the partition
that will be created. Required when performing any action on the disk,
except fetching information.
unit:
description:
- Selects the current default unit that Parted will use to display
locations and capacities on the disk and to interpret those given by the
      user if they are not suffixed by a unit. When fetching information about
a disk, it is always recommended to specify a unit.
choices: [
's', 'B', 'KB', 'KiB', 'MB', 'MiB', 'GB', 'GiB', 'TB', 'TiB', '%', 'cyl',
'chs', 'compact'
]
default: KiB
label:
description: Creates a new disk label.
choices: [
'aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos', 'pc98',
'sun'
]
default: msdos
part_type:
description:
- Is one of 'primary', 'extended' or 'logical' and may be specified only
with 'msdos' or 'dvh' partition tables. A name must be specified for a
'gpt' partition table. Neither part-type nor name may be used with a
'sun' partition table.
choices: ['primary', 'extended', 'logical']
default: primary
part_start:
description:
- Where the partition will start as offset from the beginning of the disk,
that is, the "distance" from the start of the disk. The distance can be
specified with all the units supported by parted (except compat) and
it is case sensitive. E.g. C(10GiB), C(15%).
default: 0%
part_end :
description:
- Where the partition will end as offset from the beginning of the disk,
that is, the "distance" from the start of the disk. The distance can be
specified with all the units supported by parted (except compat) and
it is case sensitive. E.g. C(10GiB), C(15%).
default: 100%
name:
description:
- Sets the name for the partition number (GPT, Mac, MIPS and PC98 only).
flags:
description: A list of the flags that has to be set on the partition.
state:
description:
- If to create or delete a partition. If set to C(info) the module will
only return the device information.
choices: ['present', 'absent', 'info']
default: info
'''
RETURN = '''
partition_info:
description: Current partition information
returned: success
type: complex
contains:
device:
description: Generic device information.
type: dict
p
|
artitions:
description: List of device partitions.
type: list
sample: >
{
"disk": {
"dev": "/dev/sdb",
"logical_block": 512,
"model": "VMware Virtual disk",
"physical_block": 512,
"size": 5.0,
"table": "msdos",
"unit": "gib"
},
"partitions": [{
"begin": 0.0,
"end": 1.0,
"flags": ["boot", "lvm"],
"
|
fstype": "",
"name": "",
"num": 1,
"size": 1.0
}, {
"begin": 1.0,
"end": 5.0,
"flags": [],
"fstype": "",
"name": "",
"num": 2,
"size": 4.0
}]
}
'''
EXAMPLES = """
# Create a new primary partition
- parted:
device: /dev/sdb
number: 1
state: present
# Remove partition number 1
- parted:
device: /dev/sdb
number: 1
state: absent
# Create a new primary partition with a size of 1GiB
- parted:
device: /dev/sdb
number: 1
state: present
part_end: 1GiB
# Create a new primary partition for LVM
- parted:
device: /dev/sdb
number: 2
flags: [ lvm ]
state: present
part_start: 1GiB
# Read device information (always use unit when probing)
- parted: device=/dev/sdb unit=MiB
register: sdb_info
# Remove all partitions from disk
- parted:
device: /dev/sdb
number: "{{ item.num }}"
state: absent
with_items:
- "{{ sdb_info.partitions }}"
"""
from ansible.module_utils.basic import AnsibleModule
import math
import re
import os
# Reference prefixes (International System of Units and IEC)
units_si = ['B', 'KB', 'MB', 'GB', 'TB']
units_iec = ['B', 'KiB', 'MiB', 'GiB', 'TiB']
parted_units = units_si + units_iec + ['s', '%', 'cyl', 'chs', 'compact']
def parse_unit(size_str, unit=''):
"""
Parses a string containing a size of information
"""
matches = re.search(r'^([\d.]+)([\w%]+)?$', size_str)
if matches is None:
# "<cylinder>,<head>,<sector>" format
matches = re.search(r'^(\d+),(\d+),(\d+)$', size_str)
if matches is None:
module.fail_json(
msg="Error interpreting parted size output: '%s'" % size_str
)
size = {
'cylinder': int(matches.group(1)),
'head': int(matches.group(2)),
'sector': int(matches.group(3))
}
unit = 'chs'
else:
# Normal format: "<number>[<unit>]"
if matches.group(2) is not None:
unit = matches.group(2)
size = float(matches.group(1))
return size, unit
def parse_partition_info(parted_output, unit):
"""
Parses the output of parted and transforms the data into
a dictionary.
Parted Machine Parseable Output:
See: https://lists.alioth.debian.org/pipermail/parted-devel/2006-December/00
0573.html
- All lines end with a semicolon (;)
- The first line indicates the units in which the output is expressed.
CHS, CYL and BYT stands for CHS, Cylinder and Bytes respectively.
- The second line is made of disk information in the following format:
"path":"size":"transport-type":"logical-sector-size":"physical-sector-siz
e":"partition-table-type":"model-name";
- If the first line was either CYL or CHS, the next line will contain
information on no. of cylinders, heads, sectors and cylinder size.
- Partition information begins from the next line. This is of the format:
(for BYT)
"number":"begin":"end":"size":"filesystem-type":"partition-name":"flags-s
et";
(for CHS/CYL)
"number":"begin":"end":"filesystem-type":"partition-name":"flags-set";
"""
lines
|
olavurmortensen/gensim
|
gensim/similarities/index.py
|
Python
|
lgpl-2.1
| 3,188
| 0.002509
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
import os
from smart_open import smart_open
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
from gensim.models.doc2vec import Doc2Vec
from gensim.models.word2vec import Word2Vec
try:
from annoy import AnnoyIndex
except ImportError:
raise ImportError("Annoy has not been installed, if you wish to use the annoy indexer, please run `pip install annoy`")
class AnnoyIndexer(object):
def __init__(self, model=None, num_trees=None):
self.index = None
self.labels = None
self.model = model
self.num_trees = num_trees
if model and num_trees:
if isinstance(self.model, Doc2Vec):
self.build_from_doc2vec()
elif isinstance(self.model, Word2Vec):
self.build_from_word2vec()
else:
raise ValueError("Only a Word2Vec or Doc2Vec instance can be used")
def save(self, fname, protocol=2):
fname_dict = fname + '.d'
self.index.save(fname)
d = {'f': self.model.vector_size, 'num_trees': self.num_trees, 'labels': self.labels}
with smart_open(fname_dict, 'wb') as fout:
_pickle.dump(d, fout, protocol=protocol)
def load(self, fname):
fname_dict = fname+'.d'
if not (os.path.exists(fname) and os.path.exists(fname_dict)):
raise IOError(
"Can't find index files '%s' and '%s' - Unable to restore AnnoyIndexer state." % (fname, fname_dict))
else:
with smart_open(fname_dict) as f:
d = _pickle.loads(f.read())
self.num_trees = d['num_trees']
self.index = AnnoyIndex(d['f'])
self.index.load(fname)
self.labels = d['labels']
def build_from_word2vec(self):
"""Build an Annoy index using word vectors from a Word2Vec model"""
self.model.init_sims()
return self._build_from_model(self.model.wv.syn0norm, self.model.index2word
, self.model.ve
|
ctor_size)
def build_from_doc2vec(self):
"""Build an Annoy index using document vectors from a Doc2Vec model"""
docvecs = self.model.docvecs
doc
|
vecs.init_sims()
labels = [docvecs.index_to_doctag(i) for i in range(0, docvecs.count)]
return self._build_from_model(docvecs.doctag_syn0norm, labels, self.model.vector_size)
def _build_from_model(self, vectors, labels, num_features):
index = AnnoyIndex(num_features)
for vector_num, vector in enumerate(vectors):
index.add_item(vector_num, vector)
index.build(self.num_trees)
self.index = index
self.labels = labels
def most_similar(self, vector, num_neighbors):
"""Find the top-N most similar items"""
ids, distances = self.index.get_nns_by_vector(
vector, num_neighbors, include_distances=True)
return [(self.labels[ids[i]], 1 - distances[i] / 2) for i in range(len(ids))]
|
Zulfikarlatief/tealinux-software-center
|
src/updatePage.py
|
Python
|
gpl-3.0
| 6,849
| 0.007446
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Deepin, Inc.
# 2011 Wang Yong
# 2012 Reza Faiz A
#
# Author: Wang Yong <[email protected]>
# Maintainer: Wang Yong <[email protected]>
# Reza Faiz A <[email protected]>
# Remixed : Reza Faiz A <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from appItem import *
from draw import *
from lang import __, getDefaultLanguage
import gtk
import updateView
import utils
class UpdatePage(object):
'''Interface for update page.'''
def __init__(self, repoCache, switchStatus, downloadQueue, entryDetailCallback,
sendVoteCallback, fetchVoteCallback, upgradeSelectedPkgsCallback,
addIgnorePkgCallback, showIgnorePageCallback):
'''Init for update page.'''
# Init.
self.repoCache = repoCache
self.box = gtk.VBox()
self.updateView = updateView.UpdateView(
repoCache,
switchStatus,
downloadQueue,
entryDetailCallback,
sendVoteCallback,
fetchVoteCallback,
addIgnorePkgCallback,
)
self.topbar = Topbar(self.repoCache,
self.updateView.selectAllPkg,
self.updateView.unselectAllPkg,
self.updateView.getSelectList,
upgradeSelectedPkgsCallback,
showIgnorePageCallback)
# Connect components.
self.box.pack_start(self.topbar.eventbox, False, False)
self.box.pack_start(self.updateView.scrolledwindow)
self.box.show_all()
class Topbar(object):
'''Top bar.'''
def __init__(self, repoCache,
selectAllPkgCallback, unselectAllPkgCallback,
getSelectListCallback, upgradeSelectedPkgsCallback,
showIgnorePageCallback):
'''Init for top bar.'''
# Init.
self.repoCache = repoCache
self.paddingX = 5
self.selectAllPkgCallback = selectAllPkgCallback
self.unselectAllPkgCallback = unselectAllPkgCallback
self.showIgnorePageCallback = showIgnorePageCallback
self.box = gtk.HBox()
self.boxAlign = gtk.Alignment()
self.boxAlign.set(0.0, 0.5, 1.0, 1.0)
self.boxAlign.set_padding(0, 0, TOPBAR_PADDING_LEFT, TOPBAR_PADDING_UPDATE_RIGHT)
self.boxAlign.add(self.box)
self.eventbox = gtk.EventBox()
drawTopbar(self.eventbox)
upgradeBox = gtk.HBox()
upgradeAlign = gtk.Alignment()
upgradeAlign.set(1.0, 0.0, 0.0, 1.0)
upgradeAlign.add(upgradeBox)
self.numLabel = gtk.Label()
self.ignoreNumBox = gtk.HBox()
self.ignoreNumAlign = gtk.Alignment()
self.ignoreNumAlign.set(0.0, 0.5, 0.0, 0.0)
self.ignoreNumAlign.add(self.ignoreNumBox)
self.selectAllId = "selectAll"
self.unselectAllId = "unselectAll"
self.labelId = self.selectAllId
(self.selectAllBox, self.selectAllEventBox) = setDefaultRadioButton(
__("Select All"), self.selectAllId, self.setLabelId, self.getLabelId, self.selectAllPkgStatus
)
upgradeBox.pack_start(self.selectAllBox, False, False, self.paddingX)
(self.unselectAllBox, self.unselectAllEventBox) = setDefaultRadioButton(
__("Unselect All"), self.unselectAllId, self.setLabelId, self.getLabelId, self.unselectAllPkgStatus
)
upgradeBox.pack_start(self.unselectAllBox, False, False, self.paddingX)
(self.upgradeButton, upgradeButtonAlign) = newActionButton(
"search", 0.0, 0.5, "cell", False, __("Action Update"), BUTTON_FONT_SIZE_MEDIUM, "bigButtonFont")
upgradeBox.pack_start(upgradeButtonAlign, False, False, 26)
self.upgradeButton.connect("button-press-event", lambda w, e: upgradeSelectedPkgsCallback(getSelectListCallback()))
# Connect.
self.updateNum(self.repoCache.getUpgradableNum())
self.numLabel.set_alignment(0.0, 0.5)
self.box.pack_start(self.numLabel, False, False, self.paddingX)
self.box.pack_start(self.ignoreNumAlign, True, True, self.paddingX)
self.box.pack_start(upgradeAlign, True, True, self.paddingX)
self.eventbox.add(self.boxAlign)
self.updateIgnoreNum(self.repoCache.getIgnoreNum())
def selectAllPkgStatus(self):
'''Select all pkg status.'''
self.selectAllEventBox.queue_draw()
self.unselectAllEventBox.queue_draw()
self.selectAllPkgCallback()
def unselectAllPkgStatus(self):
'''Select all pkg status.'''
self.selectAllEventBox.queue_draw()
self.unselectAllEventBox.queue_draw()
self.unselectAllPkgCallback()
def setLabelId(self, lId):
'''Set label id.'''
self.labelId = lId
def getLabelId(self):
'''Get label id.'''
return self.labelId
def updateIgnoreNum(self, ignoreNum):
'''Update ignore number label.'''
utils.containerRemoveAll(self.ignoreNumBox)
if ignoreNum > 0:
(ignoreLabel, ignoreEventBox) = setDefaultClickableDynamicLabel(
__("No Notify UpdatePage") % (ignoreNum),
"topbarButton",
)
ignoreEventBox.connect("button-press-event", lambda w, e: self.showIgnorePageCallback())
self.ignoreNumBox.add(ignoreEventBox)
self.ignoreNumBox.show_all()
def updateNum(self, upgradeNum):
'''Update number.'''
if upgradeNum == 0:
markup = ""
else:
markup = (__("Topbar UpdatePage") % (LABEL_FONT_SIZE,
appTheme.getDynamicColor("topbarNum").getColor(),
LABEL_FONT_SIZE,
str(upgradeNum),
LABEL_FONT_SIZE))
self.numLabel.set_markup(markup)
# LocalWords: efe
|
eukaryote/knowhow
|
tests/conftest.py
|
Python
|
mit
| 1,892
| 0
|
# coding=utf8
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import os
from os.path import join
import tempfile
import shutil
from six.moves import configparser
import pytest
from tests import setenv, test_doc0
from knowhow.index import Index
import knowhow.util as util
@pytest.fixture
def tmpd(request):
tempdir = tempfile.mkdtemp()
request.addfinalizer(lambda: shutil.rmtree(tempdir))
return tempdir
@pytest.fixture
def conf():
try:
c = configparser.SafeConfigParser()
except AttributeError:
c = configparser.ConfigParser()
c.add_section("main")
c.set("main", "data", util.decode("/app/data"))
return c
@pytest.fixture
def conf_path(conf, tmpd):
path = join(tmpd, "knowhow.ini")
with open(path, "w") as f:
conf.write(f)
return path
@pytest.fixture
def tmp_app_index_dir_paths(tmpd):
app_dir = join(tmpd, "app")
index_dir = join(tmpd, "index")
return tmpd, app_dir, index_dir
@pytest.fixture
def tmp_app_index_dirs(tmp_app_index_dir_paths):
tmpd, appd, indexd = tmp_app_index_dir_paths
os.mkdir(appd)
os.mkdir(indexd)
return tmpd, appd, indexd
@pytest.fixture
def index_empty(request, tmp_app_index_dirs):
_, app_dir, index_dir = tmp_app_index_dirs
orig_home = os.environ.get("KNOWHOW_HOME")
orig_data = os.environ.get("KNOWHOW_DATA")
def restore():
setenv("KNOWHOW_HOME", orig_home)
setenv("KNOWHOW_DATA", orig_data)
request.addfinalizer(restore)
os.environ["KNOWHOW_HOME"] = app_dir
os.environ["KNOWHOW_DATA"] = index_dir
index = Index(app_dir=app_dir, index_dir=index_dir)
index.open(clear=True)
return index
@pytest.fixture
def index_one(index_empty):
index_empty.add(**test_doc0)
return index_empty
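# Editor's addition: a hedged sketch of a test that could consume the fixtures
# above (hypothetical, not part of the original suite); it relies only on names
# already used in this conftest. A real test module would name it test_*.
def _example_index_one_holds_a_doc(index_one):
    # pytest injects the `index_one` fixture by argument name: an Index opened
    # on temporary directories with test_doc0 added.
    assert isinstance(index_one, Index)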
|
eryxlee/scrapy
|
sexy/sexy/items.py
|
Python
|
gpl-2.0
| 358
| 0.002793
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SexyItem(scrapy.Item):
# define the fields for your item here like:
name = scrapy.Field()
    dirname = scrapy.Field()
file_urls = scrapy.Field()
files = scrapy.Field()
|
certik/sympy-oldcore
|
sympy/printing/printer.py
|
Python
|
bsd-3-clause
| 847
| 0
|
class Printer(object):
"""
"""
def __init__(self):
self._depth = -1
self._str = str
        self.emptyPrinter = str
def doprint(self, expr):
"""Returns the pretty representation for expr (as a string)"""
return self._str(self._print(expr))
def _print(self, expr):
self._depth += 1
# See if the class of expr is known, or if one of its super
# classes is known, and use that pretty function
res = None
for cls in expr.__class__.__mro__:
if hasattr(self, '_print_'+cls.__name__):
res = getattr(self, '_print_'+cls.__name__)(expr)
break
# Unknown object, just use its string representation
if res is None:
res = self.emptyPrinter(expr)
self._depth -= 1
return res
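# Editor's addition: a hedged illustration of the MRO-based dispatch above
# (hypothetical subclass, not part of the original module). _print looks up a
# _print_<ClassName> method for the expression's class or any base class and
# falls back to emptyPrinter (str) otherwise.
class ExampleIntPrinter(Printer):
    def _print_int(self, expr):
        return "int(%s)" % expr
# ExampleIntPrinter().doprint(3) returns 'int(3)'; unhandled types such as
# floats fall through to str().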
|
BirkbeckCTP/janeway
|
src/repository/migrations/0020_vq_title_abstracts.py
|
Python
|
agpl-3.0
| 693
| 0.001443
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-11-02 10:04
from __future__ import unicode_literals
from django.db import migrations
def update_version_queues(apps, schema_editor):
VersionQueue = apps.get_model('repository', 'VersionQueue')
for queue in VersionQueue.objects.all():
queue.title = queue.preprint.title
queue.abstract = queue.preprint.abstract
queue.save()
class Migration(migrations.Migration):
dependencies = [
('repository', '0019_auto_20201030_1423'),
]
operations = [
migrations.RunPython(
update_version_queues,
reverse_code=migrations.RunPython.noop,
)
]
|
coreboot/chrome-ec
|
zephyr/test/ec_app/BUILD.py
|
Python
|
bsd-3-clause
| 195
| 0
|
# Copyright 2021 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
register_host_test("ec_app")
|
zenoss/ZenPacks.community.SquidMon
|
setup.py
|
Python
|
gpl-2.0
| 2,623
| 0.012962
|
################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# NB: PACKAGES is deprecated
NAME = "ZenPacks.community.SquidMon"
VERSION = "1.0"
AUTHOR = "Josh Baird"
LICENSE = "GPLv2"
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.community']
PACKAGES = ['ZenPacks', 'ZenPacks.community', 'ZenPacks.community.SquidMon']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = '>=2.4'
PREV_ZENPACK_NAME = ""
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
# This ZenPack metadata should usually be edited with the Zenoss
    # ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name = NAME,
version = VERSION,
author = AUTHOR,
license = LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers = COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
    # of this ZenPack has changed. If no ZenPack with the current name is
# installed then a zenpack of this name if installed will be upgraded.
prevZenPackName = PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages = NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages = find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data = True,
# The MANIFEST.in file is the recommended way of including additional files
# in your ZenPack. package_data is another.
#package_data = {}
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
# tries to put add/delete the names it manages at the beginning of this
# list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified too
# dramatically.
install_requires = INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points = {
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
# All ZenPack eggs must be installed in unzipped form.
zip_safe = False,
)
|
bradchristensen/cherrymusic
|
tinytag/__init__.py
|
Python
|
gpl-3.0
| 194
| 0.005155
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from .tinytag import TinyTag, StringWalker, ID3, Ogg, Wave, Flac
__version__ = '0.9.1'
if __name__ == '__main__':
    import sys
    print(TinyTag.get(sys.argv[1]))
|
monk-ee/AWSBillingToDynamoDB
|
tests/__init__.py
|
Python
|
gpl-2.0
| 623
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Monk-ee ([email protected]).
#
"""__init__.py: Init for unit testing this module."""
__author__ = "monkee"
__maintainer__ = "monk-ee"
__email__ = "[email protected]"
__status__ = "Development
|
"
import unittest
from PuppetDBClientTestCaseV2 import PuppetDBClientTestCaseV2
from PuppetDBClientTestCaseV3 import PuppetDBClientTestCaseV3
def all_tests():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(PuppetDBClientTestCaseV2))
suite.addTest(unittest.makeSuite(PuppetDBClientTestCaseV3))
return suite
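# Editor's addition: a hedged convenience entry point for running the suite
# above directly (hypothetical, not part of the original module).
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(all_tests())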
|
yannrouillard/weboob
|
modules/voyagessncf/pages.py
|
Python
|
agpl-3.0
| 4,591
| 0.003921
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
from decimal import Decimal
from datetime import time, datetime, timedelta
from weboob.tools.browser import BasePage
from weboob.tools.json import json
from weboob.tools.mech import ClientForm
from weboob.capabilities.base import UserError, Currency
__all__ = ['CitiesPage', 'SearchPage', 'SearchErrorPage', 'SearchInProgressPage',
'ResultsPage', 'ForeignPage']
class ForeignPage(BasePage):
def on_loaded(self):
raise UserError('Your IP address is localized in a country not supported by this module (%s). Currently only the French website is supported.' % self.group_dict['country'])
class CitiesPage(BasePage):
def get_stations(self):
result = json.loads(self.document[self.document.find('{'):-2])
return result['CITIES']
class SearchPage(BasePage):
def search(self, departure, arrival, date, age, card, comfort_class):
self.browser.select_form(name='saisie')
self.browser['ORIGIN_CITY'] = departure.encode(self.browser.ENCODING)
self.browser['DESTINATION_CITY'] = arrival.encode(self.browser.ENCODING)
if date is None:
date = datetime.now() + timedelta(hours=1)
elif date < datetime.now():
raise UserError("You cannot look for older departures")
self.browser['OUTWARD_DATE'] = date.strftime('%d/%m/%y')
self.browser['OUTWARD_TIME'] = [str(date.hour)]
self.browser['PASSENGER_1'] = [age]
self.browser['PASSENGER_1_CARD'] = [card]
self.browser['COMFORT_CLASS'] = [str(comfort_class)]
self.browser.controls.append(ClientForm.TextControl('text', 'nbAnimalsForTravel', {'value': ''}))
self.browser['nbAnimalsForTravel'] = '0'
self.browser.submit()
class SearchErrorPage(BasePage):
def on_loaded(self):
p = self.document.getroot().cssselect('div.messagesError p')
if len(p) > 0:
message = p[0].text.strip()
raise UserError(message)
class SearchInProgressPage(BasePage):
def on_loaded(self):
link = self.document.xpath('//a[@id="url_redirect_proposals"]')[0]
self.browser.location(link.attrib['href'])
class ResultsPage(BasePage):
    def get_value(self, div, name, last=False):
        i = -1 if last else 0
p = div.cssselect(name)[i]
sub = p.find('p')
if sub is not None:
txt = sub.tail.strip()
if txt == '':
p.remove(sub)
else:
return unicode(txt)
return unicode(self.parser.tocleanstring(p))
def parse_hour(self, div, name, last=False):
txt = self.get_value(div, name, last)
hour, minute = map(int, txt.split('h'))
return time(hour, minute)
def iter_results(self):
for div in self.document.getroot().cssselect('div.train_info'):
info = None
price = None
currency = None
for td in div.cssselect('td.price'):
txt = self.parser.tocleanstring(td)
p = Decimal(re.sub('([^\d\.]+)', '', txt))
if price is None or p < price:
info = list(div.cssselect('strong.price_label')[0].itertext())[-1].strip().strip(':')
price = p
currency = Currency.get_currency(txt)
yield {'type': self.get_value(div, 'div.transporteur-txt'),
'time': self.parse_hour(div, 'div.departure div.hour'),
'departure': self.get_value(div, 'div.departure div.station'),
'arrival': self.get_value(div, 'div.arrival div.station', last=True),
'arrival_time': self.parse_hour(div, 'div.arrival div.hour', last=True),
'price': price,
'currency': currency,
'price_info': info,
}
|
altvod/pymander
|
examples/simple.py
|
Python
|
mit
| 1,893
| 0.002113
|
from pymander.exceptions import CantParseLine
from pymander.handlers import LineHandler, RegexLineHandler, ArgparseLineHandler
from pymander.contexts import StandardPrompt
from pymander.commander import Commander
from pymander.decorators import bind_command
class DeeperLineHandler(LineHandler):
def try_execute(self, line):
if line.strip() == 'deeper':
deeper_context = self.context.clone()
deeper_context.name = '{0} / ctx {1}'.format(self.context.name, id(deeper_context))
self.context.write('Going deeper!\nNow in: {0}\n'.format(deeper_context))
return deeper_context
raise CantParseLine(line)
class RaynorLineHandler(LineHandler):
def try_execute(self, line):
if line.strip() == 'kerrigan':
self.context.write('Oh, Sarah...\n')
return
raise CantParseLine(line)
class BerryLineHandler(RegexLineHandler):
@bind_command(r'pick a (?P<berry_kind>\w+)')
def pick_berry(self, berry_kind):
self.context.write('Picked a {0}\n'.format(berry_kind))
@bind_command(r'make (?P<berry_kind>\w+) jam')
    def make_jam(self, berry_kind):
self.context.write('Made some {0} jam\n'.format(berry_kind))
class GameLineHandler(ArgparseLineHandler):
@bind_command('play', [
['game', {'type': str, 'default': 'nothing'}],
['--well', {'action': 'store_true'}],
])
def play(self, game, well):
self.context.write('I play {0}{1}\n'.format(game, ' very well' if well else ''))
@bind_command('win')
def win(self):
self.context.write('I just won!\n')
def main():
com = Commander(
StandardPrompt([
DeeperLineHandler(),
BerryLineHandler(),
GameLineHandler(),
RaynorLineHandler(),
])
)
com.mainloop()
if __name__ == '__main__':
main()
|
chepe4pi/sokoban_api
|
sokoban/urls.py
|
Python
|
gpl-2.0
| 2,156
| 0.00603
|
from django.conf.urls import include, url
from django.contrib import admin
from rest_framework.routers import DefaultRouter
from sk_map.api.map import MapViewSet, WallViewSet, BoxViewSet, PointViewSet, MenViewSet,\
WallListViewSet, BoxListViewSet, PointListViewSet, MenListViewSet, MapListViewSet
from sk_auth.api.auth import RegisterView, AuthAPIView
from sk_game.api.game import GameViewSet
from sk_skins.api.skins import SkinView
action = {'get': 'retrieve', 'put': 'update', 'delete': 'destroy'}
action_with_patch = {'get': 'retrieve', 'put': 'update', 'delete': 'destroy', 'patch': 'partial_update'}
action_no_pk = {'get': 'list', 'post': 'create'}
router = DefaultRouter()
router.register(r'skins', SkinView)
router.register(r'auth/register', RegisterView)
urlpatterns = router.urls
urlpatterns_game = [
url('^game/(?P<map>\d+)/$', GameViewSet.as_view({'get': 'retrieve', 'patch': 'partial_update'})),
url('^game/$', GameViewSet.as_view({'get': 'retrieve', 'put': 'update', 'delete': 'destroy', 'post': 'create'})),
]
urlpatterns_map = [
    url('^map/(?P<pk>\d+)/$', MapViewSet.as_view(action_with_patch)),
    url('^map/$', MapListViewSet.as_view(action_no_pk)),
]
urlpatterns_map_obj = [
url('^wall/(?P<pk>\d+)/$', WallViewSet.as_view(action)),
url('^wall/$', WallListViewSet.as_view(action_no_pk)),
url('^box/(?P<pk>\d+)/$', BoxViewSet.as_view(action)),
url('^box/$', BoxListViewSet.as_view(action_no_pk)),
url('^point/(?P<pk>\d+)/$', PointViewSet.as_view(action)),
url('^point/$', PointListViewSet.as_view(action_no_pk)),
url('^men/(?P<pk>\d+)/$', MenViewSet.as_view(action)),
url('^men/$', MenListViewSet.as_view(action_no_pk)),
]
urlpatterns_admin =[
url(r'^admin/', include(admin.site.urls)),
]
urlpatterns_auth = [
url(r'^auth/', AuthAPIView.as_view(), name='login_view')
]
patterns_swagger = [
url(r'^docs/', include('rest_framework_swagger.urls')),
]
urlpatterns += urlpatterns_admin
urlpatterns += urlpatterns_auth
urlpatterns += patterns_swagger
urlpatterns += urlpatterns_map_obj
urlpatterns += urlpatterns_game
urlpatterns += urlpatterns_map
|
dahaic/outerspace
|
server/lib/ige/ospace/Rules/__init__.py
|
Python
|
gpl-2.0
| 11,626
| 0.027697
|
#
# Copyright 2001 - 2016 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import math
import ige.ospace.Const as Const
from ige.IDataHolder import makeIDataHolder
from Techs import noop as techDefaultHandler
def init(configDir):
global techs, Tech
import Techs
Techs.init(configDir)
from Techs import techs, Tech
## General
turnsPerDay = 24
galaxyStartDelay = turnsPerDay * 2
playerTimeout = 60 * 60 * 24 * 28 # 28 days
novicePlayerTimeout = 60 * 60 * 24 * 14 # 14 days
messageTimeout = 60 * 60 * 24 * 14 # 14 days
## New player
startingPopulation = 9000
startingBio = 1000
startingMin = 1000
startingEn = 1000
startingScannerPwr = 100
## Production
maxProdQueueLen = 10
buildOnSamePlanetMod = 1
buildOnAnotherPlanetMod = 2
unusedProdMod = 0.75
# structure economy revamp constants
basePlanetProdProd = 5 # prevents deadlocked planets, makes small planets more competitive
structDefaultHpRatio = 0.1 # structures are build with this percentage of HPs
structDefaultCpCosts = 0.2 # structures costs this amount of what is in XMLs
structFromShipHpRatio = 1.0 # structures from ships are build with this percentage of HPs
structNewPlayerHpRatio = 1.0 # structures from ships are build with this percentage of HPs
structTransferWaste = 0.5 # when replacing building, how much CP of old building is transfered to new one
structTransferMaxRatio = 0.5 # when replacing building, what is maximum effect of transfered CPs
# as we now build structures damaged, repair and decay are part of economy revamp
# repair ratio is dynamic on cost of building. it's full of magic constants
# goal is to have 480 CP building to repair in ~2 days (which is twice the legacy repair
# ratio), and the most expansive ones (adv. stargate) ~ 6 days.
# We are using log10() as it's quicker than log()
_magicBase = 1.0 / (turnsPerDay * 2)
_repairMagicBase = math.log10(480 * structDefaultCpCosts) ** 2 * _magicBase
repairRatioFunc = lambda x: _repairMagicBase / math.log10(x) ** 2
# building decay ratio bigger or equivalent of 480 CP repair
decayRatioFunc = lambda x: min( _magicBase, repairRatioFunc(x))
decayProdQueue = 0.02
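# Editor's note (worked example, not part of the original rules): plugging the
# discounted cost of a 480 CP structure into the function above gives
# repairRatioFunc(480 * structDefaultCpCosts) = _repairMagicBase / log10(96)**2
# = _magicBase = 1/48 per turn, i.e. a full repair in 48 turns = 2 game days,
# matching the comment above; cheaper structures repair faster, pricier slower.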
## Environment
envInterval = 1000
envAutoMod = 10.0
envMax = 200
envSelfUpgradeChance = {"H": 5, "C": 1, "B": 500, "m": 100, "r": 100, "p": 100, "e": 100} # in ten thousandths (10 000)
planetSpec = {}
planetSpec[u'A'] = makeIDataHolder(
minBio = 0,
maxBio = 0,
upgradeTo = None,
downgradeTo = None,
)
planetSpec[u'G'] = makeIDataHolder(
minBio = 0,
maxBio = 0,
upgradeTo = None,
downgradeTo = None,
)
planetSpec[u'C'] = makeIDataHolder(
minBio = 0,
maxBio = 6,
upgradeTo = u'D',
upgradeEnReqs = (5, 180),
downgradeTo = None,
)
planetSpec[u'R'] = makeIDataHolder(
minBio = 0,
maxBio = 6,
upgradeTo = u'D',
upgradeEnReqs = (5, 180),
downgradeTo = None,
)
planetSpec[u'D'] = makeIDataHolder(
minBio = 6,
maxBio = 12,
upgradeTo = u'H',
upgradeEnReqs = (25, 150),
downgradeTo = u'R',
)
planetSpec[u'H'] = makeIDataHolder(
minBio = 12,
maxBio = 25,
upgradeTo = u'M',
upgradeEnReqs = (50, 125),
downgradeTo = u'D',
)
planetSpec[u'M'] = makeIDataHolder(
minBio = 25,
maxBio = 75,
upgradeTo = u'E',
upgradeEnReqs = (50, 100),
downgradeTo = u'H',
)
planetSpec[u'E'] = makeIDataHolder(
minBio = 75,
maxBio = 125,
upgradeTo = u"I",
upgradeEnReqs = (50, 100),
downgradeTo = u'M',
)
planetSpec[u"I"] = makeIDataHolder( # gaia
minBio = 125,
maxBio = 200,
upgradeTo = None,
downgradeTo = u"E",
)
## New colony settings
colonyMinBio = 600
colonyMinMin = 600
colonyMinEn = 600
## Storage
popPerSlot = 0
bioPerSlot = 0
minPerSlot = 0
enPerSlot = 0
popBaseStor = 4800
bioBaseStor = 4800
minBaseStor = 4800
enBaseStor = 4800
autoMinStorTurns = 2
tlPopReserve = 100
## Resources
stratResRate = turnsPerDay * 6
stratResAmountBig = 10
stratResAmountSmall = 1
## Population
popGrowthRate = 0.02
popMinGrowthRate = int(5000 * popGrowthRate) # Increase the Minimum Population Growth from 20 to 100 per turn
popDieRate = 0.1
popMinDieRate = 100
popKillMod = 0.25
popSlotKillMod = 5 # how many people per 1 DMG get killed when slot is hit
popSlotHP = 100 # HP of habitable structures on slot (where people live)
## Research
maxRsrchQueueLen = 10
techBaseImprovement = 1
techMaxImprovement = 5
techImprCostMod = {1:480, 2:480, 3:720, 4:960, 5:1200, 6: 1440, 7: 1680} #per level
sciPtsPerCitizen = {1: 0, 2: 0.00075, 3: 0.00150, 4: 0.00175, 5: 0.00200, 6: 0.002125, 7: 0.00225, 99: 0} #per level
techImprEff = {1:0.750, 2:0.875, 3:1.000, 4:1.125, 5:1.250} #per sublevel
#maxSciPtsTL = {1:100, 2:200, 3:300, 4:400, 5:500, 6:600, 7:700}
#sciPtsStepFraction = 0.25
## Scanner
maxSignature = 100
scannerMinPwr = 1
scannerMaxPwr = 150
level1InfoScanPwr = 1000
level2InfoScanPwr = 1200
level3InfoScanPwr = 1400
level4InfoScanPwr = 1600
maxScanPwr = 200000
mapForgetScanPwr = 0.94
partnerScanPwr = 300000
## Fleets
maxCmdQueueLen = 10
signatureBase = 1.10
operProdRatio = 0.001
combatRetreatWait = 3
starGateDamage = 0.2 # damage for 100% speed boost (double for 200%, etc...)
shipDecayRatio = 0.04
maxDamageAbsorb = 5 # max absorbed damage for tech "damageAbsorb" property.
# max seq_mod equipments of equipType; anything not in list is unlimited
maxEquipType = {
'ECM' : 1, # +Missile DEF
'Combat Bonuses' : 1, # +%ATT, +%DEF
'Combat Modifiers' : 1, # +ATT, +DEF
'Shields' : 1, # not hardshields
'Stealth' : 1,
'Auto Repair' : 1,
}
## Buildings
plShieldRegen = 0.05 #regen rate of planetary shield
## Diplomacy
baseRelationChange = -5
relLostWhenAttacked = -1000000
defaultRelation = Const.REL_NEUTRAL
contactTimeout = 6 * turnsPerDay
voteForImpAnnounceOffset = 2 * turnsPerDay
voteForImpPeriod = 6 * turnsPerDay
ratioNeededForImp = 0.6666
pactDescrs = {}
pactDescrs[Const.PACT_ALLOW_CIVILIAN_SHIPS] = makeIDataHolder(
targetRel = 500,
relChng = 10,
validityInterval = (0, 10000),
)
pactDescrs[Const.PACT_ALLOW_MILITARY_SHIPS] = makeIDataHolder(
targetRel = 750,
relChng = 8,
validityInterval = (0, 10000),
)
pactDescrs[Const.PACT_ALLOW_TANKING] = makeIDataHolder(
targetRel = 750,
relChng = 7,
validityInterval = (0, 10000),
)
pactDescrs[Const.PACT_MINOR_CP_COOP] = makeIDataHolder(
targetRel = 1000,
relChng = 6,
effectivity = 0.05,
validityInterval = (625, 10000),
)
pactDescrs[Const.PACT_MAJOR_CP_COOP] = makeIDataHolder(
targetRel = 1000,
relChng = 1,
effectivity = 0.05,
validityInterval = (875, 10000),
)
pactDescrs[Const.PACT_SHARE_SCANNER] = makeIDataHolder(
targetRel = 1000,
relChng = 1,
validityInterval = (625, 10000),
)
pactDescrs[Const.PACT_MINOR_SCI_COOP] = makeIDataHolder(
targetRel = 750,
relChng = 1,
effectivity = 0.05,
validityInterval = (625, 10000),
)
pactDescrs[Const.PACT_MAJOR_SCI_COOP] = makeIDataHolder(
targetRel = 1000,
relChng = 1,
effectivity = 0.05,
validityInterval = (875, 10000),
)
## Morale
baseGovPwr = 50000
maxMorale = 100.0
minMoraleTrgt = 30.0
revoltThr = 25.0
moraleChngPerc = 0.03
moraleHighPopPenalty = 2.0
moraleBasePop = 10000
moraleLowPop = 5000
moraleLowPopBonus = 40.0
moraleLostWhenSurrender = 0.0
moraleLostNoFood = 1.0
moraleModPlHit = 96.0 # how many morale point per 1 per cent of damage
moralePerPointChance = 5.0 # for every point below revoltThr % chance for revolt
mora
|
Hoohm/pyHomeVM
|
pyHomeVM/__main__.py
|
Python
|
gpl-3.0
| 5,792
| 0.000691
|
"""__Main__."""
import sys
import os
import logging
import argparse
import traceback
import shelve
from datetime import datetime
from CONSTANTS import CONSTANTS
from settings.settings import load_config, load_core, load_remote, load_email
from settings.settings import load_html, load_sms
from core import read_structure, readStructureFromFile, updateStructure
from core import clean_video_db, syncDirTree, transferLongVersions
from core import executeToDoFile, build_html_report, umount
from core import check_and_correct_videos_errors, clean_remote
from core import get_new_file_ids_from_structure, mount, check_mkv_videos
from notifications import send_sms_notification, send_mail_report, send_mail_log
def get_args():
"""Get args."""
parser = argparse.ArgumentParser(description='pyHomeVM')
parser.add_argument('-c', '--config_file_path',
action='store',
default='settings/dev_config.cfg',
help='path to config file that is to be used.')
parser.add_argument('-s', '--sms', help='Enables sms notifications',
action='store_true')
parser.add_argument('-l', '--log', help='Enables log sending by e-mail',
action='store_true')
parser.add_argument('-r', '--report',
help='Enables html report sending by e-mail',
action='store_true')
parser.add_argument('-rem', '--remote',
help='Enables transfer of long versions to remote storage',
action='store_true')
parser.add_argument('-b', '--backup',
help='Enables backup of first videos',
action='store_true')
parser.add_argument('-stats',
help='Gets you statistics about your videos',
action='store_true')
args = parser.parse_args()
return args
def load_logger():
"""Load logger."""
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(CONSTANTS['log_file_path'])
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def main(argv=None):
"""Run main."""
start_time = datetime.now()
args = get_args() # Get args
logger = load_logger() # Set logger
logger.info('PROGRAM STARTED')
    pid = str(os.getpid())
pidfile = "/tmp/pyHomeVM.pid"
    config = load_config(args.config_file_path)  # load config file
if os.path.isfile(pidfile):
logger.info('Program already running')
html = load_html(config)
email = load_email(config)
send_mail_log(CONSTANTS['log_file_path'], email, html)
sys.exit()
file(pidfile, 'w').write(pid)
(ffmpeg, local) = load_core(config) # load core configs
remote = load_remote(config)
html = load_html(config)
sms = load_sms(config)
email = load_email(config)
if(args.log):
email = load_email(config)
if(args.report):
html = load_html(config)
if(args.remote):
remote = load_remote(config)
if(args.sms):
sms = load_sms(config)
video_db = shelve.open(CONSTANTS['video_db_path'], writeback=True)
try:
if not os.path.exists(CONSTANTS['structure_file_path']):
raise Exception("Directory structure definition file not found.")
past_structure = readStructureFromFile(CONSTANTS)
except Exception:
logger.info(traceback.format_exc())
logger.info('{} not found'.format(CONSTANTS['structure_file_path']))
past_structure = {} # Start as new
new_structure = read_structure(local)
video_ids = get_new_file_ids_from_structure(new_structure, video_db)
check_and_correct_videos_errors(video_ids, video_db, local, ffmpeg)
logger.info('Checked for errors and corrupted')
html_data = updateStructure(
past_structure,
read_structure(local),
local,
ffmpeg,
remote,
video_db)
sms_sent_file = os.path.join(CONSTANTS['script_root_dir'], 'sms_sent')
if(mount(remote)):
logger.info('Mount succesfull')
syncDirTree(local, remote)
transferLongVersions(local, remote, video_db)
if(os.path.isfile(CONSTANTS['todo_file_path'])):
executeToDoFile(CONSTANTS['todo_file_path'], local, CONSTANTS)
if(os.path.exists(sms_sent_file)):
os.remove(sms_sent_file)
logger.info('sms_sent file has been deleted')
clean_remote(remote)
umount(remote)
else:
logger.info('Mount unssuccesfull')
if(not os.path.exists(sms_sent_file) and args.sms):
send_sms_notification(sms)
logger.info('Sms sent')
with open(sms_sent_file, 'w') as sms_not:
msg = 'SMS has been sent {}'.format(CONSTANTS['TODAY'])
sms_not.write(msg)
logger.info(msg)
if(args.report and (
html_data['new'] != '' or
html_data['modified'] != '' or
html_data['deleted'] != '' or
html_data['moved'] != '')):
html_report = build_html_report(html_data, CONSTANTS, html)
send_mail_report(html_report, email)
logger.info('Mail report sent')
if(args.log):
send_mail_log(CONSTANTS['log_file_path'], email, html)
logger.info('log file sent')
clean_video_db(video_db)
check_mkv_videos(local, video_db)
logger.info('DB cleaned')
video_db.close()
logger.info('Script ran in {}'.format(datetime.now() - start_time))
os.unlink(pidfile)
if __name__ == "__main__":
sys.exit(main())
|
stormi/tsunami
|
src/secondaires/navigation/commandes/matelot/recruter.py
|
Python
|
bsd-3-clause
| 5,585
| 0.00072
|
# -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'recruter' de la commande 'matelot'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmRecruter(Parametre):
"""Commande 'matelot recruter'.
"""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "recruter", "recruit")
self.schema = "(<nombre> <personnage_present>)"
self.tronquer = True
self.aide_courte = "recrute un matelot"
self.aide_longue = \
"Cette commande permet de recruter un matelot présent " \
"dans la même salle que vous. Deux cas sont à distinguer " \
": si vous êtes à terre (si vous êtes dans un bureau de " \
"recrutement par exemple), vous pouvez demander aux matelots " \
"récemment recrutés de rejoindre votre bord. Si vous êtes " \
"sur un navire (que vous venez d'aborder, par exemple), vous " \
"pouvez demander à un matelot de rejoindre votre navire si " \
"celui-ci est assez proche. Cette commande prend deux " \
"arguments : le numéro correspondant à votre navire. Vous " \
"pouvez entrer la commande sans paramètre pour le connaître, " \
"les navires que vous possédez (et qui peuvent être utilisés " \
"pour le recrutement) seront affichés. Le second paramètre " \
"est un fragment du nom du personnage que vous souhaitez " \
"recruter. Si la commande réussi, le matelot recruté " \
"rejoindra le navire ciblé d'ici quelques instants. Veillez " \
"à rester accosté si vous êtes dans un port, sans quoi les " \
"matelots ne pourront pas vous rejoindre."
def interpreter(self, personnage, dic_masques):
"""Interprétation du paramètre"""
salle = personnage.salle
navires = importeur.navigation.get_navires_possedes(personnage)
navire = getattr(salle, "navire", None)
if dic_masques["nombre"] and dic_masques["personnage_present"]:
nombre = dic_masques["nombre"].nombre
            cible = dic_masques["personnage_present"].personnage
cle = getattr(cible, "cle", None)
try:
fiche = importeur.navigation.fiches[cle]
except KeyError:
personnage.envoyer("|err|Vous ne pouvez recruter {}.|ff|",
cible)
return
try:
n_cible = navires[nombre - 1]
except IndexError:
personnage << "|err|Ce navire n'est pas visible.|ff|"
return
if cible.etats:
personnage.envoyer("{} est occupé.", cible)
return
            # Pretend to leave
if navire is None:
sortie = [s for s in salle.sorties][0]
salle.envoyer("{{}} s'en va vers {}.".format(
sortie.nom_complet), cible)
else:
salle.envoyer("{} saute à l'eau.", cible)
matelot = navire.equipage.get_matelot_depuis_personnage(
cible)
if matelot:
navire.equipage.supprimer_matelot(matelot.nom)
cible.salle = None
nom = "matelot_" + cible.identifiant
importeur.diffact.ajouter_action(nom, 15, fiche.recruter,
cible, n_cible)
personnage.envoyer("Vous recrutez {{}} sur {}.".format(
n_cible.desc_survol), cible)
else:
if navires:
msg = "Navires que vous possédez :\n"
for i, navire in enumerate(navires):
msg += "\n |ent|{}|ff| - {}".format(i + 1,
navire.desc_survol)
else:
msg = "|att|Vous ne possédez aucun navire " \
"pouvant servir au recrutement.|ff|"
personnage << msg
|