Search is not available for this dataset
text
stringlengths
75
104k
def get_enrich(config, backend_section): """Execute the enrich phase for a given backend section :param config: a Mordred config object :param backend_section: the backend section where the enrich phase is executed """ TaskProjects(config).execute() task = TaskEnrich(config, backend_section=ba...
def get_panels(config):
    """Execute the panels phase.

    Creates the dashboard panels and then the dashboard menu, in that
    order, reporting completion through the standard logger.

    :param config: a Mordred config object
    """
    panels_task = TaskPanels(config)
    panels_task.execute()

    menu_task = TaskPanelsMenu(config)
    menu_task.execute()

    logging.info("Panels creation finished!")
def config_logging(debug):
    """Configure the logging level and output format.

    :param debug: when True, enable DEBUG-level logging; otherwise INFO
    """
    log_format = '%(asctime)s %(message)s'
    level = logging.DEBUG if debug else logging.INFO
    logging.basicConfig(level=level, format=log_format)
    if debug:
        logging.debug("Debug mode activated")
def get_params_parser(): """Parse command line arguments""" parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-g', '--debug', dest='debug', action='store_true', help=argparse.SUPPRESS) parser.add_argument("--arthur", action='store_tru...
def get_params():
    """Get params to execute the micro-mordred.

    Parses the command line and exits with status 1 when no task
    (raw, enrich, identities, panels) has been enabled.
    """
    args = get_params_parser().parse_args()

    any_task = args.raw or args.enrich or args.identities or args.panels
    if not any_task:
        print("No tasks enabled")
        sys.exit(1)

    return args
def __kibiter_version(self): """ Get the kibiter vesion. :param major: major Elasticsearch version """ version = None es_url = self.conf['es_enrichment']['url'] config_url = '.kibana/config/_search' url = urijoin(es_url, config_url) version = None ...
def create_dashboard(self, panel_file, data_sources=None, strict=True): """Upload a panel to Elasticsearch if it does not exist yet. If a list of data sources is specified, upload only those elements (visualizations, searches) that match that data source. :param panel_file: file name o...
def __upload_title(self, kibiter_major): """Upload to Kibiter the title for the dashboard. The title is shown on top of the dashboard menu, and is Usually the name of the project being dashboarded. This is done only for Kibiter 6.x. :param kibiter_major: major version of kibite...
def __create_dashboard_menu(self, dash_menu, kibiter_major): """Create the menu definition to access the panels in a dashboard. :param menu: dashboard menu to upload :param kibiter_major: major version of kibiter """ logger.info("Adding dashboard menu") if kibit...
def __remove_dashboard_menu(self, kibiter_major): """Remove existing menu for dashboard, if any. Usually, we remove the menu before creating a new one. :param kibiter_major: major version of kibiter """ logger.info("Removing old dashboard menu, if any") if kibiter_major...
def __get_menu_entries(self, kibiter_major): """ Get the menu entries from the panel definition """ menu_entries = [] for entry in self.panels_menu: if entry['source'] not in self.data_sources: continue parent_menu_item = { 'name': entry['n...
def __get_dash_menu(self, kibiter_major): """Order the dashboard menu""" # omenu = OrderedDict() omenu = [] # Start with Overview omenu.append(self.menu_panels_common['Overview']) # Now the data _getsources ds_menu = self.__get_menu_entries(kibiter_major) ...
def compose_mbox(projects): """ Compose projects.json only for mbox, but using the mailing_lists lists change: 'https://dev.eclipse.org/mailman/listinfo/emft-dev' to: 'emfg-dev /home/bitergia/mboxes/emft-dev.mbox/emft-dev.mbox :param projects: projects.json :return: projects.json with mbox """...
def compose_gerrit(projects): """ Compose projects.json for gerrit, but using the git lists change: 'http://git.eclipse.org/gitroot/xwt/org.eclipse.xwt.git' to: 'git.eclipse.org_xwt/org.eclipse.xwt :param projects: projects.json :return: projects.json with gerrit """ git_projects = [projec...
def compose_git(projects, data): """ Compose projects.json for git We need to replace '/c/' by '/gitroot/' for instance change: 'http://git.eclipse.org/c/xwt/org.eclipse.xwt.git' to: 'http://git.eclipse.org/gitroot/xwt/org.eclipse.xwt.git' :param projects: projects.json :param data: eclipse J...
def compose_mailing_lists(projects, data): """ Compose projects.json for mailing lists At upstream has two different key for mailing list: 'mailings_lists' and 'dev_list' The key 'mailing_lists' is an array with mailing lists The key 'dev_list' is a dict with only one mailing list :param projects:...
def compose_github(projects, data): """ Compose projects.json for github :param projects: projects.json :param data: eclipse JSON :return: projects.json with github """ for p in [project for project in data if len(data[project]['github_repos']) > 0]: if 'github' not in projects[p]: ...
def compose_bugzilla(projects, data): """ Compose projects.json for bugzilla :param projects: projects.json :param data: eclipse JSON :return: projects.json with bugzilla """ for p in [project for project in data if len(data[project]['bugzilla']) > 0]: if 'bugzilla' not in projects[p]: ...
def compose_title(projects, data): """ Compose the projects JSON file only with the projects name :param projects: projects.json :param data: eclipse JSON with the origin format :return: projects.json with titles """ for project in data: projects[project] = { 'meta': { ...
def compose_projects_json(projects, data): """ Compose projects.json with all data sources :param projects: projects.json :param data: eclipse JSON :return: projects.json with all data sources """ projects = compose_git(projects, data) projects = compose_mailing_lists(projects, data) pr...
def __autorefresh_studies(self, cfg): """Execute autorefresh for areas of code study if configured""" if 'studies' not in self.conf[self.backend_section] or \ 'enrich_areas_of_code:git' not in self.conf[self.backend_section]['studies']: logger.debug("Not doing autorefresh fo...
def __studies(self, retention_time): """ Execute the studies configured for the current backend """ cfg = self.config.get_conf() if 'studies' not in cfg[self.backend_section] or not \ cfg[self.backend_section]['studies']: logger.debug('No studies for %s' % self.backend_se...
def retain_identities(self, retention_time): """Retain the identities in SortingHat based on the `retention_time` value declared in the setup.cfg. :param retention_time: maximum number of minutes wrt the current date to retain the SortingHat data """ enrich_es = self.conf['es_en...
def get_repos_by_backend_section(cls, backend_section, raw=True): """ return list with the repositories for a backend_section """ repos = [] projects = TaskProjects.get_projects() for pro in projects: if backend_section in projects[pro]: # if the projects.jso...
def convert_from_eclipse(self, eclipse_projects): """ Convert from eclipse projects format to grimoire projects json format """ projects = {} # We need the global project for downloading the full Bugzilla and Gerrit projects['unknown'] = { "gerrit": ["git.eclipse.org"], ...
def general_params(cls): """ Define all the possible config params """ params = {} # GENERAL CONFIG params_general = { "general": { "min_update_delay": { "optional": True, "default": 60, "type": int...
def set_param(self, section, param, value):
    """Change a param in the config.

    Only updates params that already exist; unknown section/param
    pairs are reported via the logger and left untouched.

    :param section: config section holding the param
    :param param: name of the param to change
    :param value: new value for the param
    """
    known = section in self.conf and param in self.conf[section]
    if known:
        self.conf[section][param] = value
    else:
        logger.error('Config section %s and param %s not exists', section, param)
def _add_to_conf(self, new_conf): """Add new configuration to self.conf. Adds configuration parameters in new_con to self.conf. If they already existed in conf, overwrite them. :param new_conf: new configuration, to add """ for section in new_conf: if secti...
def es_version(self, url): """Get Elasticsearch version. Get the version of Elasticsearch. This is useful because Elasticsearch and Kibiter are paired (same major version for 5, 6). :param url: Elasticseearch url hosting Kibiter indices :returns: major version, as string ...
def execute_nonstop_tasks(self, tasks_cls): """ Just a wrapper to the execute_batch_tasks method """ self.execute_batch_tasks(tasks_cls, self.conf['sortinghat']['sleep_for'], self.conf['general']['min_update_delay'], F...
def execute_batch_tasks(self, tasks_cls, big_delay=0, small_delay=0, wait_for_threads=True): """ Start a task manager per backend to complete the tasks. :param task_cls: list of tasks classes to be executed :param big_delay: seconds before global tasks are executed, should be days usual...
def __execute_initial_load(self): """ Tasks that should be done just one time """ if self.conf['phases']['panels']: tasks_cls = [TaskPanels, TaskPanelsMenu] self.execute_tasks(tasks_cls) if self.conf['phases']['identities']: tasks_cls = [TaskI...
def start(self): """ This method defines the workflow of SirMordred. So it calls to: - initialize the databases - execute the different phases for the first iteration (collection, identities, enrichment) - start the collection and enrichment in parallel by data source ...
def run(self, halt_on_nonzero=True, quiet=False, q=False, streaming=False): """ After building your commands, call `run()` to have your code executed. """ commands = str(self) if not (quiet or q): self._echo.cmd(commands) env = self._context[0].get('env', {})...
def validate_config(self): ''' Validates the provided config to make sure all the required fields are there. ''' # first ensure that all the required fields are there for key, key_config in self.params_map.items(): if key_config['required']: i...
def stdout(self): """ Converts stdout string to a list. """ if self._streaming: stdout = [] while not self.__stdout.empty(): try: line = self.__stdout.get_nowait() stdout.append(line) except: ...
def stderr(self): """ Converts stderr string to a list. """ if self._streaming: stderr = [] while not self.__stderr.empty(): try: line = self.__stderr.get_nowait() stderr.append(line) except: ...
def print_stdout(self, always_print=False): """ Prints the stdout to console - if there is any stdout, otherwise does nothing. :param always_print: print the stdout, even if there is nothing in the buffer (default: false) """ if self.__stdout or always_print: self._...
def print_stderr(self, always_print=False): """ Prints the stderr to console - if there is any stdout, otherwise does nothing. :param always_print: print the stderr, even if there is nothing in the buffer (default: false) """ if self.__stderr or always_print: self._...
def print_traceback(self, always_print=False): """ Prints the traceback to console - if there is any traceback, otherwise does nothing. :param always_print: print the traceback, even if there is nothing in the buffer (default: false) """ if self._exception or always_print: ...
def format(self, record): """Customize the message format based on the log level.""" if isinstance(self.fmt, dict): self._fmt = self.fmt[record.levelname] if sys.version_info > (3, 2): # Update self._style because we've changed self._fmt # (code ba...
def replace_print(fileobj=sys.stderr): """Sys.out replacer, by default with stderr. Use it like this: with replace_print_with(fileobj): print "hello" # writes to the file print "done" # prints to stdout Args: fileobj: a file object to replace stdout. Yields: The printer. """ printer = _...
def compact_interval_string(value_list): """Compact a list of integers into a comma-separated string of intervals. Args: value_list: A list of sortable integers such as a list of numbers Returns: A compact string representation, such as "1-5,8,12-15" """ if not value_list: return '' value_li...
def _get_storage_service(credentials):
    """Get a storage client using the provided credentials or defaults.

    :param credentials: credentials to use, or None to fall back to the
        application-default credentials.
    :returns: a Google Cloud Storage v1 API client.
    """
    creds = credentials
    if creds is None:
        creds = oauth2client.client.GoogleCredentials.get_application_default()
    return discovery.build('storage', 'v1', credentials=creds)
def _retry_storage_check(exception):
    """Return True if we should retry, False otherwise.

    Logs the exception (with a timestamp) and retries only on
    token-refresh failures from oauth2client.
    """
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
    message = '%s: Exception %s: %s' % (
        timestamp, type(exception).__name__, str(exception))
    print_error(message)
    return isinstance(exception,
                      oauth2client.client.AccessTokenRefreshError)
def _load_file_from_gcs(gcs_file_path, credentials=None): """Load context from a text file in gcs. Args: gcs_file_path: The target file path; should have the 'gs://' prefix. credentials: Optional credential to be used to load the file from gcs. Returns: The content of the text file as a string. ""...
def load_file(file_path, credentials=None): """Load a file from either local or gcs. Args: file_path: The target file path, which should have the prefix 'gs://' if to be loaded from gcs. credentials: Optional credential to be used to load the file from gcs. Returns: A python File obje...
def _file_exists_in_gcs(gcs_file_path, credentials=None): """Check whether the file exists, in GCS. Args: gcs_file_path: The target file path; should have the 'gs://' prefix. credentials: Optional credential to be used to load the file from gcs. Returns: True if the file's there. """ gcs_service...
def file_exists(file_path, credentials=None): """Check whether the file exists, on local disk or GCS. Args: file_path: The target file path; should have the 'gs://' prefix if in gcs. credentials: Optional credential to be used to load the file from gcs. Returns: True if the file's there. """ if ...
def _prefix_exists_in_gcs(gcs_prefix, credentials=None): """Check whether there is a GCS object whose name starts with the prefix. Since GCS doesn't actually have folders, this is how we check instead. Args: gcs_prefix: The path; should start with 'gs://'. credentials: Optional credential to be used to ...
def simple_pattern_exists_in_gcs(file_pattern, credentials=None): """True iff an object exists matching the input GCS pattern. The GCS pattern must be a full object reference or a "simple pattern" that conforms to the dsub input and output parameter restrictions: * No support for **, ? wildcards or [] chara...
def outputs_are_present(outputs): """True if each output contains at least one file or no output specified.""" # outputs are OutputFileParam (see param_util.py) # If outputs contain a pattern, then there is no way for `dsub` to verify # that *all* output is present. The best that `dsub` can do is to verify #...
def _build_pipeline_input_file_param(cls, var_name, docker_path): """Return a dict object representing a pipeline input argument.""" # If the filename contains a wildcard, then the target Docker path must # be a directory in order to ensure consistency whether the source pattern # contains 1 or multipl...
def _build_pipeline_docker_command(cls, script_name, inputs, outputs, envs): """Return a multi-line string of the full pipeline docker command.""" # We upload the user script as an environment argument # and write it to SCRIPT_DIR (preserving its local file name). # # The docker_command: # * wr...
def build_pipeline(cls, project, zones, min_cores, min_ram, disk_size, boot_disk_size, preemptible, accelerator_type, accelerator_count, image, script_name, envs, inputs, outputs, pipeline_name): """Builds a pipeline configuration for execution. Ar...
def build_pipeline_args(cls, project, script, job_params, task_params, reserved_labels, preemptible, logging_uri, scopes, keep_alive): """Builds pipeline args for execution. Args: project: string name of project. script: Body of the script to exec...
def _datetime_to_utc_int(date):
    """Convert a (localized) datetime into seconds since the UTC epoch.

    Note: the original docstring described the inverse conversion; this
    function goes datetime -> epoch seconds, not the other way around.

    Args:
      date: a timezone-aware datetime, or None.

    Returns:
      Seconds since the UTC epoch as a float, or None when date is None.
    """
    if date is None:
        return None

    # Build a timezone-aware epoch so subtraction against a localized
    # datetime is well-defined.
    epoch = dsub_util.replace_timezone(datetime.utcfromtimestamp(0), pytz.utc)
    return (date - epoch).total_seconds()
def get_filter(project, status=None, user_id=None, job_id=None, job_name=None, labels=None, task_id=None, task_attempt=None, create_time_min=None, create_time_max=None...
def is_dsub_operation(cls, op): """Determine if a pipelines operation is a dsub request. We don't have a rigorous way to identify an operation as being submitted by dsub. Our best option is to check for certain fields that have always been part of dsub operations. - labels: job-id, job-name, and u...
def list(cls, service, ops_filter, page_size=0): """Gets the list of operations for the specified filter. Args: service: Google Genomics API service object ops_filter: string filter of operations to return page_size: the number of operations to requested on each list operation to the ...
def prepare_job_metadata(self, script, job_name, user_id, create_time):
    """Returns a dictionary of metadata fields for the job.

    Delegates the construction entirely to google_base.
    """
    metadata = google_base.prepare_job_metadata(
        script, job_name, user_id, create_time)
    return metadata
def _build_pipeline_request(self, task_view): """Returns a Pipeline objects for the job.""" job_metadata = task_view.job_metadata job_params = task_view.job_params job_resources = task_view.job_resources task_metadata = task_view.task_descriptors[0].task_metadata task_params = task_view.task_des...
def lookup_job_tasks(self, statuses, user_ids=None, job_ids=None, job_names=None, task_ids=None, task_attempts=None, labels=None, create...
def delete_jobs(self, user_ids, job_ids, task_ids, labels, create_time_min=None, create_time_max=None): """Kills the operations associated with the specified job or job.task. Args: user_ids: List o...
def get_field(self, field, default=None): """Returns a value from the operation for a specific set of field names. Args: field: a dsub-specific job metadata key default: default value to return if field does not exist or is empty. Returns: A text string for the field or a list for 'input...
def _operation_status_message(self): """Returns the most relevant status string and last updated date string. This string is meant for display only. Returns: A printable status string and date string. """ metadata = self._op['metadata'] if not self._op['done']: if 'events' in metad...
def _get_operation_input_field_values(self, metadata, file_input): """Returns a dictionary of envs or file inputs for an operation. Args: metadata: operation metadata field file_input: True to return a dict of file inputs, False to return envs. Returns: A dictionary of input field name v...
def error_message(self): """Returns an error message if the operation failed for any reason. Failure as defined here means; ended for any reason other than 'success'. This means that a successful cancelation will also create an error message here. Returns: string, string will be empty if job...
def _format_task_name(job_id, task_id, task_attempt): """Create a task name from a job-id, task-id, and task-attempt. Task names are used internally by dsub as well as by the docker task runner. The name is formatted as "<job-id>.<task-id>[.task-attempt]". Task names follow formatting conventions allowing them...
def _convert_suffix_to_docker_chars(suffix): """Rewrite string so that all characters are valid in a docker name suffix.""" # Docker container names must match: [a-zA-Z0-9][a-zA-Z0-9_.-] accepted_characters = string.ascii_letters + string.digits + '_.-' def label_char_transform(char): if char in accepted_c...
def _task_sort_function(task): """Return a tuple for sorting 'most recent first'.""" return (task.get_field('create-time'), int(task.get_field('task-id', 0)), int(task.get_field('task-attempt', 0)))
def _datetime_in_range(self, dt, dt_min=None, dt_max=None): """Determine if the provided time is within the range, inclusive.""" # The pipelines API stores operation create-time with second granularity. # We mimic this behavior in the local provider by truncating to seconds. dt = dt.replace(microsecond=...
def _get_task_from_task_dir(self, job_id, user_id, task_id, task_attempt): """Return a Task object with this task's info.""" # We need to be very careful about how we read and interpret the contents # of the task directory. The directory could be changing because a new # task is being created. The dire...
def _delocalize_logging_command(self, logging_path, user_project): """Returns a command to delocalize logs. Args: logging_path: location of log files. user_project: name of the project to be billed for the request. Returns: eg. 'gs://bucket/path/myfile' or 'gs://bucket/script-foobar-12' ...
def _task_directory(self, job_id, task_id, task_attempt): """The local dir for staging files for that particular task.""" dir_name = 'task' if task_id is None else str(task_id) if task_attempt: dir_name = '%s.%s' % (dir_name, task_attempt) return self._provider_root() + '/' + job_id + '/' + dir_na...
def _make_environment(self, inputs, outputs, mounts): """Return a dictionary of environment variables for the container.""" env = {} env.update(providers_util.get_file_environment_variables(inputs)) env.update(providers_util.get_file_environment_variables(outputs)) env.update(providers_util.get_file...
def _localize_inputs_recursive_command(self, task_dir, inputs): """Returns a command that will stage recursive inputs.""" data_dir = os.path.join(task_dir, _DATA_SUBDIR) provider_commands = [ providers_util.build_recursive_localize_command(data_dir, inputs, ...
def _get_input_target_path(self, local_file_path): """Returns a directory or file path to be the target for "gsutil cp". If the filename contains a wildcard, then the target path must be a directory in order to ensure consistency whether the source pattern contains one or multiple files. Args: ...
def _localize_inputs_command(self, task_dir, inputs, user_project): """Returns a command that will stage inputs.""" commands = [] for i in inputs: if i.recursive or not i.value: continue source_file_path = i.uri local_file_path = task_dir + '/' + _DATA_SUBDIR + '/' + i.docker_path...
def _delocalize_outputs_commands(self, task_dir, outputs, user_project): """Copy outputs from local disk to GCS.""" commands = [] for o in outputs: if o.recursive or not o.value: continue # The destination path is o.uri.path, which is the target directory # (rather than o.uri, whi...
def get_dsub_version(): """Get the dsub version out of the _dsub_version.py source file. Setup.py should not import dsub version from dsub directly since ambiguity in import order could lead to an old version of dsub setting the version number. Parsing the file directly is simpler than using import tools (whos...
def get_filtered_normalized_events(self): """Filter the granular v2 events down to events of interest. Filter through the large number of granular events returned by the pipelines API, and extract only those that are interesting to a user. This is implemented by filtering out events which are known to ...
def _map(self, event): """Extract elements from an operation event and map to a named event.""" description = event.get('description', '') start_time = google_base.parse_rfc3339_utc_string( event.get('timestamp', '')) for name, regex in _EVENT_REGEX_MAP.items(): match = regex.match(descri...
def _get_logging_env(self, logging_uri, user_project): """Returns the environment for actions that copy logging files.""" if not logging_uri.endswith('.log'): raise ValueError('Logging URI must end in ".log": {}'.format(logging_uri)) logging_prefix = logging_uri[:-len('.log')] return { 'L...
def _get_prepare_env(self, script, job_descriptor, inputs, outputs, mounts): """Return a dict with variables for the 'prepare' action.""" # Add the _SCRIPT_REPR with the repr(script) contents # Add the _META_YAML_REPR with the repr(meta) contents # Add variables for directories that need to be created...
def _get_localization_env(self, inputs, user_project): """Return a dict with variables for the 'localization' action.""" # Add variables for paths that need to be localized, for example: # INPUT_COUNT: 1 # INPUT_0: MY_INPUT_FILE # INPUT_RECURSIVE_0: 0 # INPUT_SRC_0: gs://mybucket/mypath/myfile ...
def _get_delocalization_env(self, outputs, user_project): """Return a dict with variables for the 'delocalization' action.""" # Add variables for paths that need to be delocalized, for example: # OUTPUT_COUNT: 1 # OUTPUT_0: MY_OUTPUT_FILE # OUTPUT_RECURSIVE_0: 0 # OUTPUT_SRC_0: gs://mybucket/my...
def _build_user_environment(self, envs, inputs, outputs, mounts): """Returns a dictionary of for the user container environment.""" envs = {env.name: env.value for env in envs} envs.update(providers_util.get_file_environment_variables(inputs)) envs.update(providers_util.get_file_environment_variables(ou...
def _get_mount_actions(self, mounts, mnt_datadisk): """Returns a list of two actions per gcs bucket to mount.""" actions_to_add = [] for mount in mounts: bucket = mount.value[len('gs://'):] mount_path = mount.docker_path actions_to_add.extend([ google_v2_pipelines.build_action( ...
def _build_pipeline_request(self, task_view): """Returns a Pipeline objects for the task.""" job_metadata = task_view.job_metadata job_params = task_view.job_params job_resources = task_view.job_resources task_metadata = task_view.task_descriptors[0].task_metadata task_params = task_view.task_de...
def submit_job(self, job_descriptor, skip_if_output_present): """Submit the job (or tasks) to be executed. Args: job_descriptor: all parameters needed to launch all job tasks skip_if_output_present: (boolean) if true, skip tasks whose output is present (see --skip flag for more explanation)...
def _operations_list(self, ops_filter, max_tasks, page_size, page_token): """Gets the list of operations for the specified filter. Args: ops_filter: string filter of operations to return max_tasks: the maximum number of job tasks to return or 0 for no limit. page_size: the number of operation...
def lookup_job_tasks(self, statuses, user_ids=None, job_ids=None, job_names=None, task_ids=None, task_attempts=None, labels=None, create...
def _operation_status(self): """Returns the status of this operation. Raises: ValueError: if the operation status cannot be determined. Returns: A printable status string (RUNNING, SUCCESS, CANCELED or FAILURE). """ if not google_v2_operations.is_done(self._op): return 'RUNNING' ...
def _operation_status_message(self): """Returns the most relevant status string and failed action. This string is meant for display only. Returns: A printable status string and name of failed action (if any). """ msg = None action = None if not google_v2_operations.is_done(self._op):...
def error_message(self): """Returns an error message if the operation failed for any reason. Failure as defined here means ended for any reason other than 'success'. This means that a successful cancelation will also return an error message. Returns: string, string will be empty if job did not e...
def get_field(self, field, default=None): """Returns a value from the operation for a specific set of field names. Args: field: a dsub-specific job metadata key default: default value to return if field does not exist or is empty. Returns: A text string for the field or a list for 'input...
def _validate_ram(ram_in_mb):
    """Rounds ram up to the nearest multiple of _MEMORY_MULTIPLE."""
    multiple = GoogleV2CustomMachine._MEMORY_MULTIPLE
    return int(math.ceil(ram_in_mb / multiple) * multiple)
def build_machine_type(cls, min_cores, min_ram): """Returns a custom machine type string.""" min_cores = min_cores or job_model.DEFAULT_MIN_CORES min_ram = min_ram or job_model.DEFAULT_MIN_RAM # First, min_ram is given in GB. Convert to MB. min_ram *= GoogleV2CustomMachine._MB_PER_GB # Only ma...
def build_machine(network=None, machine_type=None, preemptible=None, service_account=None, boot_disk_size_gb=None, disks=None, accelerators=None, labels=None, cpu_platform=None...