Search is not available for this dataset
text stringlengths 75 104k |
|---|
def create_app(config):
""" Create a fully configured Celery application object.
Args:
config (Config): A reference to a lightflow configuration object.
Returns:
Celery: A fully configured Celery application object.
"""
# configure the celery logging system with the lightflow sett... |
def _cleanup_workflow(config, task_id, args, **kwargs):
""" Cleanup the results of a workflow when it finished.
Connects to the postrun signal of Celery. If the signal was sent by a workflow,
remove the result from the result backend.
Args:
task_id (str): The id of the task.
args (tupl... |
def execute_workflow(self, workflow, workflow_id=None):
""" Celery task (aka job) that runs a workflow on a worker.
This celery task starts, manages and monitors the dags that make up a workflow.
Args:
self (Task): Reference to itself, the celery task object.
workflow (Workflow): Reference... |
def execute_dag(self, dag, workflow_id, data=None):
""" Celery task that runs a single dag on a worker.
This celery task starts, manages and monitors the individual tasks of a dag.
Args:
self (Task): Reference to itself, the celery task object.
dag (Dag): Reference to a Dag object that is ... |
def execute_task(self, task, workflow_id, data=None):
""" Celery task that runs a single task on a worker.
Args:
self (Task): Reference to itself, the celery task object.
task (BaseTask): Reference to the task object that performs the work
in its run() method.
w... |
def from_celery(cls, broker_dict):
""" Create a BrokerStats object from the dictionary returned by celery.
Args:
broker_dict (dict): The dictionary as returned by celery.
Returns:
BrokerStats: A fully initialized BrokerStats object.
"""
return BrokerStat... |
def to_dict(self):
""" Return a dictionary of the broker stats.
Returns:
dict: Dictionary of the stats.
"""
return {
'hostname': self.hostname,
'port': self.port,
'transport': self.transport,
'virtual_host': self.virtual_host
... |
def from_celery(cls, name, worker_dict, queues):
""" Create a WorkerStats object from the dictionary returned by celery.
Args:
name (str): The name of the worker.
worker_dict (dict): The dictionary as returned by celery.
queues (list): A list of QueueStats objects th... |
def to_dict(self):
""" Return a dictionary of the worker stats.
Returns:
dict: Dictionary of the stats.
"""
return {
'name': self.name,
'broker': self.broker.to_dict(),
'pid': self.pid,
'process_pids': self.process_pids,
... |
def from_celery(cls, worker_name, job_dict, celery_app):
""" Create a JobStats object from the dictionary returned by celery.
Args:
worker_name (str): The name of the worker this jobs runs on.
job_dict (dict): The dictionary as returned by celery.
celery_app: Referen... |
def to_dict(self):
""" Return a dictionary of the job stats.
Returns:
dict: Dictionary of the stats.
"""
return {
'name': self.name,
'id': self.id,
'type': self.type,
'workflow_id': self.workflow_id,
'queue': self.q... |
def from_event(cls, event):
""" Create a JobEvent object from the event dictionary returned by celery.
Args:
event (dict): The dictionary as returned by celery.
Returns:
JobEvent: A fully initialized JobEvent object.
"""
return cls(
uuid=even... |
def start_workflow(name, config, *, queue=DefaultJobQueueName.Workflow,
clear_data_store=True, store_args=None):
""" Start a single workflow by sending it to the workflow queue.
Args:
name (str): The name of the workflow that should be started. Refers to the
name of the w... |
def stop_workflow(config, *, names=None):
""" Stop one or more workflows.
Args:
config (Config): Reference to the configuration object from which the
settings for the workflow are retrieved.
names (list): List of workflow names, workflow ids or workflow job ids for the
w... |
def list_workflows(config):
""" List all available workflows.
Returns a list of all workflows that are available from the paths specified
in the config. A workflow is defined as a Python file with at least one DAG.
Args:
config (Config): Reference to the configuration object from which the
... |
def list_jobs(config, *, status=JobStatus.Active,
filter_by_type=None, filter_by_worker=None):
""" Return a list of Celery jobs.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
status (JobStatus): The status of the jo... |
def events(config):
""" Return a generator that yields workflow events.
For every workflow event that is sent from celery this generator yields an event
object.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
Returns:
... |
def run(self):
""" Drain the process output streams. """
read_stdout = partial(self._read_output, stream=self._process.stdout,
callback=self._callback_stdout,
output_file=self._stdout_file)
read_stderr = partial(self._read_output, stre... |
def _read_output(self, stream, callback, output_file):
""" Read the output of the process, executed the callback and save the output.
Args:
stream: A file object pointing to the output stream that should be read.
callback(callable, None): A callback function that is called for e... |
def run(self, data, store, signal, context, **kwargs):
""" The main run method of the Python task.
Args:
data (:class:`.MultiTaskData`): The data object that has been passed from the
predecessor task.
store (:class:`.DataStoreDocument`): The persistent data store... |
def _run_as(user, group):
""" Function wrapper that sets the user and group for the process """
def wrapper():
if user is not None:
os.setuid(user)
if group is not None:
os.setgid(group)
return wrapper |
def convert(self, value):
""" Convert the specified value to the type of the option.
Args:
value: The value that should be converted.
Returns:
The value with the type given by the option.
"""
if self._type is str:
return str(value)
el... |
def check_missing(self, args):
""" Returns the names of all options that are required but were not specified.
All options that don't have a default value are required in order to run the
workflow.
Args:
args (dict): A dictionary of the provided arguments that is checked for... |
def consolidate(self, args):
""" Consolidate the provided arguments.
If the provided arguments have matching options, this performs a type conversion.
For any option that has a default value and is not present in the provided
arguments, the default value is added.
Args:
... |
def define(self, schema, *, validate=True):
""" Store the task graph definition (schema).
The schema has to adhere to the following rules:
A key in the schema dict represents a parent task and the value one or more
children:
{parent: [child]} or {parent: [child1, child2]}
... |
def run(self, config, workflow_id, signal, *, data=None):
""" Run the dag by calling the tasks in the correct order.
Args:
config (Config): Reference to the configuration object from which the
settings for the dag are retrieved.
workflow_id (str): Th... |
def validate(self, graph):
""" Validate the graph by checking whether it is a directed acyclic graph.
Args:
graph (DiGraph): Reference to a DiGraph object from NetworkX.
Raises:
DirectedAcyclicGraphInvalid: If the graph is not a valid dag.
"""
if not nx.... |
def make_graph(schema):
""" Construct the task graph (dag) from a given schema.
Parses the graph schema definition and creates the task graph. Tasks are the
vertices of the graph and the connections defined in the schema become the edges.
A key in the schema dict represents a parent ta... |
def merge(self, dataset):
""" Merge the specified dataset on top of the existing data.
This replaces all values in the existing dataset with the values from the
given dataset.
Args:
dataset (TaskData): A reference to the TaskData object that should be merged
... |
def add_dataset(self, task_name, dataset=None, *, aliases=None):
""" Add a new dataset to the MultiTaskData.
Args:
task_name (str): The name of the task from which the dataset was received.
dataset (TaskData): The dataset that should be added.
aliases (list): A list ... |
def add_alias(self, alias, index):
""" Add an alias pointing to the specified index.
Args:
alias (str): The alias that should point to the given index.
index (int): The index of the dataset for which an alias should be added.
Raises:
DataInvalidIndex: If the... |
def flatten(self, in_place=True):
""" Merge all datasets into a single dataset.
The default dataset is the last dataset to be merged, as it is considered to be
the primary source of information and should overwrite all existing fields with
the same key.
Args:
in_pla... |
def set_default_by_alias(self, alias):
""" Set the default dataset by its alias.
After changing the default dataset, all calls without explicitly specifying the
dataset by index or alias will be redirected to this dataset.
Args:
alias (str): The alias of the dataset that sh... |
def set_default_by_index(self, index):
""" Set the default dataset by its index.
After changing the default dataset, all calls without explicitly specifying the
dataset by index or alias will be redirected to this dataset.
Args:
index (int): The index of the dataset that sh... |
def get_by_alias(self, alias):
""" Return a dataset by its alias.
Args:
alias (str): The alias of the dataset that should be returned.
Raises:
DataInvalidAlias: If the alias does not represent a valid dataset.
"""
if alias not in self._aliases:
... |
def get_by_index(self, index):
""" Return a dataset by its index.
Args:
index (int): The index of the dataset that should be returned.
Raises:
DataInvalidIndex: If the index does not represent a valid dataset.
"""
if index >= len(self._datasets):
... |
def run(self, data, store, signal, context, **kwargs):
""" The main run method of the Python task.
Args:
data (:class:`.MultiTaskData`): The data object that has been passed from the
predecessor task.
store (:class:`.DataStoreDocument`): The persistent data store... |
def to_dict(self):
""" Return the task context content as a dictionary. """
return {
'task_name': self.task_name,
'dag_name': self.dag_name,
'workflow_name': self.workflow_name,
'workflow_id': self.workflow_id,
'worker_hostname': self.worker_ho... |
def start_worker(queues, config, *, name=None, celery_args=None, check_datastore=True):
""" Start a worker process.
Args:
queues (list): List of queue names this worker accepts jobs from.
config (Config): Reference to the configuration object from which the
settings for the worker a... |
def stop_worker(config, *, worker_ids=None):
""" Stop a worker process.
Args:
config (Config): Reference to the configuration object from which the
settings for the worker are retrieved.
worker_ids (list): An optional list of ids for the worker that should be stopped.
"""
if... |
def list_workers(config, *, filter_by_queues=None):
""" Return a list of all available workers.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
filter_by_queues (list): Restrict the returned workers to workers that listen to
... |
def eval(self, data, data_store, *, exclude=None):
""" Return a new object in which callable parameters have been evaluated.
Native types are not touched and simply returned, while callable methods are
executed and their return value is returned.
Args:
data (MultiTaskData):... |
def eval_single(self, key, data, data_store):
""" Evaluate the value of a single parameter taking into account callables .
Native types are not touched and simply returned, while callable methods are
executed and their return value is returned.
Args:
key (str): The name of ... |
def get_lotw_users(**kwargs):
"""Download the latest offical list of `ARRL Logbook of the World (LOTW)`__ users.
Args:
url (str, optional): Download URL
Returns:
dict: Dictionary containing the callsign (unicode) date of the last LOTW upload (datetime)
Raises:
... |
def get_clublog_users(**kwargs):
"""Download the latest offical list of `Clublog`__ users.
Args:
url (str, optional): Download URL
Returns:
dict: Dictionary containing (if data available) the fields:
firstqso, lastqso, last-lotw, lastupload (datetime),
... |
def get_eqsl_users(**kwargs):
"""Download the latest official list of `EQSL.cc`__ users. The list of users can be found here_.
Args:
url (str, optional): Download URL
Returns:
list: List containing the callsigns of EQSL users (unicode)
Raises:
IOError: ... |
def copy_data_in_redis(self, redis_prefix, redis_instance):
"""
Copy the complete lookup data into redis. Old data will be overwritten.
Args:
redis_prefix (str): Prefix to distinguish the data in redis for the different looktypes
redis_instance (str): an Instance of Redi... |
def lookup_entity(self, entity=None):
"""Returns lookup data of an ADIF Entity
Args:
entity (int): ADIF identifier of country
Returns:
dict: Dictionary containing the country specific data
Raises:
KeyError: No matching entity found
Example:... |
def _strip_metadata(self, my_dict):
"""
Create a copy of dict and remove not needed data
"""
new_dict = copy.deepcopy(my_dict)
if const.START in new_dict:
del new_dict[const.START]
if const.END in new_dict:
del new_dict[const.END]
if const.... |
def lookup_callsign(self, callsign=None, timestamp=timestamp_now):
"""
Returns lookup data if an exception exists for a callsign
Args:
callsign (string): Amateur radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
... |
def _get_dicts_from_redis(self, name, index_name, redis_prefix, item):
"""
Retrieve the data of an item from redis and put it in an index and data dictionary to match the
common query interface.
"""
r = self._redis
data_dict = {}
data_index_dict = {}
if r... |
def _check_data_for_date(self, item, timestamp, data_dict, data_index_dict):
"""
Checks if the item is found in the index. An entry in the index points to the data
in the data_dict. This is mainly used retrieve callsigns and prefixes.
In case data is found for item, a dict containing the... |
def _check_inv_operation_for_date(self, item, timestamp, data_dict, data_index_dict):
"""
Checks if the callsign is marked as an invalid operation for a given timestamp.
In case the operation is invalid, True is returned. Otherwise a KeyError is raised.
"""
if item in data_index... |
def lookup_prefix(self, prefix, timestamp=timestamp_now):
"""
Returns lookup data of a Prefix
Args:
prefix (string): Prefix of a Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary contai... |
def is_invalid_operation(self, callsign, timestamp=datetime.utcnow().replace(tzinfo=UTC)):
"""
Returns True if an operations is known as invalid
Args:
callsign (string): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Ret... |
def _check_zone_exception_for_date(self, item, timestamp, data_dict, data_index_dict):
"""
Checks the index and data if a cq-zone exception exists for the callsign
When a zone exception is found, the zone is returned. If no exception is found
a KeyError is raised
"""
if ... |
def lookup_zone_exception(self, callsign, timestamp=datetime.utcnow().replace(tzinfo=UTC)):
"""
Returns a CQ Zone if an exception exists for the given callsign
Args:
callsign (string): Amateur radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
... |
def _lookup_clublogAPI(self, callsign=None, timestamp=timestamp_now, url="https://secure.clublog.org/dxcc", apikey=None):
""" Set up the Lookup object for Clublog Online API
"""
params = {"year" : timestamp.strftime("%Y"),
"month" : timestamp.strftime("%m"),
"day" : time... |
def _lookup_qrz_dxcc(self, dxcc_or_callsign, apikey, apiv="1.3.3"):
""" Performs the dxcc lookup against the QRZ.com XML API:
"""
response = self._request_dxcc_info_from_qrz(dxcc_or_callsign, apikey, apiv=apiv)
root = BeautifulSoup(response.text, "html.parser")
lookup = {}
... |
def _lookup_qrz_callsign(self, callsign=None, apikey=None, apiv="1.3.3"):
""" Performs the callsign lookup against the QRZ.com XML API:
"""
if apikey is None:
raise AttributeError("Session Key Missing")
callsign = callsign.upper()
response = self._request_callsign_... |
def _load_clublogXML(self,
url="https://secure.clublog.org/cty.php",
apikey=None,
cty_file=None):
""" Load and process the ClublogXML file either as a download or from file
"""
if self._download:
cty_file = self... |
def _load_countryfile(self,
url="https://www.country-files.com/cty/cty.plist",
country_mapping_filename="countryfilemapping.json",
cty_file=None):
""" Load and process the ClublogXML file either as a download or from file
"""
... |
def _download_file(self, url, apikey=None):
""" Download lookup files either from Clublog or Country-files.com
"""
import gzip
import tempfile
cty = {}
cty_date = ""
cty_file_path = None
filename = None
# download file
if apikey: # clubl... |
def _extract_clublog_header(self, cty_xml_filename):
"""
Extract the header of the Clublog XML File
"""
cty_header = {}
try:
with open(cty_xml_filename, "r") as cty:
raw_header = cty.readline()
cty_date = re.search("date='.+'", raw_heade... |
def _remove_clublog_xml_header(self, cty_xml_filename):
"""
remove the header of the Clublog XML File to make it
properly parseable for the python ElementTree XML parser
"""
import tempfile
try:
with open(cty_xml_filename, "r") as f:
c... |
def _parse_clublog_xml(self, cty_xml_filename):
"""
parse the content of a clublog XML file and return the
parsed values in dictionaries
"""
entities = {}
call_exceptions = {}
prefixes = {}
invalid_operations = {}
zone_exceptions = {}
ca... |
def _parse_country_file(self, cty_file, country_mapping_filename=None):
"""
Parse the content of a PLIST file from country-files.com return the
parsed values in dictionaries.
Country-files.com provides Prefixes and Exceptions
"""
import plistlib
cty_list = None... |
def _generate_random_word(self, length):
"""
Generates a random word
"""
return ''.join(random.choice(string.ascii_lowercase) for _ in range(length)) |
def _check_html_response(self, response):
"""
Checks if the API Key is valid and if the request returned a 200 status (ok)
"""
error1 = "Access to this form requires a valid API key. For more info see: http://www.clublog.org/need_api.php"
error2 = "Invalid or missing API Key... |
def _serialize_data(self, my_dict):
"""
Serialize a Dictionary into JSON
"""
new_dict = {}
for item in my_dict:
if isinstance(my_dict[item], datetime):
new_dict[item] = my_dict[item].strftime('%Y-%m-%d%H:%M:%S')
else:
new_di... |
def _deserialize_data(self, json_data):
"""
Deserialize a JSON into a dictionary
"""
my_dict = json.loads(json_data.decode('utf8').replace("'", '"'),
encoding='UTF-8')
for item in my_dict:
if item == const.ADIF:
my_dict[item] = int(my_dic... |
def get_methods(*objs):
    """ Return the names of all public callable attributes of the given objects.

    Args:
        *objs: Any number of objects to inspect.

    Returns:
        set: Union of all attribute names that do not start with an
            underscore and refer to a callable, across all objects.
    """
    names = set()
    for obj in objs:
        for attr in dir(obj):
            if attr.startswith('_'):
                continue
            if callable(getattr(obj, attr)):
                names.add(attr)
    return names
def from_file(cls, filename, *, strict=True):
""" Create a new Config object from a configuration file.
Args:
filename (str): The location and name of the configuration file.
strict (bool): If true raises a ConfigLoadError when the configuration
cannot be found.
... |
def load_from_file(self, filename=None, *, strict=True):
""" Load the configuration from a file.
The location of the configuration file can either be specified directly in the
parameter filename or is searched for in the following order:
1. In the environment variable given by LIGH... |
def load_from_dict(self, conf_dict=None):
    """ Load the configuration from a dictionary.

    Resets the configuration to its defaults first, then recursively merges
    the provided dictionary on top of it and refreshes the Python search
    paths from the resulting configuration.

    Args:
        conf_dict (dict): Dictionary with configuration values that override
            the defaults. NOTE(review): the default of None presumably
            leaves the configuration at its defaults — confirm that
            _update_dict tolerates a None from_dict.
    """
    self.set_to_default()
    self._update_dict(self._config, conf_dict)
    self._update_python_paths()
def _update_from_file(self, filename):
""" Helper method to update an existing configuration with the values from a file.
Loads a configuration file and replaces all values in the existing configuration
dictionary with the values from the file.
Args:
filename (str): The pat... |
def _update_dict(self, to_dict, from_dict):
""" Recursively merges the fields for two dictionaries.
Args:
to_dict (dict): The dictionary onto which the merge is executed.
from_dict (dict): The dictionary merged into to_dict
"""
for key, value in from_dict.items()... |
def _update_python_paths(self):
""" Append the workflow and libraries paths to the PYTHONPATH. """
for path in self._config['workflows'] + self._config['libraries']:
if os.path.isdir(os.path.abspath(path)):
if path not in sys.path:
sys.path.append(path)
... |
def decode_char_spot(raw_string):
"""Chop Line from DX-Cluster into pieces and return a dict with the spot data"""
data = {}
# Spotter callsign
if re.match('[A-Za-z0-9\/]+[:$]', raw_string[6:15]):
data[const.SPOTTER] = re.sub(':', '', re.match('[A-Za-z0-9\/]+[:$]', raw_string[6:15]).group(0))
... |
def decode_pc11_message(raw_string):
"""Decode PC11 message, which usually contains DX Spots"""
data = {}
spot = raw_string.split("^")
data[const.FREQUENCY] = float(spot[1])
data[const.DX] = spot[2]
data[const.TIME] = datetime.fromtimestamp(mktime(strptime(spot[3]+" "+spot[4][:-1], "%d-%b-%Y %H... |
def decode_pc23_message(raw_string):
""" Decode PC23 Message which usually contains WCY """
data = {}
wcy = raw_string.split("^")
data[const.R] = int(wcy[1])
data[const.expk] = int(wcy[2])
data[const.CALLSIGN] = wcy[3]
data[const.A] = wcy[4]
data[const.SFI] = wcy[5]
data[const.K] = ... |
def _run(self, data, store, signal, context, *,
success_callback=None, stop_callback=None, abort_callback=None):
""" The internal run method that decorates the public run method.
This method makes sure data is being passed to and from the task.
Args:
data (MultiTaskDat... |
def latlong_to_locator (latitude, longitude):
"""converts WGS84 coordinates into the corresponding Maidenhead Locator
Args:
latitude (float): Latitude
longitude (float): Longitude
Returns:
string: Maidenhead locator
Raises:
ValueError: When ... |
def locator_to_latlong (locator):
"""converts Maidenhead locator in the corresponding WGS84 coordinates
Args:
locator (string): Locator, either 4 or 6 characters
Returns:
tuple (float, float): Latitude, Longitude
Raises:
ValueError: When called with wro... |
def calculate_distance(locator1, locator2):
"""calculates the (shortpath) distance between two Maidenhead locators
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Distance in km
... |
def calculate_distance_longpath(locator1, locator2):
"""calculates the (longpath) distance between two Maidenhead locators
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Distance in... |
def calculate_heading(locator1, locator2):
"""calculates the heading from the first to the second locator
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Heading in deg
Rais... |
def calculate_heading_longpath(locator1, locator2):
"""calculates the heading from the first to the second locator (long path)
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Long pa... |
def calculate_sunrise_sunset(locator, calc_date=datetime.utcnow()):
"""calculates the next sunset and sunrise for a Maidenhead locator at a give date & time
Args:
locator1 (string): Maidenhead Locator, either 4 or 6 characters
calc_date (datetime, optional): Starting datetime for th... |
def cloudpickle_dumps(obj, dumper=cloudpickle.dumps):
    """ Serialize a Python object into a byte stream using cloudpickle.

    Args:
        obj: The object that should be serialized.
        dumper (callable): The serialization function to use; defaults to
            cloudpickle.dumps.

    Returns:
        bytes: The serialized representation of obj.
    """
    protocol = serialization.pickle_protocol
    return dumper(obj, protocol=protocol)
def patch_celery():
""" Monkey patch Celery to use cloudpickle instead of pickle. """
registry = serialization.registry
serialization.pickle = cloudpickle
registry.unregister('pickle')
registry.register('pickle', cloudpickle_dumps, cloudpickle_loads,
content_type='application/x... |
def connect(self):
    """ Establish the connection to the redis database.

    Builds a StrictRedis client from the connection settings stored on the
    instance and keeps it in self._connection for later use.
    """
    settings = {
        'host': self._host,
        'port': self._port,
        'db': self._database,
        'password': self._password,
    }
    self._connection = StrictRedis(**settings)
def receive(self):
""" Returns a single request.
Takes the first request from the list of requests and returns it. If the list
is empty, None is returned.
Returns:
Response: If a new request is available a Request object is returned,
otherwise None is ... |
def send(self, response):
""" Send a response back to the client that issued a request.
Args:
response (Response): Reference to the response object that should be sent.
"""
self._connection.connection.set('{}:{}'.format(SIGNAL_REDIS_PREFIX, response.uid),
... |
def restore(self, request):
""" Push the request back onto the queue.
Args:
request (Request): Reference to a request object that should be pushed back
onto the request queue.
"""
self._connection.connection.rpush(self._request_key, pickle.dump... |
def send(self, request):
""" Send a request to the server and wait for its response.
Args:
request (Request): Reference to a request object that is sent to the server.
Returns:
Response: The response from the server to the request.
"""
self._connection.c... |
def verify_pattern(pattern):
"""Verifies if pattern for matching and finding fulfill expected structure.
:param pattern: string pattern to verify
:return: True if pattern has proper syntax, False otherwise
"""
regex = re.compile("^!?[a-zA-Z]+$|[*]{1,2}$")
def __verify_pattern__(__pa... |
def print_tree(sent, token_attr):
"""Prints sentences tree as string using token_attr from token(like pos_, tag_ etc.)
:param sent: sentence to print
:param token_attr: choosen attr to present for tokens(e.g. dep_, pos_, tag_, ...)
"""
def __print_sent__(token, attr):
print("{", en... |
def match_tree(sentence, pattern):
"""Matches given sentence with provided pattern.
:param sentence: sentence from Spacy(see: http://spacy.io/docs/#doc-spans-sents) representing complete statement
:param pattern: pattern to which sentence will be compared
:return: True if sentence match to... |
def find_tokens(sentence, pattern):
"""Find all tokens from parts of sentence fitted to pattern, being on the end of matched sub-tree(of sentence)
:param sentence: sentence from Spacy(see: http://spacy.io/docs/#doc-spans-sents) representing complete statement
:param pattern: pattern to which senten... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.