Search is not available for this dataset
text
stringlengths
75
104k
async def send_maps(self, map_list): """Sends a request to the server containing maps (dicts).""" params = { 'VER': 8, # channel protocol version 'RID': 81188, # request identifier 'ctype': 'hangouts', # client type } if self._gsessionid_param is no...
async def _fetch_channel_sid(self): """Creates a new channel for receiving push data. Sending an empty forward channel request will create a new channel on the server. There's a separate API to get the gsessionid alone that Hangouts for Chrome uses, but if we don't send a gsess...
async def _longpoll_request(self): """Open a long-polling request and receive arrays. This method uses keep-alive to make re-opening the request faster, but the remote server will set the "Connection: close" header once an hour. Raises hangups.NetworkError or ChannelSessionError. ...
async def _on_push_data(self, data_bytes): """Parse push data and trigger events.""" logger.debug('Received chunk:\n{}'.format(data_bytes)) for chunk in self._chunk_parser.get_chunks(data_bytes): # Consider the channel connected once the first chunk is received. if not s...
def user_id(self):
    """Who created the event (:class:`~hangups.user.UserID`)."""
    sender = self._event.sender_id
    return user.UserID(chat_id=sender.chat_id, gaia_id=sender.gaia_id)
def from_str(text): """Construct :class:`ChatMessageSegment` list parsed from a string. Args: text (str): Text to parse. May contain line breaks, URLs and formatting markup (simplified Markdown and HTML) to be converted into equivalent segments. Retu...
def deserialize(segment): """Construct :class:`ChatMessageSegment` from ``Segment`` message. Args: segment: ``Segment`` message to parse. Returns: :class:`ChatMessageSegment` object. """ link_target = segment.link_data.link_target return ChatMess...
def serialize(self): """Serialize this segment to a ``Segment`` message. Returns: ``Segment`` message. """ segment = hangouts_pb2.Segment( type=self.type_, text=self.text, formatting=hangouts_pb2.Formatting( bold=self.is_bo...
def text(self): """Text of the message without formatting (:class:`str`).""" lines = [''] for segment in self.segments: if segment.type_ == hangouts_pb2.SEGMENT_TYPE_TEXT: lines[-1] += segment.text elif segment.type_ == hangouts_pb2.SEGMENT_TYPE_LINK: ...
def segments(self):
    """List of :class:`ChatMessageSegment` in message (:class:`list`)."""
    raw_segments = self._event.chat_message.message_content.segment
    return [ChatMessageSegment.deserialize(raw) for raw in raw_segments]
def attachments(self): """List of attachments in the message (:class:`list`).""" raw_attachments = self._event.chat_message.message_content.attachment if raw_attachments is None: raw_attachments = [] attachments = [] for attachment in raw_attachments: for ...
def participant_ids(self):
    """:class:`~hangups.user.UserID` of users involved (:class:`list`)."""
    raw_ids = self._event.membership_change.participant_ids
    result = []
    for pid in raw_ids:
        result.append(user.UserID(chat_id=pid.chat_id, gaia_id=pid.gaia_id))
    return result
def _decode_field(message, field, value): """Decode optional or required field.""" if field.type == FieldDescriptor.TYPE_MESSAGE: decode(getattr(message, field.name), value) else: try: if field.type == FieldDescriptor.TYPE_BYTES: value = base64.b64decode(value) ...
def _decode_repeated_field(message, field, value_list): """Decode repeated field.""" if field.type == FieldDescriptor.TYPE_MESSAGE: for value in value_list: decode(getattr(message, field.name).add(), value) else: try: for value in value_list: if field....
def decode(message, pblite, ignore_first_item=False): """Decode pblite to Protocol Buffer message. This method is permissive of decoding errors and will log them as warnings and continue decoding where possible. The first element of the outer pblite list must often be ignored using the ignore_firs...
def send_private_msg(self, *, user_id, message, auto_escape=False): ''' 发送私聊消息 ------------ :param int user_id: 对方 QQ 号 :param str | list[ dict[ str, unknown ] ] message: 要发送的内容 :param bool auto_escape: 消息内容是否作为纯文本发送(即不解析 CQ 码),`message` 数据类型为 `list` 时无效 :return...
def send_private_msg_async(self, *, user_id, message, auto_escape=False): """ 发送私聊消息 (异步版本) ------------ :param int user_id: 对方 QQ 号 :param str | list[ dict[ str, unknown ] ] message: 要发送的内容 :param bool auto_escape: 消息内容是否作为纯文本发送(即不解析 CQ 码),`message` 数据类型为 `list` 时无效 ...
def send_group_msg(self, *, group_id, message, auto_escape=False): """ 发送群消息 ------------ :param int group_id: 群号 :param str | list[ dict[ str, unknown ] ] message: 要发送的内容 :param bool auto_escape: 消息内容是否作为纯文本发送(即不解析 CQ 码),`message` 数据类型为 `list` 时无效 :return: {"me...
def send_group_msg_async(self, *, group_id, message, auto_escape=False): """ 发送群消息 (异步版本) ------------ :param int group_id: 群号 :param str | list[ dict[ str, unknown ] ] message: 要发送的内容 :param bool auto_escape: 消息内容是否作为纯文本发送(即不解析 CQ 码),`message` 数据类型为 `list` 时无效 ...
def send_discuss_msg(self, *, discuss_id, message, auto_escape=False): """ 发送讨论组消息 ------------ :param int discuss_id: 讨论组 ID(正常情况下看不到,需要从讨论组消息上报的数据中获得) :param str | list[ dict[ str, unknown ] ] message: 要发送的内容 :param bool auto_escape: 消息内容是否作为纯文本发送(即不解析 CQ 码),`message`...
def send_discuss_msg_async(self, *, discuss_id, message, auto_escape=False): """ 发送讨论组消息 (异步版本) ------------ :param int discuss_id: 讨论组 ID(正常情况下看不到,需要从讨论组消息上报的数据中获得) :param str | list[ dict[ str, unknown ] ] message: 要发送的内容 :param bool auto_escape: 消息内容是否作为纯文本发送(即不解析 CQ...
def send_msg_async(self, *, message_type, user_id=None, group_id=None, discuss_id=None, message, auto_escape=False): """ 发送消息 (异步版本) ------------ :param str message_type: 消息类型,支持 `private`、`group`、`discuss`,分别对应私聊、群组、讨论组 :param int user_id: 对方 QQ 号(消息类型为 `private` 时需要) ...
def send_like(self, *, user_id, times=1):
    """Send friend "likes" (profile thumbs-up).

    :param int user_id: target QQ number
    :param int times: number of likes; at most 10 per friend per day
    :return: None
    :rtype: None
    """
    # Resolve the underlying API call dynamically through the base class.
    api_call = super().__getattr__('send_like')
    return api_call(user_id=user_id, times=times)
def set_group_kick(self, *, group_id, user_id, reject_add_request=False): """ 群组踢人 ------------ :param int group_id: 群号 :param int user_id: 要踢的 QQ 号 :param bool reject_add_request: 拒绝此人的加群请求 :return: None :rtype: None """ return super()._...
def set_group_ban(self, *, group_id, user_id, duration=30 * 60): """ 群组单人禁言 ------------ :param int group_id: 群号 :param int user_id: 要禁言的 QQ 号 :param int duration: 禁言时长,单位秒,0 表示取消禁言 :return: None :rtype: None """ return super().__getattr_...
def set_group_anonymous_ban(self, *, group_id, flag, duration=30 * 60): """ 群组匿名用户禁言 ------------ :param int group_id: 群号 :param str flag: 要禁言的匿名用户的 flag(需从群消息上报的数据中获得) :param int duration: 禁言时长,单位秒,**无法取消匿名用户禁言** :return: None :rtype: None """ ...
def set_group_whole_ban(self, *, group_id, enable=True): """ 群组全员禁言 ------------ :param int group_id: 群号 :param bool enable: 是否禁言 :return: None :rtype: None """ return super().__getattr__('set_group_whole_ban') \ (group_id=group_id, e...
def set_group_admin(self, *, group_id, user_id, enable=True): """ 群组设置管理员 ------------ :param int group_id: 群号 :param user_id: 要设置管理员的 QQ 号 :param enable: True 为设置,False 为取消 :return: None :rtype: None """ return super().__getattr__('set_g...
def set_group_anonymous(self, *, group_id, enable=True): """ 群组匿名 ------------ :param int group_id: 群号 :param bool enable: 是否允许匿名聊天 :return: None :rtype: None """ return super().__getattr__('set_group_anonymous') \ (group_id=group_id,...
def set_group_card(self, *, group_id, user_id, card=None): """ 设置群名片(群备注) ------------ :param int group_id: 群号 :param int user_id: 要设置的 QQ 号 :param str | None card: 群名片内容,不填或空字符串表示删除群名片 :return: None :rtype: None """ return super().__geta...
def set_group_leave(self, *, group_id, is_dismiss=False): """ 退出群组 ------------ :param int group_id: 群号 :param bool is_dismiss: 是否解散,如果登录号是群主,则仅在此项为 true 时能够解散 :return: None :rtype: None """ return super().__getattr__('set_group_leave') \ ...
def set_group_special_title(self, *, group_id, user_id, special_title, duration=-1): """ 设置群组专属头衔 ------------ :param int group_id: 群号 :param int user_id: 要设置的 QQ 号 :param str special_title: 专属头衔,不填或空字符串表示删除专属头衔,只能保留前6个英文与汉字,Emoji 根据字符实际字符长度占用只能放最多3个甚至更少,超出长度部分会被截断 ...
def set_friend_add_request(self, *, flag, approve=True, remark=None): """ 处理加好友请求 ------------ :param str flag: 加好友请求的 flag(需从上报的数据中获得) :param bool approve: 是否同意请求 :param str remark: 添加后的好友备注(仅在同意时有效) :return: None :rtype: None """ return...
def set_group_add_request(self, *, flag, type, approve=True, reason=None): """ 处理加群请求、群组成员邀请 ------------ :param str flag: 加群请求的 flag(需从上报的数据中获得) :param str type: `add` 或 `invite`,请求类型(需要和上报消息中的 `sub_type` 字段相符) :param bool approve: 是否同意请求/邀请 :param str reason: ...
def get_stranger_info(self, *, user_id, no_cache=False): """ 获取陌生人信息 ------------ :param int user_id: QQ 号(不可以是登录号) :param bool no_cache: 是否不使用缓存(使用缓存可能更新不及时,但响应更快) :return: { "user_id": (QQ 号: int), "nickname": (昵称: str), "sex": (性别: str in ['male', 'female', 'unknown'...
def get_group_member_info(self, *, group_id, user_id, no_cache=False): """ 获取群成员信息 ------------ :param int group_id: 群号 :param int user_id: QQ 号(不可以是登录号) :param bool no_cache: 是否不使用缓存(使用缓存可能更新不及时,但响应更快) :return: { "group_id": (群号: int), "user_id": (QQ 号: int), "...
def get_record(self, *, file, out_format): """ 获取语音 ------------ :param str file: 收到的语音文件名,如 `0B38145AA44505000B38145AA4450500.silk` :param str out_format: 要转换到的格式,目前支持 `mp3`、`amr`、`wma`、`m4a`、`spx`、`ogg`、`wav`、`flac` :return: { "file": (转换后的语音文件名: str)} :rtype:...
def send(self, context, message, **kwargs): """ 便捷回复。会根据传入的context自动判断回复对象 ------------ :param dict context: 事件收到的content :return: None :rtype: None ------------ """ context = context.copy() context['message'] = message context.upda...
def toposort_flatten(data, sort=True): """Returns a single list of dependencies. For any set returned by toposort(), those items are sorted and appended to the result (just to make the results deterministic).""" result = [] for d in toposort(data): try: result.extend((sorted if sort els...
def _timezone_format(value):
    """Generate a timezone-aware datetime if the 'USE_TZ' setting is enabled.

    :param value: The datetime value
    :return: A locale aware datetime
    """
    if getattr(settings, 'USE_TZ', False):
        return timezone.make_aware(value, timezone.get_current_timezone())
    return value
def guess_format(self, name): """ Returns a faker method based on the field's name :param name: """ name = name.lower() faker = self.faker if re.findall(r'^is[_A-Z]', name): return lambda x: faker.boolean() elif re.findall(r'(_a|A)t$', name): return lambda...
def guess_format(self, field): """ Returns the correct faker function based on the field type :param field: """ faker = self.faker provider = self.provider if isinstance(field, DurationField): return lambda x: provider.duration() if isinstance(field, UUID...
def guess_field_formatters(self, faker): """ Gets the formatter methods for each field using the guessers or related object fields :param faker: Faker factory object """ formatters = {} name_guesser = NameGuesser(faker) field_type_guesser = FieldTypeGuesse...
def execute(self, using, inserted_entities): """ Execute the stages entities to insert :param using: :param inserted_entities: """ def format_field(format, inserted_entities): if callable(format): return format(inserted_entities) r...
def add_entity(self, model, number, customFieldFormatters=None): """ Add an order for the generation of $number records for $entity. :param model: mixed A Django Model classname, or a faker.orm.django.EntitySeeder instance :type model: Model :param number: int The number...
def execute(self, using=None): """ Populate the database using all the Entity classes previously added. :param using A Django database connection name :rtype: A list of the inserted PKs """ if not using: using = self.get_connection() inserted_entitie...
def get_connection(self): """ use the first connection available :rtype: Connection """ klass = self.entities.keys() if not klass: message = 'No classed found. Did you add entities to the Seeder?' raise SeederException(message) klass = lis...
def _read(self, mux, gain, data_rate, mode): """Perform an ADC read with the provided mux, gain, data_rate, and mode values. Returns the signed integer result of the read. """ config = ADS1x15_CONFIG_OS_SINGLE # Go out of power-down mode for conversion. # Specify mux value. ...
def _read_comparator(self, mux, gain, data_rate, mode, high_threshold, low_threshold, active_low, traditional, latching, num_readings): """Perform an ADC read with the provided mux, gain, data_rate, and mode values and with the comparator enabled as spec...
def read_adc(self, channel, gain=1, data_rate=None): """Read a single ADC channel and return the ADC value as a signed integer result. Channel must be a value within 0-3. """ assert 0 <= channel <= 3, 'Channel must be a value within 0-3!' # Perform a single shot read and set the...
def read_adc_difference(self, differential, gain=1, data_rate=None): """Read the difference between two ADC channels and return the ADC value as a signed integer result. Differential must be one of: - 0 = Channel 0 minus channel 1 - 1 = Channel 0 minus channel 3 - 2 = Chan...
def start_adc(self, channel, gain=1, data_rate=None): """Start continuous ADC conversions on the specified channel (0-3). Will return an initial conversion result, then call the get_last_result() function to read the most recent conversion result. Call stop_adc() to stop conversions. ...
def start_adc_difference(self, differential, gain=1, data_rate=None): """Start continuous ADC conversions between two ADC channels. Differential must be one of: - 0 = Channel 0 minus channel 1 - 1 = Channel 0 minus channel 3 - 2 = Channel 1 minus channel 3 - 3 = C...
def start_adc_comparator(self, channel, high_threshold, low_threshold, gain=1, data_rate=None, active_low=True, traditional=True, latching=False, num_readings=1): """Start continuous ADC conversions on the specified channel (0-3) with the compara...
def start_adc_difference_comparator(self, differential, high_threshold, low_threshold, gain=1, data_rate=None, active_low=True, traditional=True, latching=False, num_readings=1): """Start continuous ADC conversions between two chann...
def get_last_result(self): """Read the last conversion result when in continuous conversion mode. Will return a signed integer value. """ # Retrieve the conversion register value, convert to a signed int, and # return it. result = self._device.readList(ADS1x15_POINTER_CON...
def remove_exited_dusty_containers(): """Removed all dusty containers with 'Exited' in their status""" client = get_docker_client() exited_containers = get_exited_dusty_containers() removed_containers = [] for container in exited_containers: log_to_client("Removing container {}".format(conta...
def remove_images(): """Removes all dangling images as well as all images referenced in a dusty spec; forceful removal is not used""" client = get_docker_client() removed = _remove_dangling_images() dusty_images = get_dusty_images() all_images = client.images(all=True) for image in all_images: ...
def update_nginx_from_config(nginx_config): """Write the given config to disk as a Dusty sub-config in the Nginx includes directory. Then, either start nginx or tell it to reload its config to pick up what we've just written.""" logging.info('Updating nginx with new Dusty config') temp_dir = tem...
def _compose_restart(services): """Well, this is annoying. Compose 1.2 shipped with the restart functionality fucking broken, so we can't set a faster timeout than 10 seconds (which is way too long) using Compose. We are therefore resigned to trying to hack this together ourselves. Lame. Releva...
def update_running_containers_from_spec(compose_config, recreate_containers=True): """Takes in a Compose spec from the Dusty Compose compiler, writes it to the Compose spec folder so Compose can pick it up, then does everything needed to make sure the Docker VM is up and running containers with the upda...
def resolve(cls, all_known_repos, name): """We require the list of all remote repo paths to be passed in to this because otherwise we would need to import the spec assembler in this module, which would give us circular imports.""" match = None for repo in all_known_repos: ...
def ensure_local_repo(self): """Given a Dusty repo object, clone the remote into Dusty's local repos directory if it does not already exist.""" if os.path.exists(self.managed_path): logging.debug('Repo {} already exists'.format(self.remote_path)) return logging.i...
def update_local_repo(self, force=False): """Given a remote path (e.g. github.com/gamechanger/gclib), pull the latest commits from master to bring the local copy up to date.""" self.ensure_local_repo() logging.info('Updating local repo {}'.format(self.remote_path)) managed_repo...
def update_local_repo_async(self, task_queue, force=False): """Local repo updating suitable for asynchronous, parallel execution. We still need to run `ensure_local_repo` synchronously because it does a bunch of non-threadsafe filesystem operations.""" self.ensure_local_repo() ta...
def nfs_path_exists(path): """ The normal HFS file system that your mac uses does not work the same way as the NFS file system. In HFS, capitalization does not matter, but in NFS it does. This function checks if a folder exists in HFS file system using NFS semantics (case sensitive)...
def update_managed_repos(force=False): """For any active, managed repos, update the Dusty-managed copy to bring it up to date with the latest master.""" log_to_client('Pulling latest updates for all active managed repos:') update_specs_repo_and_known_hosts() repos_to_update = get_all_repos(active_on...
def prep_for_start_local_env(pull_repos): """Daemon-side command to ensure we're running the latest versions of any managed repos, including the specs repo, before we do anything else in the up flow.""" if pull_repos: update_managed_repos(force=True) assembled_spec = spec_assembler.get_assem...
def log_in_to_required_registries(): """Client-side command which runs the user through a login flow (via the Docker command-line client so auth is persisted) for any registries of active images which require a login. This is based on the `image_requires_login` key in the individual specs.""" regist...
def start_local_env(recreate_containers): """This command will use the compilers to get compose specs will pass those specs to the systems that need them. Those systems will in turn launch the services needed to make the local environment go.""" assembled_spec = spec_assembler.get_assembled_specs()...
def stop_apps_or_services(app_or_service_names=None, rm_containers=False): """Stop any currently running Docker containers associated with Dusty, or associated with the provided apps_or_services. Does not remove the service's containers.""" if app_or_service_names: log_to_client("Stopping the fo...
def restart_apps_or_services(app_or_service_names=None): """Restart any containers associated with Dusty, or associated with the provided app_or_service_names.""" if app_or_service_names: log_to_client("Restarting the following apps or services: {}".format(', '.join(app_or_service_names))) else:...
def case_insensitive_rename(src, dst):
    """Rename ``src`` to ``dst`` on a case-insensitive filesystem like HFS.

    A direct rename between paths that differ only in capitalization can
    fail on such filesystems, so we hop through a temporary directory.

    :param src: existing path to rename
    :param dst: destination path (may differ from ``src`` only by case)

    The original implementation removed the temp directory and then reused
    its freed path as the staging target, leaving a window in which another
    process could claim that path. Staging *inside* the still-existing temp
    directory (which we exclusively own) closes that race.
    """
    temp_dir = tempfile.mkdtemp()
    try:
        # Move src into our private temp dir, then out to its final,
        # possibly case-only-different, destination.
        staging_path = os.path.join(temp_dir, 'staged')
        shutil.move(src, staging_path)
        shutil.move(staging_path, dst)
    finally:
        # Remove the (now empty) temp directory; tolerate it already being gone.
        shutil.rmtree(temp_dir, ignore_errors=True)
def _compose_dict_for_nginx(port_specs): """Return a dictionary containing the Compose spec required to run Dusty's nginx container used for host forwarding.""" spec = {'image': constants.NGINX_IMAGE, 'volumes': ['{}:{}'.format(constants.NGINX_CONFIG_DIR_IN_VM, constants.NGINX_CONFIG_DIR_IN_CONT...
def get_compose_dict(assembled_specs, port_specs): """ This function returns a dictionary representation of a docker-compose.yml file, based on assembled_specs from the spec_assembler, and port_specs from the port_spec compiler """ compose_dict = _compose_dict_for_nginx(port_specs) for app_name in assem...
def _conditional_links(assembled_specs, app_name): """ Given the assembled specs and app_name, this function will return all apps and services specified in 'conditional_links' if they are specified in 'apps' or 'services' in assembled_specs. That means that some other part of the system has declared them as...
def _get_build_path(app_spec): """ Given a spec for an app, returns the value of the `build` field for docker-compose. If the path is relative, it is expanded and added to the path of the app's repo. """ if os.path.isabs(app_spec['build']): return app_spec['build'] return os.path.join(Repo(app_s...
def _composed_app_dict(app_name, assembled_specs, port_specs): """ This function returns a dictionary of the docker-compose.yml specifications for one app """ logging.info("Compose Compiler: Compiling dict for app {}".format(app_name)) app_spec = assembled_specs['apps'][app_name] compose_dict = app_spec...
def _composed_service_dict(service_spec): """This function returns a dictionary of the docker_compose specifications for one service. Currently, this is just the Dusty service spec with an additional volume mount to support Dusty's cp functionality.""" compose_dict = service_spec.plain_dict() _apply...
def _get_ports_list(app_name, port_specs): """ Returns a list of formatted port mappings for an app """ if app_name not in port_specs['docker_compose']: return [] return ["{}:{}".format(port_spec['mapped_host_port'], port_spec['in_container_port']) for port_spec in port_specs['docker_com...
def _get_compose_volumes(app_name, assembled_specs): """ This returns formatted volume specifications for a docker-compose app. We mount the app as well as any libs it needs so that local code is used in our container, instead of whatever code was in the docker image. Additionally, we create a volume f...
def validate_specs_from_path(specs_path): """ Validates Dusty specs at the given path. The following checks are performed: -That the given path exists -That there are bundles in the given path -That the fields in the specs match those allowed in our schemas -That references to ap...
def _env_vars_from_file(filename): """ This code is copied from Docker Compose, so that we're exactly compatible with their `env_file` option """ def split_env(env): if '=' in env: return env.split('=', 1) else: return env, None env = {} for line in op...
def _get_dependent(dependent_type, name, specs, root_spec_type): """ Returns everything of type <dependent_type> that <name>, of type <root_spec_type> depends on Names only are returned in a set """ spec = specs[root_spec_type].get(name) if spec is None: raise RuntimeError("{} {} was ref...
def _get_referenced_apps(specs): """ Returns a set of all apps that are required to run any bundle in specs[constants.CONFIG_BUNDLES_KEY] """ activated_bundles = specs[constants.CONFIG_BUNDLES_KEY].keys() all_active_apps = set() for active_bundle in activated_bundles: bundle_spec = specs...
def _expand_libs_in_apps(specs): """ Expands specs.apps.depends.libs to include any indirectly required libs """ for app_name, app_spec in specs['apps'].iteritems(): if 'depends' in app_spec and 'libs' in app_spec['depends']: app_spec['depends']['libs'] = _get_dependent('libs', app_n...
def _expand_libs_in_libs(specs): """ Expands specs.libs.depends.libs to include any indirectly required libs """ for lib_name, lib_spec in specs['libs'].iteritems(): if 'depends' in lib_spec and 'libs' in lib_spec['depends']: lib_spec['depends']['libs'] = _get_dependent('libs', lib_n...
def _get_referenced_libs(specs): """ Returns all libs that are referenced in specs.apps.depends.libs """ active_libs = set() for app_spec in specs['apps'].values(): for lib in app_spec['depends']['libs']: active_libs.add(lib) return active_libs
def _get_referenced_services(specs): """ Returns all services that are referenced in specs.apps.depends.services, or in specs.bundles.services """ active_services = set() for app_spec in specs['apps'].values(): for service in app_spec['depends']['services']: active_services.a...
def _add_active_assets(specs): """ This function adds an assets key to the specs, which is filled in with a dictionary of all assets defined by apps and libs in the specs """ specs['assets'] = {} for spec in specs.get_apps_and_libs(): for asset in spec['assets']: if not specs...
def _get_expanded_active_specs(specs): """ This function removes any unnecessary bundles, apps, libs, and services that aren't needed by the activated_bundles. It also expands inside specs.apps.depends.libs all libs that are needed indirectly by each app """ _filter_active(constants.CONFIG_BUND...
def get_repo_of_app_or_library(app_or_library_name): """ This function takes an app or library name and will return the corresponding repo for that app or library""" specs = get_specs() repo_name = specs.get_app_or_lib(app_or_library_name)['repo'] if not repo_name: return None return Rep...
def get_same_container_repos_from_spec(app_or_library_spec): """Given the spec of an app or library, returns all repos that are guaranteed to live in the same container""" repos = set() app_or_lib_repo = get_repo_of_app_or_library(app_or_library_spec.name) if app_or_lib_repo is not None: rep...
def get_same_container_repos(app_or_library_name):
    """Given the name of an app or library, returns all repos that are
    guaranteed to live in the same container"""
    expanded_specs = get_expanded_libs_specs()
    target_spec = expanded_specs.get_app_or_lib(app_or_library_name)
    return get_same_container_repos_from_spec(target_spec)
def _dusty_hosts_config(hosts_specs): """Return a string of all host rules required to match the given spec. This string is wrapped in the Dusty hosts header and footer so it can be easily removed later.""" rules = ''.join(['{} {}\n'.format(spec['forwarded_ip'], spec['host_address']) for spec in hosts_...
def update_hosts_file_from_port_spec(port_spec): """Given a port spec, update the hosts file specified at constants.HOST_PATH to contain the port mappings specified in the spec. Any existing Dusty configurations are replaced.""" logging.info('Updating hosts file to match port spec') hosts_specs = po...
def _move_temp_binary_to_path(tmp_binary_path): """Moves the temporary binary to the location of the binary that's currently being run. Preserves owner, group, and permissions of original binary""" # pylint: disable=E1101 binary_path = _get_binary_location() if not binary_path.endswith(constants.DUS...
def parallel_task_queue(pool_size=multiprocessing.cpu_count()): """Context manager for setting up a TaskQueue. Upon leaving the context manager, all tasks that were enqueued will be executed in parallel subject to `pool_size` concurrency constraints.""" task_queue = TaskQueue(pool_size) yield task_q...
def _nginx_location_spec(port_spec, bridge_ip): """This will output the nginx location config string for specific port spec """ location_string_spec = "\t \t location / { \n" for location_setting in ['proxy_http_version 1.1;', 'proxy_set_header Upgrade $http_upgrade;', ...
def _nginx_http_spec(port_spec, bridge_ip): """This will output the nginx HTTP config string for specific port spec """ server_string_spec = "\t server {\n" server_string_spec += "\t \t {}\n".format(_nginx_max_file_size_string()) server_string_spec += "\t \t {}\n".format(_nginx_listen_string(port_spec))...