signature
stringlengths
8
3.44k
body
stringlengths
0
1.41M
docstring
stringlengths
1
122k
id
stringlengths
5
17
def PluginAssets(self, plugin_name):
    """Get index of runs and assets for a given plugin.

    Args:
      plugin_name: Name of the plugin we are checking for.

    Returns:
      A dictionary that maps from run_name to a list of plugin assets
      for that run.
    """
    # Snapshot the accumulator map under the lock, then query each
    # accumulator without holding it.
    with self._accumulators_mutex:
        snapshot = list(six.iteritems(self._accumulators))
    return {name: acc.PluginAssets(plugin_name) for name, acc in snapshot}
f8091:c0:m4
def RetrievePluginAsset(self, run, plugin_name, asset_name):
    """Return the contents for a specific plugin asset from a run.

    Args:
      run: The string name of the run.
      plugin_name: The string name of a plugin.
      asset_name: The string name of an asset.

    Returns:
      The string contents of the plugin asset.

    Raises:
      KeyError: If the asset is not available.
    """
    return self.GetAccumulator(run).RetrievePluginAsset(
        plugin_name, asset_name)
f8091:c0:m5
def FirstEventTimestamp(self, run):
    """Return the timestamp of the first event of the given run.

    This may perform I/O if no events have been loaded yet for the run.

    Args:
      run: A string name of the run for which the timestamp is retrieved.

    Returns:
      The wall_time of the first event of the run, which will typically be
      seconds since the epoch.

    Raises:
      KeyError: If the run is not found.
      ValueError: If the run has no events loaded and there are no events
        on disk to load.
    """
    return self.GetAccumulator(run).FirstEventTimestamp()
f8091:c0:m6
def Scalars(self, run, tag):
    """Retrieve the scalar events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.ScalarEvents`.
    """
    return self.GetAccumulator(run).Scalars(tag)
f8091:c0:m7
def Graph(self, run):
    """Retrieve the graph associated with the provided run.

    Args:
      run: A string name of a run to load the graph for.

    Raises:
      KeyError: If the run is not found.
      ValueError: If the run does not have an associated graph.

    Returns:
      The `GraphDef` protobuf data structure.
    """
    return self.GetAccumulator(run).Graph()
f8091:c0:m8
def MetaGraph(self, run):
    """Retrieve the metagraph associated with the provided run.

    Args:
      run: A string name of a run to load the graph for.

    Raises:
      KeyError: If the run is not found.
      ValueError: If the run does not have an associated graph.

    Returns:
      The `MetaGraphDef` protobuf data structure.
    """
    return self.GetAccumulator(run).MetaGraph()
f8091:c0:m9
def RunMetadata(self, run, tag):
    """Get the session.run() metadata associated with a TensorFlow run and tag.

    Args:
      run: A string name of a TensorFlow run.
      tag: A string name of the tag associated with a particular session.run().

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      The metadata in the form of `RunMetadata` protobuf data structure.
    """
    return self.GetAccumulator(run).RunMetadata(tag)
f8091:c0:m10
def Histograms(self, run, tag):
    """Retrieve the histogram events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.HistogramEvents`.
    """
    return self.GetAccumulator(run).Histograms(tag)
f8091:c0:m11
def CompressedHistograms(self, run, tag):
    """Retrieve the compressed histogram events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.CompressedHistogramEvents`.
    """
    return self.GetAccumulator(run).CompressedHistograms(tag)
f8091:c0:m12
def Images(self, run, tag):
    """Retrieve the image events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.ImageEvents`.
    """
    return self.GetAccumulator(run).Images(tag)
f8091:c0:m13
def Audio(self, run, tag):
    """Retrieve the audio events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.AudioEvents`.
    """
    return self.GetAccumulator(run).Audio(tag)
f8091:c0:m14
def Tensors(self, run, tag):
    """Retrieve the tensor events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.TensorEvent`s.
    """
    return self.GetAccumulator(run).Tensors(tag)
f8091:c0:m15
def PluginRunToTagToContent(self, plugin_name):
    """Returns a 2-layer dictionary of the form {run: {tag: content}}.

    The `content` referred above is the content field of the PluginData proto
    for the specified plugin within a Summary.Value proto.

    Args:
      plugin_name: The name of the plugin for which to fetch content.

    Returns:
      A dictionary of the form {run: {tag: content}}.
    """
    mapping = {}
    for run in self.Runs():
        # A run may vanish between listing and lookup, or may have no data
        # for this plugin; either raises KeyError and the run is omitted.
        try:
            mapping[run] = self.GetAccumulator(run).PluginTagToContent(
                plugin_name)
        except KeyError:
            continue
    return mapping
f8091:c0:m16
def SummaryMetadata(self, run, tag):
    """Return the summary metadata for the given tag on the given run.

    Args:
      run: A string name of the run for which summary metadata is to be
        retrieved.
      tag: A string name of the tag whose summary metadata is to be
        retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      A `SummaryMetadata` protobuf.
    """
    return self.GetAccumulator(run).SummaryMetadata(tag)
f8091:c0:m17
def Runs(self):
    """Return all the run names in the `EventMultiplexer`.

    Returns:
    ```
      {runName: { images: [tag1, tag2, tag3],
                  scalarValues: [tagA, tagB, tagC],
                  histograms: [tagX, tagY, tagZ],
                  compressedHistograms: [tagX, tagY, tagZ],
                  graph: true, meta_graph: true}}
    ```
    """
    # Copy the map under the lock; call Tags() on each accumulator after
    # releasing it.
    with self._accumulators_mutex:
        snapshot = list(six.iteritems(self._accumulators))
    return {name: acc.Tags() for name, acc in snapshot}
f8091:c0:m18
def RunPaths(self):
    """Returns a dict mapping run names to event file paths."""
    return self._paths
f8091:c0:m19
def GetAccumulator(self, run):
    """Returns EventAccumulator for a given run.

    Args:
      run: String name of run.

    Returns:
      An EventAccumulator object.

    Raises:
      KeyError: If run does not exist.
    """
    # Lookup is guarded so it never races with AddRun/Reload mutations.
    with self._accumulators_mutex:
        return self._accumulators[run]
f8091:c0:m20
def add_event(self, event):
    """Match the EventWriter API by delegating to AddEvent."""
    self.AddEvent(event)
f8092:c0:m7
def get_logdir(self):
    """Return a temp directory for asset writing."""
    return self._testcase.get_temp_dir()
f8092:c0:m8
def close(self):
    """Closes the event writer.

    This fake has nothing to flush or release, so it is a no-op.
    """
f8092:c0:m9
def assertTagsEqual(self, actual, expected):
    """Utility method for checking the return value of the Tags() call.

    It fills out the `expected` arg with the default (empty) values for every
    tag type, so that the author needs only specify the non-empty values they
    are interested in testing.

    Args:
      actual: The actual Accumulator tags response.
      expected: The expected tags response (empty fields may be omitted)
    """
    empty_tags = {
        ea.IMAGES: [],
        ea.AUDIO: [],
        ea.SCALARS: [],
        ea.HISTOGRAMS: [],
        ea.COMPRESSED_HISTOGRAMS: [],
        ea.GRAPH: False,
        ea.META_GRAPH: False,
        ea.RUN_METADATA: [],
        ea.TENSORS: [],
    }
    # Both responses must cover exactly the same tag types.
    self.assertItemsEqual(actual.keys(), empty_tags.keys())
    for key in actual:
        expected_value = expected.get(key, empty_tags[key])
        if isinstance(expected_value, list):
            # List-valued tag types compare order-insensitively.
            self.assertItemsEqual(actual[key], expected_value)
        else:
            self.assertEqual(actual[key], expected_value)
f8092:c1:m0
def _writeMetadata(self, logdir, summary_metadata, nonce='<STR_LIT>'):
    """Write to disk a summary with the given metadata.

    Arguments:
      logdir: a string
      summary_metadata: a `SummaryMetadata` protobuf object
      nonce: optional; will be added to the end of the event file name
        to guarantee that multiple calls to this function do not stomp the
        same file
    """
    summary = summary_pb2.Summary()
    summary.value.add(
        tensor=tensor_util.make_tensor_proto(
            ['<STR_LIT>', '<STR_LIT>', '<STR_LIT:to>'], dtype=tf.string),
        tag='<STR_LIT>',
        metadata=summary_metadata)
    # The nonce becomes a filename suffix, keeping event files distinct.
    writer = test_util.FileWriter(logdir, filename_suffix=nonce)
    writer.add_summary(summary.SerializeToString())
    writer.close()
f8092:c3:m2
def Load(self):
    """Loads all new events from disk as raw serialized proto bytestrings.

    Calling Load multiple times in a row will not 'drop' events as long as the
    return value is not iterated over.

    Yields:
      All event proto bytestrings in the file that have not been yielded yet.
    """
    logger.debug('<STR_LIT>', self._file_path)
    # Older reader builds expose GetNext(status); newer ones take no
    # argument.  NOTE(review): inspect.getargspec is removed in Python 3.11+;
    # presumably this file targets 2/3 via six — confirm before upgrading.
    get_next_args = inspect.getargspec(self._reader.GetNext).args
    legacy_get_next = (len(get_next_args) > 1)
    while True:
        try:
            if legacy_get_next:
                with tf.compat.v1.errors.raise_exception_on_not_ok_status() as status:
                    self._reader.GetNext(status)
            else:
                self._reader.GetNext()
        except (tf.errors.DataLossError, tf.errors.OutOfRangeError) as e:
            # No complete record is available right now; stop this pass.
            logger.debug('<STR_LIT>', e)
            break
        yield self._reader.record()
    logger.debug('<STR_LIT>', self._file_path)
f8094:c0:m1
def Load(self):
    """Loads all new events from disk.

    Calling Load multiple times in a row will not 'drop' events as long as the
    return value is not iterated over.

    Yields:
      All events in the file that have not been yielded yet.
    """
    # Parse each raw record produced by the base loader into an Event proto.
    for serialized in super(EventFileLoader, self).Load():
        yield event_pb2.Event.FromString(serialized)
f8094:c1:m0
def __init__(self, path):
    """Constructs a fake accumulator with some fake events.

    Args:
      path: The path for the run that this accumulator is for.
    """
    self._path = path
    self.reload_called = False
    # Canned plugin content for tests, keyed by plugin name then tag.
    self._plugin_to_tag_to_content = {
        '<STR_LIT>': {
            '<STR_LIT:foo>': '<STR_LIT>',
            '<STR_LIT:bar>': '<STR_LIT>',
        }
    }
f8095:c0:m0
def _LoadAllEvents(self):
    """Loads all events in the watcher."""
    # Drain the generator; only the loading side effects matter here.
    for _ in self._watcher.Load():
        pass
f8096:c1:m3
def __init__(self,
             run_path_map=None,
             size_guidance=None,
             tensor_size_guidance=None,
             purge_orphaned_data=True,
             max_reload_threads=None):
    """Constructor for the `EventMultiplexer`.

    Args:
      run_path_map: Dict `{run: path}` which specifies the name of a run, and
        the path to find the associated events. If it is None, then the
        EventMultiplexer initializes without any runs.
      size_guidance: A dictionary mapping from `tagType` to the number of items
        to store for each tag of that type. See
        `event_accumulator.EventAccumulator` for details.
      tensor_size_guidance: A dictionary mapping from `plugin_name` to the
        number of items to store for each tag of that type. See
        `event_accumulator.EventAccumulator` for details.
      purge_orphaned_data: Whether to discard any events that were "orphaned"
        by a TensorFlow restart.
      max_reload_threads: The max number of threads that TensorBoard can use
        to reload runs. Each thread reloads one run at a time. If not
        provided, reloads runs serially (one after another).
    """
    logger.info('<STR_LIT>')
    self._accumulators_mutex = threading.Lock()
    self._accumulators = {}  # run name -> EventAccumulator
    self._paths = {}  # run name -> event file path
    self._reload_called = False
    self._size_guidance = (size_guidance or
                           event_accumulator.DEFAULT_SIZE_GUIDANCE)
    self._tensor_size_guidance = tensor_size_guidance
    self.purge_orphaned_data = purge_orphaned_data
    # Default of 1 means serial reloading.
    self._max_reload_threads = max_reload_threads or 1
    if run_path_map is not None:
        logger.info('<STR_LIT>',
                    run_path_map)
        for (run, path) in six.iteritems(run_path_map):
            self.AddRun(path, run)
    logger.info('<STR_LIT>')
f8098:c0:m0
def AddRun(self, path, name=None):
    """Add a run to the multiplexer.

    If the name is not specified, it is the same as the path.

    If a run by that name exists, and we are already watching the right path,
    do nothing. If we are watching a different path, replace the event
    accumulator.

    If `Reload` has been called, it will `Reload` the newly created
    accumulators.

    Args:
      path: Path to the event files (or event directory) for given run.
      name: Name of the run to add. If not provided, is set to path.

    Returns:
      The `EventMultiplexer`.
    """
    name = name or path
    accumulator = None
    with self._accumulators_mutex:
        if name not in self._accumulators or self._paths[name] != path:
            if name in self._paths and self._paths[name] != path:
                # Same run name, new path: the old accumulator is replaced.
                logger.warn('<STR_LIT>',
                            name, self._paths[name], path)
            logger.info('<STR_LIT>', path)
            accumulator = event_accumulator.EventAccumulator(
                path,
                size_guidance=self._size_guidance,
                tensor_size_guidance=self._tensor_size_guidance,
                purge_orphaned_data=self.purge_orphaned_data)
            self._accumulators[name] = accumulator
            self._paths[name] = path
    if accumulator:
        # Reload happens outside the lock so I/O doesn't block other calls.
        if self._reload_called:
            accumulator.Reload()
    return self
f8098:c0:m1
def AddRunsFromDirectory(self, path, name=None):
    """Load runs from a directory; recursively walks subdirectories.

    If path doesn't exist, no-op. This ensures that it is safe to call
    `AddRunsFromDirectory` multiple times, even before the directory is made.

    If path is a directory, load event files in the directory (if any exist)
    and recursively call AddRunsFromDirectory on any subdirectories. This mean
    you can call AddRunsFromDirectory at the root of a tree of event logs and
    TensorBoard will load them all.

    If the `EventMultiplexer` is already loaded this will cause the newly
    created accumulators to `Reload()`.

    Args:
      path: A string path to a directory to load runs from.
      name: Optionally, what name to apply to the runs. If name is provided
        and the directory contains run subdirectories, the name of each subrun
        is the concatenation of the parent name and the subdirectory name. If
        name is provided and the directory contains event files, then a run
        is added called "name" and with the events from the path.

    Raises:
      ValueError: If the path exists and isn't a directory.

    Returns:
      The `EventMultiplexer`.
    """
    logger.info('<STR_LIT>', path)
    for subdir in io_wrapper.GetLogdirSubdirectories(path):
        logger.info('<STR_LIT>', subdir)
        # Run name is the subdirectory's path relative to the root, prefixed
        # with `name` when one was supplied.
        rpath = os.path.relpath(subdir, path)
        subname = os.path.join(name, rpath) if name else rpath
        self.AddRun(subdir, name=subname)
    logger.info('<STR_LIT>', path)
    return self
f8098:c0:m2
def Reload(self):
    """Call `Reload` on every `EventAccumulator`."""
    logger.info('<STR_LIT>')
    self._reload_called = True
    # Snapshot the accumulators under the lock; reload them without it so
    # long-running I/O does not block other multiplexer operations.
    with self._accumulators_mutex:
        items = list(self._accumulators.items())
    items_queue = queue.Queue()
    for item in items:
        items_queue.put(item)
    # Runs whose backing directory disappeared; pruned after reloading.
    names_to_delete = set()
    names_to_delete_mutex = threading.Lock()

    def Worker():
        """<STR_LIT>"""
        while True:
            try:
                name, accumulator = items_queue.get(block=False)
            except queue.Empty:
                # Queue drained: this worker is done.
                break
            try:
                accumulator.Reload()
            except (OSError, IOError) as e:
                logger.error('<STR_LIT>', name, e)
            except directory_watcher.DirectoryDeletedError:
                with names_to_delete_mutex:
                    names_to_delete.add(name)
            finally:
                items_queue.task_done()

    if self._max_reload_threads > 1:
        num_threads = min(
            self._max_reload_threads, len(items))
        logger.info('<STR_LIT>', num_threads)
        for i in xrange(num_threads):
            thread = threading.Thread(target=Worker, name='<STR_LIT>' % i)
            thread.daemon = True
            thread.start()
        items_queue.join()
    else:
        logger.info(
            '<STR_LIT>'
            '<STR_LIT>')
        Worker()
    with self._accumulators_mutex:
        for name in names_to_delete:
            logger.warn('<STR_LIT>', name)
            del self._accumulators[name]
    logger.info('<STR_LIT>')
    return self
f8098:c0:m3
def PluginAssets(self, plugin_name):
    """Get index of runs and assets for a given plugin.

    Args:
      plugin_name: Name of the plugin we are checking for.

    Returns:
      A dictionary that maps from run_name to a list of plugin assets
      for that run.
    """
    # Copy out the (run, accumulator) pairs under the lock, then query
    # each accumulator lock-free.
    with self._accumulators_mutex:
        pairs = list(six.iteritems(self._accumulators))
    return {run_name: a.PluginAssets(plugin_name) for run_name, a in pairs}
f8098:c0:m4
def RetrievePluginAsset(self, run, plugin_name, asset_name):
    """Return the contents for a specific plugin asset from a run.

    Args:
      run: The string name of the run.
      plugin_name: The string name of a plugin.
      asset_name: The string name of an asset.

    Returns:
      The string contents of the plugin asset.

    Raises:
      KeyError: If the asset is not available.
    """
    acc = self.GetAccumulator(run)
    return acc.RetrievePluginAsset(plugin_name, asset_name)
f8098:c0:m5
def FirstEventTimestamp(self, run):
    """Return the timestamp of the first event of the given run.

    This may perform I/O if no events have been loaded yet for the run.

    Args:
      run: A string name of the run for which the timestamp is retrieved.

    Returns:
      The wall_time of the first event of the run, which will typically be
      seconds since the epoch.

    Raises:
      KeyError: If the run is not found.
      ValueError: If the run has no events loaded and there are no events
        on disk to load.
    """
    acc = self.GetAccumulator(run)
    return acc.FirstEventTimestamp()
f8098:c0:m6
def Scalars(self, run, tag):
    """Retrieve the scalar events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.ScalarEvents`.
    """
    acc = self.GetAccumulator(run)
    return acc.Scalars(tag)
f8098:c0:m7
def Graph(self, run):
    """Retrieve the graph associated with the provided run.

    Args:
      run: A string name of a run to load the graph for.

    Raises:
      KeyError: If the run is not found.
      ValueError: If the run does not have an associated graph.

    Returns:
      The `GraphDef` protobuf data structure.
    """
    acc = self.GetAccumulator(run)
    return acc.Graph()
f8098:c0:m8
def MetaGraph(self, run):
    """Retrieve the metagraph associated with the provided run.

    Args:
      run: A string name of a run to load the graph for.

    Raises:
      KeyError: If the run is not found.
      ValueError: If the run does not have an associated graph.

    Returns:
      The `MetaGraphDef` protobuf data structure.
    """
    acc = self.GetAccumulator(run)
    return acc.MetaGraph()
f8098:c0:m9
def RunMetadata(self, run, tag):
    """Get the session.run() metadata associated with a TensorFlow run and tag.

    Args:
      run: A string name of a TensorFlow run.
      tag: A string name of the tag associated with a particular session.run().

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      The metadata in the form of `RunMetadata` protobuf data structure.
    """
    acc = self.GetAccumulator(run)
    return acc.RunMetadata(tag)
f8098:c0:m10
def Audio(self, run, tag):
    """Retrieve the audio events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.AudioEvents`.
    """
    acc = self.GetAccumulator(run)
    return acc.Audio(tag)
f8098:c0:m11
def Tensors(self, run, tag):
    """Retrieve the tensor events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.TensorEvent`s.
    """
    acc = self.GetAccumulator(run)
    return acc.Tensors(tag)
f8098:c0:m12
def PluginRunToTagToContent(self, plugin_name):
    """Returns a 2-layer dictionary of the form {run: {tag: content}}.

    The `content` referred above is the content field of the PluginData proto
    for the specified plugin within a Summary.Value proto.

    Args:
      plugin_name: The name of the plugin for which to fetch content.

    Returns:
      A dictionary of the form {run: {tag: content}}.
    """
    result = {}
    for run_name in self.Runs():
        # KeyError from either the lookup or PluginTagToContent means this
        # run has nothing for the plugin; it is simply omitted.
        try:
            content = self.GetAccumulator(run_name).PluginTagToContent(
                plugin_name)
        except KeyError:
            continue
        result[run_name] = content
    return result
f8098:c0:m13
def SummaryMetadata(self, run, tag):
    """Return the summary metadata for the given tag on the given run.

    Args:
      run: A string name of the run for which summary metadata is to be
        retrieved.
      tag: A string name of the tag whose summary metadata is to be
        retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      A `SummaryMetadata` protobuf.
    """
    acc = self.GetAccumulator(run)
    return acc.SummaryMetadata(tag)
f8098:c0:m14
def Runs(self):
    """Return all the run names in the `EventMultiplexer`.

    Returns:
    ```
      {runName: { scalarValues: [tagA, tagB, tagC],
                  graph: true, meta_graph: true}}
    ```
    """
    # Snapshot under the lock; Tags() is invoked after releasing it.
    with self._accumulators_mutex:
        pairs = list(six.iteritems(self._accumulators))
    return {run_name: a.Tags() for run_name, a in pairs}
f8098:c0:m15
def RunPaths(self):
    """Returns a dict mapping run names to event file paths."""
    return self._paths
f8098:c0:m16
def GetAccumulator(self, run):
    """Returns EventAccumulator for a given run.

    Args:
      run: String name of run.

    Returns:
      An EventAccumulator object.

    Raises:
      KeyError: If run does not exist.
    """
    # Guarded lookup so concurrent AddRun/Reload can't race with us.
    with self._accumulators_mutex:
        return self._accumulators[run]
f8098:c0:m17
def initialize_schema(connection):
    """Initializes the TensorBoard sqlite schema using the given connection.

    Args:
      connection: A sqlite DB connection.
    """
    cursor = connection.cursor()
    # Stamp the database with TensorBoard's application id and user version.
    cursor.execute("<STR_LIT>".format(_TENSORBOARD_APPLICATION_ID))
    cursor.execute("<STR_LIT>".format(_TENSORBOARD_USER_VERSION))
    # Apply every schema statement within a single transaction.
    with connection:
        for statement in _SCHEMA_STATEMENTS:
            lines = statement.strip('<STR_LIT:\n>').split('<STR_LIT:\n>')
            # Log only the first line of multi-line statements.
            message = lines[0] + ('<STR_LIT>' if len(lines) > 1 else '<STR_LIT>')
            logger.debug('<STR_LIT>', message)
            cursor.execute(statement)
f8100:m0
def __init__(self, db_connection_provider):
    """Constructs a SqliteWriterEventSink.

    Args:
      db_connection_provider: Provider function for creating a DB connection.
    """
    # Eagerly acquire the connection; this sink keeps it for its lifetime.
    self._db = db_connection_provider()
f8100:c0:m0
def _create_id(self):
    """Returns a freshly created DB-wide unique ID."""
    cursor = self._db.cursor()
    # The executed statement inserts a row; its rowid is the new unique ID.
    cursor.execute('<STR_LIT>')
    return cursor.lastrowid
f8100:c0:m2
def _maybe_init_user(self):
    """Returns the ID for the current user, creating the row if needed."""
    # Determine the user name from environment variables, falling back from
    # the first to the second (presumably USER then USERNAME — confirm).
    user_name = os.environ.get('<STR_LIT>', '<STR_LIT>') or os.environ.get('<STR_LIT>', '<STR_LIT>')
    cursor = self._db.cursor()
    cursor.execute('<STR_LIT>',
                   (user_name,))
    row = cursor.fetchone()
    if row:
        # User already exists; first column is its ID.
        return row[0]
    user_id = self._create_id()
    cursor.execute(
        """<STR_LIT>""",
        (user_id, user_name, time.time()))
    return user_id
f8100:c0:m3
def _maybe_init_experiment(self, experiment_name):
    """Returns the ID for the given experiment, creating the row if needed.

    Args:
      experiment_name: name of experiment.
    """
    user_id = self._maybe_init_user()
    cursor = self._db.cursor()
    cursor.execute(
        """<STR_LIT>""",
        (user_id, experiment_name))
    row = cursor.fetchone()
    if row:
        # Experiment already recorded; return its ID.
        return row[0]
    experiment_id = self._create_id()
    # New experiments start with a zero computed_time.
    computed_time = 0
    cursor.execute(
        """<STR_LIT>""",
        (user_id, experiment_id, experiment_name, time.time(), computed_time,
         False))
    return experiment_id
f8100:c0:m4
def _maybe_init_run(self, experiment_name, run_name):
    """Returns the ID for the given run, creating the row if needed.

    Args:
      experiment_name: name of experiment containing this run.
      run_name: name of run.
    """
    # Ensure the parent experiment row exists first.
    experiment_id = self._maybe_init_experiment(experiment_name)
    cursor = self._db.cursor()
    cursor.execute(
        """<STR_LIT>""",
        (experiment_id, run_name))
    row = cursor.fetchone()
    if row:
        # Run already recorded; return its ID.
        return row[0]
    run_id = self._create_id()
    # New runs start with a zero started_time.
    started_time = 0
    cursor.execute(
        """<STR_LIT>""",
        (experiment_id, run_id, run_name, time.time(), started_time))
    return run_id
f8100:c0:m5
def _maybe_init_tags(self, run_id, tag_to_metadata):
    """Returns a tag-to-ID map for the given tags, creating rows if needed.

    Args:
      run_id: the ID of the run to which these tags belong.
      tag_to_metadata: map of tag name to SummaryMetadata for the tag.
    """
    cursor = self._db.cursor()
    cursor.execute('<STR_LIT>',
                   (run_id,))
    # Existing tags for this run, restricted to the tags we were asked about.
    tag_to_id = {row[0]: row[1] for row in cursor.fetchall()
                 if row[0] in tag_to_metadata}
    new_tag_data = []
    for tag, metadata in six.iteritems(tag_to_metadata):
        if tag not in tag_to_id:
            tag_id = self._create_id()
            tag_to_id[tag] = tag_id
            new_tag_data.append((run_id, tag_id, tag, time.time(),
                                 metadata.display_name,
                                 metadata.plugin_data.plugin_name,
                                 self._make_blob(metadata.plugin_data.content)))
    # Insert all newly discovered tags in a single batch.
    cursor.executemany(
        """<STR_LIT>""",
        new_tag_data)
    return tag_to_id
f8100:c0:m6
def write_summaries(self, tagged_data, experiment_name, run_name):
    """Transactionally writes the given tagged summary data to the DB.

    Args:
      tagged_data: map from tag to TagData instances.
      experiment_name: name of experiment.
      run_name: name of run.
    """
    logger.debug('<STR_LIT>', len(tagged_data))
    # `with self._db` wraps everything below in one sqlite transaction.
    with self._db:
        self._db.execute('<STR_LIT>')
        run_id = self._maybe_init_run(experiment_name, run_name)
        tag_to_metadata = {
            tag: tagdata.metadata for tag, tagdata in six.iteritems(tagged_data)
        }
        tag_to_id = self._maybe_init_tags(run_id, tag_to_metadata)
        tensor_values = []
        for tag, tagdata in six.iteritems(tagged_data):
            tag_id = tag_to_id[tag]
            for step, wall_time, tensor_proto in tagdata.values:
                dtype = tensor_proto.dtype
                shape = '<STR_LIT:U+002C>'.join(
                    str(d.size) for d in tensor_proto.tensor_shape.dim)
                # Prefer the raw tensor_content bytes; fall back to
                # serializing the materialized ndarray.
                data = self._make_blob(
                    tensor_proto.tensor_content or
                    tensor_util.make_ndarray(tensor_proto).tobytes())
                tensor_values.append(
                    (tag_id, step, wall_time, dtype, shape, data))
        self._db.executemany(
            """<STR_LIT>""",
            tensor_values)
f8100:c0:m7
def __init__(self,
             db_connection_provider,
             purge_orphaned_data,
             max_reload_threads,
             use_import_op):
    """Constructor for `DbImportMultiplexer`.

    Args:
      db_connection_provider: Provider function for creating a DB connection.
      purge_orphaned_data: Whether to discard any events that were "orphaned"
        by a TensorFlow restart.
      max_reload_threads: The max number of threads that TensorBoard can use
        to reload runs. Each thread reloads one run at a time. If not
        provided, reloads runs serially (one after another).
      use_import_op: If True, use TensorFlow's import_event() op for imports,
        otherwise use TensorBoard's own sqlite ingestion logic.
    """
    logger.info('<STR_LIT>')
    self._db_connection_provider = db_connection_provider
    self._purge_orphaned_data = purge_orphaned_data
    self._max_reload_threads = max_reload_threads
    self._use_import_op = use_import_op
    self._event_sink = None
    self._run_loaders = {}
    if self._purge_orphaned_data:
        # Purging is only warned about here, not performed.
        logger.warn('<STR_LIT>')
    conn = self._db_connection_provider()
    # Resolve the on-disk path of the 'main' database from the attached
    # database list (presumably PRAGMA database_list — confirm).
    rows = conn.execute('<STR_LIT>').fetchall()
    db_name_to_path = {row[1]: row[2] for row in rows}
    self._db_path = db_name_to_path['<STR_LIT>']
    logger.info('<STR_LIT>', self._db_path)
    conn.execute('<STR_LIT>')
    conn.execute('<STR_LIT>')
    sqlite_writer.initialize_schema(conn)
    logger.info('<STR_LIT>')
f8101:c0:m0
def AddRunsFromDirectory(self, path, name=None):
    """Load runs from a directory; recursively walks subdirectories.

    If path doesn't exist, no-op. This ensures that it is safe to call
    `AddRunsFromDirectory` multiple times, even before the directory is made.

    Args:
      path: A string path to a directory to load runs from.
      name: Optional, specifies a name for the experiment under which the
        runs from this directory hierarchy will be imported. If omitted, the
        path will be used as the name.

    Raises:
      ValueError: If the path exists and isn't a directory.
    """
    logger.info('<STR_LIT>', path, name)
    for subdir in io_wrapper.GetLogdirSubdirectories(path):
        logger.info('<STR_LIT>', subdir)
        # Only create a loader the first time we see a subdirectory.
        if subdir not in self._run_loaders:
            logger.info('<STR_LIT>', subdir)
            experiment_name, run_name = self._get_exp_and_run_names(
                path, subdir, name)
            self._run_loaders[subdir] = _RunLoader(
                subdir=subdir,
                experiment_name=experiment_name,
                run_name=run_name)
    logger.info('<STR_LIT>', path)
f8101:c0:m2
def Reload(self):
logger.info('<STR_LIT>')<EOL>if not self._event_sink:<EOL><INDENT>self._event_sink = self._CreateEventSink()<EOL><DEDENT>loader_queue = collections.deque(six.itervalues(self._run_loaders))<EOL>loader_delete_queue = collections.deque()<EOL>def batch_generator():<EOL><INDENT>while True:<EOL><INDENT>try:<EOL><INDENT>loader = loader_queue.popleft()<EOL><DEDENT>except IndexError:<EOL><INDENT>return<EOL><DEDENT>try:<EOL><INDENT>for batch in loader.load_batches():<EOL><INDENT>yield batch<EOL><DEDENT><DEDENT>except directory_watcher.DirectoryDeletedError:<EOL><INDENT>loader_delete_queue.append(loader)<EOL><DEDENT>except (OSError, IOError) as e:<EOL><INDENT>logger.error('<STR_LIT>', loader.subdir, e)<EOL><DEDENT><DEDENT><DEDENT>num_threads = min(self._max_reload_threads, len(self._run_loaders))<EOL>if num_threads <= <NUM_LIT:1>:<EOL><INDENT>logger.info('<STR_LIT>')<EOL>for batch in batch_generator():<EOL><INDENT>self._event_sink.write_batch(batch)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>output_queue = queue.Queue()<EOL>sentinel = object()<EOL>def producer():<EOL><INDENT>try:<EOL><INDENT>for batch in batch_generator():<EOL><INDENT>output_queue.put(batch)<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>output_queue.put(sentinel)<EOL><DEDENT><DEDENT>logger.info('<STR_LIT>', num_threads)<EOL>for i in xrange(num_threads):<EOL><INDENT>thread = threading.Thread(target=producer, name='<STR_LIT>' % i)<EOL>thread.daemon = True<EOL>thread.start()<EOL><DEDENT>num_live_threads = num_threads<EOL>while num_live_threads > <NUM_LIT:0>:<EOL><INDENT>output = output_queue.get()<EOL>if output == sentinel:<EOL><INDENT>num_live_threads -= <NUM_LIT:1><EOL>continue<EOL><DEDENT>self._event_sink.write_batch(output)<EOL><DEDENT><DEDENT>for loader in loader_delete_queue:<EOL><INDENT>logger.warn('<STR_LIT>', loader.subdir)<EOL>del self._run_loaders[loader.subdir]<EOL><DEDENT>logger.info('<STR_LIT>')<EOL>
Load events from every detected run.
f8101:c0:m3
def __init__(self, subdir, experiment_name, run_name):
self._subdir = subdir<EOL>self._experiment_name = experiment_name<EOL>self._run_name = run_name<EOL>self._directory_watcher = directory_watcher.DirectoryWatcher(<EOL>subdir,<EOL>event_file_loader.RawEventFileLoader,<EOL>io_wrapper.IsTensorFlowEventsFile)<EOL>
Constructs a `_RunLoader`. Args: subdir: string, filesystem path of the run directory experiment_name: string, name of the run's experiment run_name: string, name of the run
f8101:c1:m0
def load_batches(self):
event_iterator = self._directory_watcher.Load()<EOL>while True:<EOL><INDENT>events = []<EOL>event_bytes = <NUM_LIT:0><EOL>start = time.time()<EOL>for event_proto in event_iterator:<EOL><INDENT>events.append(event_proto)<EOL>event_bytes += len(event_proto)<EOL>if len(events) >= self._BATCH_COUNT or event_bytes >= self._BATCH_BYTES:<EOL><INDENT>break<EOL><DEDENT><DEDENT>elapsed = time.time() - start<EOL>logger.debug('<STR_LIT>',<EOL>elapsed, self._subdir)<EOL>if not events:<EOL><INDENT>return<EOL><DEDENT>yield _EventBatch(<EOL>events=events,<EOL>experiment_name=self._experiment_name,<EOL>run_name=self._run_name)<EOL><DEDENT>
Returns a batched event iterator over the run directory event files.
f8101:c1:m2
@abc.abstractmethod<EOL><INDENT>def write_batch(self, event_batch):<DEDENT>
raise NotImplementedError()<EOL>
Writes the given event batch to the sink. Args: event_batch: an _EventBatch of event data.
f8101:c2:m0
def __init__(self, db_path):
self._db_path = db_path<EOL>self._writer_fn_cache = {}<EOL>
Constructs an ImportOpEventSink. Args: db_path: string, filesystem path of the DB file to open
f8101:c3:m0
def __init__(self, db_connection_provider):
self._writer = sqlite_writer.SqliteWriter(db_connection_provider)<EOL>
Constructs a SqliteWriterEventSink. Args: db_connection_provider: Provider function for creating a DB connection.
f8101:c4:m0
def _process_event(self, event, tagged_data):
event_type = event.WhichOneof('<STR_LIT>')<EOL>if event_type == '<STR_LIT>':<EOL><INDENT>for value in event.summary.value:<EOL><INDENT>value = data_compat.migrate_value(value)<EOL>tag, metadata, values = tagged_data.get(value.tag, (None, None, []))<EOL>values.append((event.step, event.wall_time, value.tensor))<EOL>if tag is None:<EOL><INDENT>tagged_data[value.tag] = sqlite_writer.TagData(<EOL>value.tag, value.metadata, values)<EOL><DEDENT><DEDENT><DEDENT>elif event_type == '<STR_LIT>':<EOL><INDENT>pass <EOL><DEDENT>elif event_type == '<STR_LIT>':<EOL><INDENT>if event.session_log.status == event_pb2.SessionLog.START:<EOL><INDENT>pass <EOL><DEDENT><DEDENT>elif event_type in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>pass <EOL><DEDENT>elif event_type == '<STR_LIT>':<EOL><INDENT>pass<EOL><DEDENT>
Processes a single tf.Event and records it in tagged_data.
f8101:c4:m2
def AddScalarTensor(self, tag, wall_time=<NUM_LIT:0>, step=<NUM_LIT:0>, value=<NUM_LIT:0>):
tensor = tensor_util.make_tensor_proto(float(value))<EOL>event = event_pb2.Event(<EOL>wall_time=wall_time,<EOL>step=step,<EOL>summary=summary_pb2.Summary(<EOL>value=[summary_pb2.Summary.Value(tag=tag, tensor=tensor)]))<EOL>self.AddEvent(event)<EOL>
Add a rank-0 tensor event. Note: This is not related to the scalar plugin; it's just a convenience function to add an event whose contents aren't important.
f8102:c0:m2
def add_event(self, event):
self.AddEvent(event)<EOL>
Match the EventWriter API.
f8102:c0:m4
def get_logdir(self):
return self._testcase.get_temp_dir()<EOL>
Return a temp directory for asset writing.
f8102:c0:m5
def assertTagsEqual(self, actual, expected):
empty_tags = {<EOL>ea.GRAPH: False,<EOL>ea.META_GRAPH: False,<EOL>ea.RUN_METADATA: [],<EOL>ea.TENSORS: [],<EOL>}<EOL>self.assertItemsEqual(actual.keys(), empty_tags.keys())<EOL>for key in actual:<EOL><INDENT>expected_value = expected.get(key, empty_tags[key])<EOL>if isinstance(expected_value, list):<EOL><INDENT>self.assertItemsEqual(actual[key], expected_value)<EOL><DEDENT>else:<EOL><INDENT>self.assertEqual(actual[key], expected_value)<EOL><DEDENT><DEDENT>
Utility method for checking the return value of the Tags() call. It fills out the `expected` arg with the default (empty) values for every tag type, so that the author needs only specify the non-empty values they are interested in testing. Args: actual: The actual Accumulator tags response. expected: The expected tags response (empty fields may be omitted)
f8102:c1:m0
def _writeMetadata(self, logdir, summary_metadata, nonce='<STR_LIT>'):
summary = summary_pb2.Summary()<EOL>summary.value.add(<EOL>tensor=tensor_util.make_tensor_proto(['<STR_LIT>', '<STR_LIT>', '<STR_LIT:to>'], dtype=tf.string),<EOL>tag='<STR_LIT>',<EOL>metadata=summary_metadata)<EOL>writer = test_util.FileWriter(logdir, filename_suffix=nonce)<EOL>writer.add_summary(summary.SerializeToString())<EOL>writer.close()<EOL>
Write to disk a summary with the given metadata. Arguments: logdir: a string summary_metadata: a `SummaryMetadata` protobuf object nonce: optional; will be added to the end of the event file name to guarantee that multiple calls to this function do not stomp the same file
f8102:c3:m2
def tensor_size_guidance_from_flags(flags):
tensor_size_guidance = dict(DEFAULT_TENSOR_SIZE_GUIDANCE)<EOL>if not flags or not flags.samples_per_plugin:<EOL><INDENT>return tensor_size_guidance<EOL><DEDENT>for token in flags.samples_per_plugin.split('<STR_LIT:U+002C>'):<EOL><INDENT>k, v = token.strip().split('<STR_LIT:=>')<EOL>tensor_size_guidance[k] = int(v)<EOL><DEDENT>return tensor_size_guidance<EOL>
Apply user per-summary size guidance overrides.
f8104:m0
def standard_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider):
multiplexer = event_multiplexer.EventMultiplexer(<EOL>size_guidance=DEFAULT_SIZE_GUIDANCE,<EOL>tensor_size_guidance=tensor_size_guidance_from_flags(flags),<EOL>purge_orphaned_data=flags.purge_orphaned_data,<EOL>max_reload_threads=flags.max_reload_threads)<EOL>loading_multiplexer = multiplexer<EOL>reload_interval = flags.reload_interval<EOL>reload_task = flags.reload_task<EOL>if reload_task == '<STR_LIT>' and flags.db_import and flags.db_import_use_op:<EOL><INDENT>reload_task == '<STR_LIT>'<EOL><DEDENT>db_uri = flags.db<EOL>if flags.db_import and not flags.db:<EOL><INDENT>tmpdir = tempfile.mkdtemp(prefix='<STR_LIT>')<EOL>atexit.register(shutil.rmtree, tmpdir)<EOL>db_uri = '<STR_LIT>' % tmpdir<EOL><DEDENT>db_module, db_connection_provider = get_database_info(db_uri)<EOL>if flags.db_import:<EOL><INDENT>if db_module != sqlite3:<EOL><INDENT>raise base_plugin.FlagsError('<STR_LIT>')<EOL><DEDENT>logger.info('<STR_LIT>', db_uri)<EOL>loading_multiplexer = db_import_multiplexer.DbImportMultiplexer(<EOL>db_connection_provider=db_connection_provider,<EOL>purge_orphaned_data=flags.purge_orphaned_data,<EOL>max_reload_threads=flags.max_reload_threads,<EOL>use_import_op=flags.db_import_use_op)<EOL><DEDENT>elif flags.db:<EOL><INDENT>reload_interval = -<NUM_LIT:1><EOL><DEDENT>plugin_name_to_instance = {}<EOL>context = base_plugin.TBContext(<EOL>db_module=db_module,<EOL>db_connection_provider=db_connection_provider,<EOL>db_uri=db_uri,<EOL>flags=flags,<EOL>logdir=flags.logdir,<EOL>multiplexer=multiplexer,<EOL>assets_zip_provider=assets_zip_provider,<EOL>plugin_name_to_instance=plugin_name_to_instance,<EOL>window_title=flags.window_title)<EOL>plugins = []<EOL>for loader in plugin_loaders:<EOL><INDENT>plugin = loader.load(context)<EOL>if plugin is None:<EOL><INDENT>continue<EOL><DEDENT>plugins.append(plugin)<EOL>plugin_name_to_instance[plugin.plugin_name] = plugin<EOL><DEDENT>return TensorBoardWSGIApp(flags.logdir, plugins, loading_multiplexer,<EOL>reload_interval, 
flags.path_prefix,<EOL>reload_task)<EOL>
Construct a TensorBoardWSGIApp with standard plugins and multiplexer. Args: flags: An argparse.Namespace containing TensorBoard CLI flags. plugin_loaders: A list of TBLoader instances. assets_zip_provider: See TBContext documentation for more information. Returns: The new TensorBoard WSGI application. :type plugin_loaders: list[base_plugin.TBLoader] :rtype: TensorBoardWSGI
f8104:m1
def TensorBoardWSGIApp(logdir, plugins, multiplexer, reload_interval,<EOL>path_prefix='<STR_LIT>', reload_task='<STR_LIT>'):
path_to_run = parse_event_files_spec(logdir)<EOL>if reload_interval >= <NUM_LIT:0>:<EOL><INDENT>start_reloading_multiplexer(multiplexer, path_to_run, reload_interval,<EOL>reload_task)<EOL><DEDENT>return TensorBoardWSGI(plugins, path_prefix)<EOL>
Constructs the TensorBoard application. Args: logdir: the logdir spec that describes where data will be loaded. may be a directory, or comma,separated list of directories, or colons can be used to provide named directories plugins: A list of base_plugin.TBPlugin subclass instances. multiplexer: The EventMultiplexer with TensorBoard data to serve reload_interval: How often (in seconds) to reload the Multiplexer. Zero means reload just once at startup; negative means never load. path_prefix: A prefix of the path when app isn't served from root. reload_task: Indicates the type of background task to reload with. Returns: A WSGI application that implements the TensorBoard backend. Raises: ValueError: If something is wrong with the plugin configuration. :type plugins: list[base_plugin.TBPlugin] :rtype: TensorBoardWSGI
f8104:m2
def parse_event_files_spec(logdir):
files = {}<EOL>if logdir is None:<EOL><INDENT>return files<EOL><DEDENT>uri_pattern = re.compile('<STR_LIT>')<EOL>for specification in logdir.split('<STR_LIT:U+002C>'):<EOL><INDENT>if (uri_pattern.match(specification) is None and '<STR_LIT::>' in specification and<EOL>specification[<NUM_LIT:0>] != '<STR_LIT:/>' and not os.path.splitdrive(specification)[<NUM_LIT:0>]):<EOL><INDENT>run_name, _, path = specification.partition('<STR_LIT::>')<EOL><DEDENT>else:<EOL><INDENT>run_name = None<EOL>path = specification<EOL><DEDENT>if uri_pattern.match(path) is None:<EOL><INDENT>path = os.path.realpath(os.path.expanduser(path))<EOL><DEDENT>files[path] = run_name<EOL><DEDENT>return files<EOL>
Parses `logdir` into a map from paths to run group names. The events files flag format is a comma-separated list of path specifications. A path specification either looks like 'group_name:/path/to/directory' or '/path/to/directory'; in the latter case, the group is unnamed. Group names cannot start with a forward slash: /foo:bar/baz will be interpreted as a spec with no name and path '/foo:bar/baz'. Globs are not supported. Args: logdir: A comma-separated list of run specifications. Returns: A dict mapping directory paths to names like {'/path/to/directory': 'name'}. Groups without an explicit name are named after their path. If logdir is None, returns an empty dict, which is helpful for testing things that don't require any valid runs.
f8104:m3
def start_reloading_multiplexer(multiplexer, path_to_run, load_interval,<EOL>reload_task):
if load_interval < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>' % load_interval)<EOL><DEDENT>def _reload():<EOL><INDENT>while True:<EOL><INDENT>start = time.time()<EOL>logger.info('<STR_LIT>')<EOL>for path, name in six.iteritems(path_to_run):<EOL><INDENT>multiplexer.AddRunsFromDirectory(path, name)<EOL><DEDENT>logger.info('<STR_LIT>')<EOL>multiplexer.Reload()<EOL>duration = time.time() - start<EOL>logger.info('<STR_LIT>', duration)<EOL>if load_interval == <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT>time.sleep(load_interval)<EOL><DEDENT><DEDENT>if reload_task == '<STR_LIT>':<EOL><INDENT>logger.info('<STR_LIT>')<EOL>import multiprocessing<EOL>process = multiprocessing.Process(target=_reload, name='<STR_LIT>')<EOL>process.daemon = True<EOL>process.start()<EOL><DEDENT>elif reload_task in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>logger.info('<STR_LIT>')<EOL>thread = threading.Thread(target=_reload, name='<STR_LIT>')<EOL>thread.daemon = True<EOL>thread.start()<EOL><DEDENT>elif reload_task == '<STR_LIT>':<EOL><INDENT>if load_interval != <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>_reload()<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % reload_task)<EOL><DEDENT>
Starts automatically reloading the given multiplexer. If `load_interval` is positive, the thread will reload the multiplexer by calling `ReloadMultiplexer` every `load_interval` seconds, starting immediately. Otherwise, reloads the multiplexer once and never again. Args: multiplexer: The `EventMultiplexer` to add runs to and reload. path_to_run: A dict mapping from paths to run names, where `None` as the run name is interpreted as a run name equal to the path. load_interval: An integer greater than or equal to 0. If positive, how many seconds to wait after one load before starting the next load. Otherwise, reloads the multiplexer once and never again (no continuous reloading). reload_task: Indicates the type of background task to reload with. Raises: ValueError: If `load_interval` is negative.
f8104:m4
def get_database_info(db_uri):
if not db_uri:<EOL><INDENT>return None, None<EOL><DEDENT>scheme = urlparse.urlparse(db_uri).scheme<EOL>if scheme == '<STR_LIT>':<EOL><INDENT>return sqlite3, create_sqlite_connection_provider(db_uri)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' + db_uri)<EOL><DEDENT>
Returns TBContext fields relating to SQL database. Args: db_uri: A string URI expressing the DB file, e.g. "sqlite:~/tb.db". Returns: A tuple with the db_module and db_connection_provider TBContext fields. If db_uri was empty, then (None, None) is returned. Raises: ValueError: If db_uri scheme is not supported.
f8104:m5
def create_sqlite_connection_provider(db_uri):
uri = urlparse.urlparse(db_uri)<EOL>if uri.scheme != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>' + db_uri)<EOL><DEDENT>if uri.netloc:<EOL><INDENT>raise ValueError('<STR_LIT>' + db_uri)<EOL><DEDENT>if uri.path == '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>' + db_uri)<EOL><DEDENT>path = os.path.expanduser(uri.path)<EOL>params = _get_connect_params(uri.query)<EOL>return lambda: sqlite3.connect(path, **params)<EOL>
Returns function that returns SQLite Connection objects. Args: db_uri: A string URI expressing the DB file, e.g. "sqlite:~/tb.db". Returns: A function that returns a new PEP-249 DB Connection, which must be closed, each time it is called. Raises: ValueError: If db_uri is not a valid sqlite file URI.
f8104:m6
def _clean_path(path, path_prefix="<STR_LIT>"):
if path != path_prefix + '<STR_LIT:/>' and path.endswith('<STR_LIT:/>'):<EOL><INDENT>return path[:-<NUM_LIT:1>]<EOL><DEDENT>return path<EOL>
Cleans the path of the request. Removes the ending '/' if the request begins with the path prefix and pings a non-empty route. Arguments: path: The path of a request. path_prefix: The prefix string that every route of this TensorBoard instance starts with. Returns: The route to use to serve the request (with the path prefix stripped if applicable).
f8104:m8
def __init__(self, plugins, path_prefix='<STR_LIT>'):
self._plugins = plugins<EOL>if path_prefix.endswith('<STR_LIT:/>'):<EOL><INDENT>self._path_prefix = path_prefix[:-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>self._path_prefix = path_prefix<EOL><DEDENT>self.data_applications = {<EOL>self._path_prefix + DATA_PREFIX + PLUGINS_LISTING_ROUTE:<EOL>self._serve_plugins_listing,<EOL>}<EOL>plugin_names_encountered = set()<EOL>for plugin in self._plugins:<EOL><INDENT>if plugin.plugin_name is None:<EOL><INDENT>raise ValueError('<STR_LIT>' % plugin)<EOL><DEDENT>if not _VALID_PLUGIN_RE.match(plugin.plugin_name):<EOL><INDENT>raise ValueError('<STR_LIT>' % (plugin,<EOL>plugin.plugin_name))<EOL><DEDENT>if plugin.plugin_name in plugin_names_encountered:<EOL><INDENT>raise ValueError('<STR_LIT>' % plugin.plugin_name)<EOL><DEDENT>plugin_names_encountered.add(plugin.plugin_name)<EOL>try:<EOL><INDENT>plugin_apps = plugin.get_plugin_apps()<EOL><DEDENT>except Exception as e: <EOL><INDENT>if type(plugin) is core_plugin.CorePlugin: <EOL><INDENT>raise<EOL><DEDENT>logger.warn('<STR_LIT>',<EOL>plugin.plugin_name, str(e))<EOL>continue<EOL><DEDENT>for route, app in plugin_apps.items():<EOL><INDENT>if not route.startswith('<STR_LIT:/>'):<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(plugin.plugin_name, route))<EOL><DEDENT>if type(plugin) is core_plugin.CorePlugin: <EOL><INDENT>path = self._path_prefix + route<EOL><DEDENT>else:<EOL><INDENT>path = (self._path_prefix + DATA_PREFIX + PLUGIN_PREFIX + '<STR_LIT:/>' +<EOL>plugin.plugin_name + route)<EOL><DEDENT>self.data_applications[path] = app<EOL><DEDENT><DEDENT>
Constructs TensorBoardWSGI instance. Args: plugins: A list of base_plugin.TBPlugin subclass instances. flags: An argparse.Namespace containing TensorBoard CLI flags. Returns: A WSGI application for the set of all TBPlugin instances. Raises: ValueError: If some plugin has no plugin_name ValueError: If some plugin has an invalid plugin_name (plugin names must only contain [A-Za-z0-9_.-]) ValueError: If two plugins have the same plugin_name ValueError: If some plugin handles a route that does not start with a slash :type plugins: list[base_plugin.TBPlugin]
f8104:c0:m0
@wrappers.Request.application<EOL><INDENT>def _serve_plugins_listing(self, request):<DEDENT>
response = {}<EOL>for plugin in self._plugins:<EOL><INDENT>start = time.time()<EOL>response[plugin.plugin_name] = plugin.is_active()<EOL>elapsed = time.time() - start<EOL>logger.info(<EOL>'<STR_LIT>',<EOL>plugin.plugin_name, elapsed)<EOL><DEDENT>return http_util.Respond(request, response, '<STR_LIT:application/json>')<EOL>
Serves an object mapping plugin name to whether it is enabled. Args: request: The werkzeug.Request object. Returns: A werkzeug.Response object.
f8104:c0:m1
def __call__(self, environ, start_response):
request = wrappers.Request(environ)<EOL>parsed_url = urlparse.urlparse(request.path)<EOL>clean_path = _clean_path(parsed_url.path, self._path_prefix)<EOL>if clean_path in self.data_applications:<EOL><INDENT>return self.data_applications[clean_path](environ, start_response)<EOL><DEDENT>else:<EOL><INDENT>logger.warn('<STR_LIT>', clean_path)<EOL>return http_util.Respond(request, '<STR_LIT>', '<STR_LIT>', code=<NUM_LIT>)(<EOL>environ, start_response)<EOL><DEDENT>
Central entry point for the TensorBoard application. This method handles routing to sub-applications. It does simple routing using regular expression matching. This __call__ method conforms to the WSGI spec, so that instances of this class are WSGI applications. Args: environ: See WSGI spec. start_response: See WSGI spec. Returns: A werkzeug Response.
f8104:c0:m2
def __init__(self,<EOL>context,<EOL>plugin_name,<EOL>is_active_value,<EOL>routes_mapping,<EOL>construction_callback=None):
self.plugin_name = plugin_name<EOL>self._is_active_value = is_active_value<EOL>self._routes_mapping = routes_mapping<EOL>if construction_callback:<EOL><INDENT>construction_callback(context)<EOL><DEDENT>
Constructs a fake plugin. Args: context: The TBContext magic container. Contains properties that are potentially useful to this plugin. plugin_name: The name of this plugin. is_active_value: Whether the plugin is active. routes_mapping: A dictionary mapping from route (string URL path) to the method called when a user issues a request to that route. construction_callback: An optional callback called when the plugin is constructed. The callback is passed the TBContext.
f8105:c1:m0
def get_plugin_apps(self):
return self._routes_mapping<EOL>
Returns a mapping from routes to handlers offered by this plugin. Returns: A dictionary mapping from routes to handlers offered by this plugin.
f8105:c1:m1
def is_active(self):
return self._is_active_value<EOL>
Returns whether this plugin is active. Returns: A boolean. Whether this plugin is active.
f8105:c1:m2
def assertPlatformSpecificLogdirParsing(self, pathObj, logdir, expected):
with mock.patch('<STR_LIT>', pathObj):<EOL><INDENT>self.assertEqual(application.parse_event_files_spec(logdir), expected)<EOL><DEDENT>
A custom assertion to test :func:`parse_event_files_spec` under various systems. Args: pathObj: a custom replacement object for `os.path`, typically `posixpath` or `ntpath` logdir: the string to be parsed by :func:`~application.TensorBoardWSGIApp.parse_event_files_spec` expected: the expected dictionary as returned by :func:`~application.TensorBoardWSGIApp.parse_event_files_spec`
f8105:c6:m0
def _construction_callback(self, context):
self.context = context<EOL>
Called when a plugin is constructed.
f8105:c7:m3
def Respond(request,<EOL>content,<EOL>content_type,<EOL>code=<NUM_LIT:200>,<EOL>expires=<NUM_LIT:0>,<EOL>content_encoding=None,<EOL>encoding='<STR_LIT:utf-8>'):
mimetype = _EXTRACT_MIMETYPE_PATTERN.search(content_type).group(<NUM_LIT:0>)<EOL>charset_match = _EXTRACT_CHARSET_PATTERN.search(content_type)<EOL>charset = charset_match.group(<NUM_LIT:1>) if charset_match else encoding<EOL>textual = charset_match or mimetype in _TEXTUAL_MIMETYPES<EOL>if (mimetype in _JSON_MIMETYPES and<EOL>isinstance(content, (dict, list, set, tuple))):<EOL><INDENT>content = json.dumps(json_util.Cleanse(content, encoding),<EOL>ensure_ascii=not charset_match)<EOL><DEDENT>if charset != encoding:<EOL><INDENT>content = tf.compat.as_text(content, encoding)<EOL><DEDENT>content = tf.compat.as_bytes(content, charset)<EOL>if textual and not charset_match and mimetype not in _JSON_MIMETYPES:<EOL><INDENT>content_type += '<STR_LIT>' + charset<EOL><DEDENT>gzip_accepted = _ALLOWS_GZIP_PATTERN.search(<EOL>request.headers.get('<STR_LIT>', '<STR_LIT>'))<EOL>if textual and not content_encoding and gzip_accepted:<EOL><INDENT>out = six.BytesIO()<EOL>with gzip.GzipFile(fileobj=out, mode='<STR_LIT:wb>', compresslevel=<NUM_LIT:3>, mtime=<NUM_LIT:0>) as f:<EOL><INDENT>f.write(content)<EOL><DEDENT>content = out.getvalue()<EOL>content_encoding = '<STR_LIT>'<EOL><DEDENT>content_length = len(content)<EOL>direct_passthrough = False<EOL>if content_encoding == '<STR_LIT>' and not gzip_accepted:<EOL><INDENT>gzip_file = gzip.GzipFile(fileobj=six.BytesIO(content), mode='<STR_LIT:rb>')<EOL>content_length = struct.unpack('<STR_LIT>', content[-<NUM_LIT:4>:])[<NUM_LIT:0>]<EOL>content = werkzeug.wsgi.wrap_file(request.environ, gzip_file)<EOL>content_encoding = None<EOL>direct_passthrough = True<EOL><DEDENT>headers = []<EOL>headers.append(('<STR_LIT>', str(content_length)))<EOL>if content_encoding:<EOL><INDENT>headers.append(('<STR_LIT>', content_encoding))<EOL><DEDENT>if expires > <NUM_LIT:0>:<EOL><INDENT>e = wsgiref.handlers.format_date_time(time.time() + float(expires))<EOL>headers.append(('<STR_LIT>', e))<EOL>headers.append(('<STR_LIT>', '<STR_LIT>' % 
expires))<EOL><DEDENT>else:<EOL><INDENT>headers.append(('<STR_LIT>', '<STR_LIT:0>'))<EOL>headers.append(('<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT>if request.method == '<STR_LIT>':<EOL><INDENT>content = None<EOL><DEDENT>return werkzeug.wrappers.Response(<EOL>response=content, status=code, headers=headers, content_type=content_type,<EOL>direct_passthrough=direct_passthrough)<EOL>
Construct a werkzeug Response. Responses are transmitted to the browser with compression if: a) the browser supports it; b) it's sane to compress the content_type in question; and c) the content isn't already compressed, as indicated by the content_encoding parameter. Browser and proxy caching is completely disabled by default. If the expires parameter is greater than zero then the response will be able to be cached by the browser for that many seconds; however, proxies are still forbidden from caching so that developers can bypass the cache with Ctrl+Shift+R. For textual content that isn't JSON, the encoding parameter is used as the transmission charset which is automatically appended to the Content-Type header. That is unless of course the content_type parameter contains a charset parameter. If the two disagree, the characters in content will be transcoded to the latter. If content_type declares a JSON media type, then content MAY be a dict, list, tuple, or set, in which case this function has an implicit composition with json_util.Cleanse and json.dumps. The encoding parameter is used to decode byte strings within the JSON object; therefore transmitting binary data within JSON is not permitted. JSON is transmitted as ASCII unless the content_type parameter explicitly defines a charset parameter, in which case the serialized JSON bytes will use that instead of escape sequences. Args: request: A werkzeug Request object. Used mostly to check the Accept-Encoding header. content: Payload data as byte string, unicode string, or maybe JSON. content_type: Media type and optionally an output charset. code: Numeric HTTP status code to use. expires: Second duration for browser caching. content_encoding: Encoding if content is already encoded, e.g. 'gzip'. encoding: Input charset if content parameter has byte strings. Returns: A werkzeug Response object (a WSGI application).
f8106:m0
def prepare_graph_for_ui(graph, limit_attr_size=<NUM_LIT>,<EOL>large_attrs_key='<STR_LIT>'):
<EOL>if limit_attr_size is not None:<EOL><INDENT>if large_attrs_key is None:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if limit_attr_size <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>' %<EOL>limit_attr_size)<EOL><DEDENT><DEDENT>if limit_attr_size is not None:<EOL><INDENT>for node in graph.node:<EOL><INDENT>keys = list(node.attr.keys())<EOL>for key in keys:<EOL><INDENT>size = node.attr[key].ByteSize()<EOL>if size > limit_attr_size or size < <NUM_LIT:0>:<EOL><INDENT>del node.attr[key]<EOL>node.attr[large_attrs_key].list.s.append(tf.compat.as_bytes(key))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>
Prepares (modifies in-place) the graph to be served to the front-end. For now, it supports filtering out attributes that are too large to be shown in the graph UI. Args: graph: The GraphDef proto message. limit_attr_size: Maximum allowed size in bytes, before the attribute is considered large. Default is 1024 (1KB). Must be > 0 or None. If None, there will be no filtering. large_attrs_key: The attribute key that will be used for storing attributes that are too large. Default is '_too_large_attrs'. Must be != None if `limit_attr_size` is != None. Raises: ValueError: If `large_attrs_key is None` while `limit_attr_size != None`. ValueError: If `limit_attr_size` is defined, but <= 0.
f8107:m0
def lazy_load(name):
def wrapper(load_fn):<EOL><INDENT>@_memoize<EOL>def load_once(self):<EOL><INDENT>if load_once.loading:<EOL><INDENT>raise ImportError("<STR_LIT>" % name)<EOL><DEDENT>load_once.loading = True<EOL>try:<EOL><INDENT>module = load_fn()<EOL><DEDENT>finally:<EOL><INDENT>load_once.loading = False<EOL><DEDENT>self.__dict__.update(module.__dict__)<EOL>load_once.loaded = True<EOL>return module<EOL><DEDENT>load_once.loading = False<EOL>load_once.loaded = False<EOL>class LazyModule(types.ModuleType):<EOL><INDENT>def __getattr__(self, attr_name):<EOL><INDENT>return getattr(load_once(self), attr_name)<EOL><DEDENT>def __dir__(self):<EOL><INDENT>return dir(load_once(self))<EOL><DEDENT>def __repr__(self):<EOL><INDENT>if load_once.loaded:<EOL><INDENT>return '<STR_LIT>' % load_once(self)<EOL><DEDENT>return '<STR_LIT>' % self.__name__<EOL><DEDENT><DEDENT>return LazyModule(name)<EOL><DEDENT>return wrapper<EOL>
Decorator to define a function that lazily loads the module 'name'. This can be used to defer importing troublesome dependencies - e.g. ones that are large and infrequently used, or that cause a dependency cycle - until they are actually used. Args: name: the fully-qualified name of the module; typically the last segment of 'name' matches the name of the decorated function Returns: Decorator function that produces a lazy-loading module 'name' backed by the underlying decorated function.
f8108:m0
def _memoize(f):
nothing = object() <EOL>cache = {}<EOL>lock = threading.RLock()<EOL>@functools.wraps(f)<EOL>def wrapper(arg):<EOL><INDENT>if cache.get(arg, nothing) is nothing:<EOL><INDENT>with lock:<EOL><INDENT>if cache.get(arg, nothing) is nothing:<EOL><INDENT>cache[arg] = f(arg)<EOL><DEDENT><DEDENT><DEDENT>return cache[arg]<EOL><DEDENT>return wrapper<EOL>
Memoizing decorator for f, which must have exactly 1 hashable argument.
f8108:m1
def reexport_tf_summary():
import sys <EOL>packages = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>]<EOL>if not getattr(tf, '<STR_LIT>', '<STR_LIT>').startswith('<STR_LIT>'): <EOL><INDENT>packages.remove('<STR_LIT>')<EOL><DEDENT>def dynamic_wildcard_import(module):<EOL><INDENT>"""<STR_LIT>"""<EOL>symbols = getattr(module, '<STR_LIT>', None)<EOL>if symbols is None:<EOL><INDENT>symbols = [k for k in module.__dict__.keys() if not k.startswith('<STR_LIT:_>')]<EOL><DEDENT>globals().update({symbol: getattr(module, symbol) for symbol in symbols})<EOL><DEDENT>notfound = object() <EOL>for package_name in packages:<EOL><INDENT>package = sys.modules.get(package_name, notfound)<EOL>if package is notfound:<EOL><INDENT>continue<EOL><DEDENT>module = getattr(package, '<STR_LIT>', None)<EOL>if module is None:<EOL><INDENT>continue<EOL><DEDENT>dynamic_wildcard_import(module)<EOL>return<EOL><DEDENT>
Re-export all symbols from the original tf.summary. This function finds the original tf.summary V2 API and re-exports all the symbols from it within this module as well, so that when this module is patched into the TF API namespace as the new tf.summary, the effect is an overlay that just adds TensorBoard-provided symbols to the module. Finding the original tf.summary V2 API module reliably is a challenge, since this code runs *during* the overall TF API import process and depending on the order of imports (which is subject to change), different parts of the API may or may not be defined at the point in time we attempt to access them. This code also may be inserted into two places in the API (tf and tf.compat.v2) and may be re-executed multiple times even for the same place in the API (due to the TF module import system not populating sys.modules properly), so it needs to be robust to many different scenarios. The one constraint we can count on is that everywhere this module is loaded (via the component_api_helper mechanism in TF), it's going to be the 'summary' submodule of a larger API package that already has a 'summary' attribute that contains the TF-only summary API symbols we need to re-export. This may either be the original TF-only summary module (the first time we load this module) or a pre-existing copy of this module (if we're re-loading this module again). We don't actually need to differentiate those two cases, because it's okay if we re-import our own TensorBoard-provided symbols; they will just be overwritten later on in this file. So given that guarantee, the approach we take is to first attempt to locate a TF V2 API package that already has a 'summary' attribute (most likely this is the parent package into which we're being imported, but not necessarily), and then do the dynamic version of "from tf_api_package.summary import *". Lastly, this logic is encapsulated in a function to avoid symbol leakage.
f8109:m0
def __init__(self, logdir, max_queue_size=<NUM_LIT:10>, flush_secs=<NUM_LIT>, filename_suffix='<STR_LIT>'):
self._logdir = logdir<EOL>if not os.path.exists(logdir):<EOL><INDENT>os.makedirs(logdir)<EOL><DEDENT>self._file_name = os.path.join(logdir, "<STR_LIT>" %<EOL>(time.time(), socket.gethostname(), os.getpid(), _global_uid.get())) + filename_suffix <EOL>self._general_file_writer = open(self._file_name, '<STR_LIT:wb>')<EOL>self._async_writer = _AsyncWriter(RecordWriter(self._general_file_writer), max_queue_size, flush_secs)<EOL>_event = event_pb2.Event(wall_time=time.time(), file_version='<STR_LIT>')<EOL>self.add_event(_event)<EOL>self.flush()<EOL>
Creates a `EventFileWriter` and an event file to write to. On construction the summary writer creates a new event file in `logdir`. This event file will contain `Event` protocol buffers, which are written to disk via the add_event method. The other arguments to the constructor control the asynchronous writes to the event file: Args: logdir: A string. Directory where event file will be written. max_queue_size: Integer. Size of the queue for pending events and summaries. flush_secs: Number. How often, in seconds, to flush the pending events and summaries to disk.
f8110:c1:m0
def get_logdir(self):
return self._logdir<EOL>
Returns the directory where event file will be written.
f8110:c1:m1
def add_event(self, event):
if not isinstance(event, event_pb2.Event):<EOL><INDENT>raise TypeError("<STR_LIT>"<EOL>"<STR_LIT>" % type(event))<EOL><DEDENT>self._async_writer.write(event.SerializeToString())<EOL>
Adds an event to the event file. Args: event: An `Event` protocol buffer.
f8110:c1:m2
def flush(self):
self._async_writer.flush()<EOL>
Flushes the event file to disk. Call this method to make sure that all pending events have been written to disk.
f8110:c1:m3
def close(self):
self._async_writer.close()<EOL>
Performs a final flush of the event file to disk, stops the write/flush worker and closes the file. Call this method when you do not need the summary writer anymore.
f8110:c1:m4
def __init__(self, record_writer, max_queue_size=<NUM_LIT:20>, flush_secs=<NUM_LIT>):
self._writer = record_writer<EOL>self._closed = False<EOL>self._byte_queue = six.moves.queue.Queue(max_queue_size)<EOL>self._worker = _AsyncWriterThread(self._byte_queue, self._writer, flush_secs)<EOL>self._lock = threading.Lock()<EOL>self._worker.start()<EOL>
Writes bytes to a file asynchronously. An instance of this class holds a queue to keep the incoming data temporarily. Data passed to the `write` function will be put to the queue and the function returns immediately. This class also maintains a thread to write data in the queue to disk. The first initialization parameter is an instance of `tensorboard.summary.record_writer` which computes the CRC checksum and then write the combined result to the disk. So we use an async approach to improve performance. Args: record_writer: A RecordWriter instance max_queue_size: Integer. Size of the queue for pending bytestrings. flush_secs: Number. How often, in seconds, to flush the pending bytestrings to disk.
f8110:c2:m0
def flush(self):
with self._lock:<EOL><INDENT>if self._closed:<EOL><INDENT>raise IOError('<STR_LIT>')<EOL><DEDENT>self._byte_queue.join()<EOL>self._writer.flush()<EOL><DEDENT>
Write all the enqueued bytestring before this flush call to disk. Block until all the above bytestring are written.
f8110:c2:m2
def close(self):
if not self._closed:<EOL><INDENT>with self._lock:<EOL><INDENT>if not self._closed:<EOL><INDENT>self._closed = True<EOL>self._worker.stop()<EOL>self._writer.flush()<EOL>self._writer.close()<EOL><DEDENT><DEDENT><DEDENT>
Closes the underlying writer, flushing any pending writes first.
f8110:c2:m3
def __init__(self, queue, record_writer, flush_secs):
threading.Thread.__init__(self)<EOL>self.daemon = True<EOL>self._queue = queue<EOL>self._record_writer = record_writer<EOL>self._flush_secs = flush_secs<EOL>self._next_flush_time = <NUM_LIT:0><EOL>self._has_pending_data = False<EOL>self._shutdown_signal = object()<EOL>
Creates an _AsyncWriterThread. Args: queue: A Queue from which to dequeue data. record_writer: An instance of record_writer writer. flush_secs: How often, in seconds, to flush the pending file to disk.
f8110:c3:m0
def __init__(self, writer):
self._writer = writer<EOL>
Open a file to keep the tensorboard records. Args: writer: A file-like object that implements `write`, `flush` and `close`.
f8114:c0:m0
def _get_context():
<EOL>try:<EOL><INDENT>import google.colab<EOL>import IPython<EOL><DEDENT>except ImportError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if IPython.get_ipython() is not None:<EOL><INDENT>return _CONTEXT_COLAB<EOL><DEDENT><DEDENT>try:<EOL><INDENT>import IPython<EOL><DEDENT>except ImportError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>ipython = IPython.get_ipython()<EOL>if ipython is not None and ipython.has_trait("<STR_LIT>"):<EOL><INDENT>return _CONTEXT_IPYTHON<EOL><DEDENT><DEDENT>return _CONTEXT_NONE<EOL>
Determine the most specific context that we're in. Returns: _CONTEXT_COLAB: If in Colab with an IPython notebook context. _CONTEXT_IPYTHON: If not in Colab, but we are in an IPython notebook context (e.g., from running `jupyter notebook` at the command line). _CONTEXT_NONE: Otherwise (e.g., by running a Python script at the command-line or using the `ipython` interactive shell).
f8120:m0
def load_ipython_extension(ipython):
raise RuntimeError(<EOL>"<STR_LIT>"<EOL>)<EOL>
Deprecated: use `%load_ext tensorboard` instead. Raises: RuntimeError: Always.
f8120:m1
def _load_ipython_extension(ipython):
_register_magics(ipython)<EOL>
Load the TensorBoard notebook extension. Intended to be called from `%load_ext tensorboard`. Do not invoke this directly. Args: ipython: An `IPython.InteractiveShell` instance.
f8120:m2