| python_code | repo_name | file_path |
|---|---|---|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger that flattens nested data."""
from typing import Sequence
from acme.utils.loggers import base
class FlattenDictLogger(base.Logger):
"""Logger which flattens sub-dictionaries into the top level dict."""
def __init__(self,
logger: base.Logger,
label: str = 'Logs',
raw_keys: Sequence[str] = ()):
"""Initializer.
Args:
logger: The wrapped logger.
label: The label to add as a prefix to all keys except for raw ones.
raw_keys: The keys that should not be prefixed. The values for these keys
must always be flat. Metric visualisation tools may require certain
keys to be present in the logs (e.g. 'step', 'timestamp'), so these
keys should not be prefixed.
"""
self._logger = logger
self._label = label
self._raw_keys = raw_keys
def write(self, values: base.LoggingData):
flattened_values = {}
for key, value in values.items():
if key in self._raw_keys:
flattened_values[key] = value
continue
name = f'{self._label}/{key}'
if isinstance(value, dict):
for sub_key, sub_value in value.items():
flattened_values[f'{name}/{sub_key}'] = sub_value
else:
flattened_values[name] = value
self._logger.write(flattened_values)
def close(self):
self._logger.close()
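A minimal usage sketch for `FlattenDictLogger` (assuming the `acme` package is installed), wrapping the in-memory logger defined in `dataframe.py` to show how nested metrics are flattened and how `raw_keys` bypass the label prefix:

```python
from acme.utils import loggers

inner = loggers.InMemoryLogger()
logger = loggers.FlattenDictLogger(inner, label='Learner', raw_keys=('steps',))
# Nested sub-dictionaries are flattened; 'steps' is passed through unprefixed.
logger.write({'steps': 7, 'loss': {'actor': 0.1, 'critic': 0.2}})
print(inner.data[-1])
# {'steps': 7, 'Learner/loss/actor': 0.1, 'Learner/loss/critic': 0.2}
```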
| acme-master | acme/utils/loggers/flatten.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for logging to the terminal."""
import logging
import time
from typing import Any, Callable
from acme.utils.loggers import base
import numpy as np
def _format_key(key: str) -> str:
"""Internal function for formatting keys."""
return key.replace('_', ' ').title()
def _format_value(value: Any) -> str:
"""Internal function for formatting values."""
value = base.to_numpy(value)
if isinstance(value, (float, np.number)):
return f'{value:0.3f}'
return f'{value}'
def serialize(values: base.LoggingData) -> str:
"""Converts `values` to a pretty-printed string.
This takes a dictionary `values` whose keys are strings and returns
a formatted string such that each [key, value] pair is separated by ' = ' and
each entry is separated by ' | '. The keys are sorted alphabetically to ensure
a consistent order, and snake case is split into words.
For example:
values = {'a': 1, 'b': 2.33333333, 'c': 'hello', 'big_value': 10}
# Returns 'A = 1 | B = 2.333 | Big Value = 10 | C = hello'
values_string = serialize(values)
Args:
values: A dictionary with string keys.
Returns:
A formatted string.
"""
return ' | '.join(f'{_format_key(k)} = {_format_value(v)}'
for k, v in sorted(values.items()))
class TerminalLogger(base.Logger):
"""Logs to terminal."""
def __init__(
self,
label: str = '',
print_fn: Callable[[str], None] = logging.info,
serialize_fn: Callable[[base.LoggingData], str] = serialize,
time_delta: float = 0.0,
):
"""Initializes the logger.
Args:
label: label string to use when logging.
print_fn: function to call which acts like print.
serialize_fn: function to call which transforms values into a str.
time_delta: How often (in seconds) to write values. This can be used to
minimize terminal spam, but it is 0 by default, i.e. everything is written.
"""
self._print_fn = print_fn
self._serialize_fn = serialize_fn
self._label = label and f'[{_format_key(label)}] '
self._time = time.time()
self._time_delta = time_delta
def write(self, values: base.LoggingData):
now = time.time()
if (now - self._time) > self._time_delta:
self._print_fn(f'{self._label}{self._serialize_fn(values)}')
self._time = now
def close(self):
pass
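A short sketch of the terminal logger in use (assuming `acme` is installed); `print` is passed as `print_fn` so output goes to stdout rather than absl logging:

```python
from acme.utils.loggers import terminal

logger = terminal.TerminalLogger(label='training', print_fn=print)
logger.write({'episode_return': 12.5, 'steps': 300})
# Prints something like: [Training] Episode Return = 12.500 | Steps = 300
logger.close()
```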
| acme-master | acme/utils/loggers/terminal.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for logging filters."""
import time
from acme.utils.loggers import base
from acme.utils.loggers import filters
from absl.testing import absltest
# TODO(jaslanides): extract this to test_utils, or similar, for re-use.
class FakeLogger(base.Logger):
"""A fake logger for testing."""
def __init__(self):
self.data = []
def write(self, data):
self.data.append(data)
@property
def last_write(self):
return self.data[-1]
def close(self):
pass
class GatedFilterTest(absltest.TestCase):
def test_logarithmic_filter(self):
logger = FakeLogger()
filtered = filters.GatedFilter.logarithmic(logger, n=10)
for t in range(100):
filtered.write({'t': t})
rows = [row['t'] for row in logger.data]
self.assertEqual(rows, [*range(10), *range(10, 100, 10)])
def test_periodic_filter(self):
logger = FakeLogger()
filtered = filters.GatedFilter.periodic(logger, interval=10)
for t in range(100):
filtered.write({'t': t})
rows = [row['t'] for row in logger.data]
self.assertEqual(rows, list(range(0, 100, 10)))
class TimeFilterTest(absltest.TestCase):
def test_delta(self):
logger = FakeLogger()
filtered = filters.TimeFilter(logger, time_delta=0.1)
# Logged.
filtered.write({'foo': 1})
self.assertIn('foo', logger.last_write)
# *Not* logged.
filtered.write({'bar': 2})
self.assertNotIn('bar', logger.last_write)
# Wait out delta.
time.sleep(0.11)
# Logged.
filtered.write({'baz': 3})
self.assertIn('baz', logger.last_write)
self.assertLen(logger.data, 2)
class KeyFilterTest(absltest.TestCase):
def test_keep_filter(self):
logger = FakeLogger()
filtered = filters.KeyFilter(logger, keep=('foo',))
filtered.write({'foo': 'bar', 'baz': 12})
row, *_ = logger.data
self.assertIn('foo', row)
self.assertNotIn('baz', row)
def test_drop_filter(self):
logger = FakeLogger()
filtered = filters.KeyFilter(logger, drop=('foo',))
filtered.write({'foo': 'bar', 'baz': 12})
row, *_ = logger.data
self.assertIn('baz', row)
self.assertNotIn('foo', row)
def test_bad_arguments(self):
with self.assertRaises(ValueError):
filters.KeyFilter(FakeLogger())
with self.assertRaises(ValueError):
filters.KeyFilter(FakeLogger(), keep=('a',), drop=('b',))
if __name__ == '__main__':
absltest.main()
| acme-master | acme/utils/loggers/filters_test.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger for writing to an in-memory list.
This is convenient for e.g. interactive usage via Google Colab.
For example, for usage with pandas:
```python
from acme.utils import loggers
import pandas as pd
logger = InMemoryLogger()
# ...
logger.write({'foo': 1.337, 'bar': 420})
results = pd.DataFrame(logger.data)
```
"""
from typing import Sequence
from acme.utils.loggers import base
class InMemoryLogger(base.Logger):
"""A simple logger that keeps all data in memory."""
def __init__(self):
self._data = []
def write(self, data: base.LoggingData):
self._data.append(data)
def close(self):
pass
@property
def data(self) -> Sequence[base.LoggingData]:
return self._data
| acme-master | acme/utils/loggers/dataframe.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Acme loggers."""
from acme.utils.loggers.aggregators import Dispatcher
from acme.utils.loggers.asynchronous import AsyncLogger
from acme.utils.loggers.auto_close import AutoCloseLogger
from acme.utils.loggers.base import Logger
from acme.utils.loggers.base import LoggerFactory
from acme.utils.loggers.base import LoggerLabel
from acme.utils.loggers.base import LoggerStepsKey
from acme.utils.loggers.base import LoggingData
from acme.utils.loggers.base import NoOpLogger
from acme.utils.loggers.base import TaskInstance
from acme.utils.loggers.base import to_numpy
from acme.utils.loggers.constant import ConstantLogger
from acme.utils.loggers.csv import CSVLogger
from acme.utils.loggers.dataframe import InMemoryLogger
from acme.utils.loggers.filters import GatedFilter
from acme.utils.loggers.filters import KeyFilter
from acme.utils.loggers.filters import NoneFilter
from acme.utils.loggers.filters import TimeFilter
from acme.utils.loggers.flatten import FlattenDictLogger
from acme.utils.loggers.default import make_default_logger # pylint: disable=g-bad-import-order
from acme.utils.loggers.terminal import TerminalLogger
from acme.utils.loggers.timestamp import TimestampLogger
# Internal imports.
| acme-master | acme/utils/loggers/__init__.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default logger."""
import logging
from typing import Any, Callable, Mapping, Optional
from acme.utils.loggers import aggregators
from acme.utils.loggers import asynchronous as async_logger
from acme.utils.loggers import base
from acme.utils.loggers import csv
from acme.utils.loggers import filters
from acme.utils.loggers import terminal
def make_default_logger(
label: str,
save_data: bool = True,
time_delta: float = 1.0,
asynchronous: bool = False,
print_fn: Optional[Callable[[str], None]] = None,
serialize_fn: Optional[Callable[[Mapping[str, Any]], str]] = base.to_numpy,
steps_key: str = 'steps',
) -> base.Logger:
"""Makes a default Acme logger.
Args:
label: Name to give to the logger.
save_data: Whether to persist data.
time_delta: Time (in seconds) between logging events.
asynchronous: Whether the write function should block or not.
print_fn: How to print to terminal (defaults to print).
serialize_fn: An optional function to apply to the write inputs before
passing them to the various loggers.
steps_key: Ignored.
Returns:
A logger object that responds to logger.write(some_dict).
"""
del steps_key
if not print_fn:
print_fn = logging.info
terminal_logger = terminal.TerminalLogger(label=label, print_fn=print_fn)
loggers = [terminal_logger]
if save_data:
loggers.append(csv.CSVLogger(label=label))
# Dispatch to all writers and filter Nones and by time.
logger = aggregators.Dispatcher(loggers, serialize_fn)
logger = filters.NoneFilter(logger)
if asynchronous:
logger = async_logger.AsyncLogger(logger)
logger = filters.TimeFilter(logger, time_delta)
return logger
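A minimal sketch of building the default logger (assumptions: `acme` is installed and only terminal output is wanted, hence `save_data=False`):

```python
from acme.utils import loggers

logger = loggers.make_default_logger(
    'evaluator', save_data=False, time_delta=0.0, print_fn=print)
logger.write({'episode_return': 3.0, 'debug': None})  # None values are filtered out.
logger.close()
```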
| acme-master | acme/utils/loggers/default.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for aggregating to other loggers."""
from typing import Callable, Optional, Sequence
from acme.utils.loggers import base
class Dispatcher(base.Logger):
"""Writes data to multiple `Logger` objects."""
def __init__(
self,
to: Sequence[base.Logger],
serialize_fn: Optional[Callable[[base.LoggingData], str]] = None,
):
"""Initialize `Dispatcher` connected to several `Logger` objects."""
self._to = to
self._serialize_fn = serialize_fn
def write(self, values: base.LoggingData):
"""Writes `values` to the underlying `Logger` objects."""
if self._serialize_fn:
values = self._serialize_fn(values)
for logger in self._to:
logger.write(values)
def close(self):
for logger in self._to:
logger.close()
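A small sketch (assuming `acme` is installed) that fans writes out to both an in-memory logger and the terminal via `Dispatcher`:

```python
from acme.utils import loggers

memory = loggers.InMemoryLogger()
term = loggers.TerminalLogger(label='actor', print_fn=print)
logger = loggers.Dispatcher([memory, term])
logger.write({'reward': 1.0})  # Forwarded to both loggers.
logger.close()
```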
| acme-master | acme/utils/loggers/aggregators.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image logger."""
import os
from acme.testing import test_utils
from acme.utils.loggers import image
import numpy as np
from PIL import Image
from absl.testing import absltest
class ImageTest(test_utils.TestCase):
def test_save_load_identity(self):
directory = self.get_tempdir()
logger = image.ImageLogger(directory, label='foo')
array = (np.random.rand(10, 10) * 255).astype(np.uint8)
logger.write({'img': array})
with open(f'{directory}/foo/img_000000.png', mode='rb') as f:
out = np.asarray(Image.open(f))
np.testing.assert_array_equal(array, out)
def test_indexing(self):
directory = self.get_tempdir()
logger = image.ImageLogger(directory, label='foo')
zeros = np.zeros(shape=(3, 3), dtype=np.uint8)
logger.write({'img': zeros, 'other_img': zeros + 1})
logger.write({'img': zeros - 1})
logger.write({'other_img': zeros + 1})
logger.write({'other_img': zeros + 2})
fnames = sorted(os.listdir(f'{directory}/foo'))
expected = [
'img_000000.png',
'img_000001.png',
'other_img_000000.png',
'other_img_000001.png',
'other_img_000002.png',
]
self.assertEqual(fnames, expected)
if __name__ == '__main__':
absltest.main()
| acme-master | acme/utils/loggers/image_test.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for csv logging."""
import csv
import os
from acme.testing import test_utils
from acme.utils import paths
from acme.utils.loggers import csv as csv_logger
from absl.testing import absltest
from absl.testing import parameterized
_TEST_INPUTS = [{
'c': 'foo',
'a': '1337',
'b': '42.0001',
}, {
'c': 'foo2',
'a': '1338',
'b': '43.0001',
}]
class CSVLoggingTest(test_utils.TestCase):
def test_logging_input_is_directory(self):
# Set up logger.
directory = self.get_tempdir()
label = 'test'
logger = csv_logger.CSVLogger(directory_or_file=directory, label=label)
# Write data and close.
for inp in _TEST_INPUTS:
logger.write(inp)
logger.close()
# Read back data.
outputs = []
with open(logger.file_path) as f:
csv_reader = csv.DictReader(f)
for row in csv_reader:
outputs.append(dict(row))
self.assertEqual(outputs, _TEST_INPUTS)
@parameterized.parameters(True, False)
def test_logging_input_is_file(self, add_uid: bool):
# Set up logger.
directory = paths.process_path(
self.get_tempdir(), 'logs', 'my_label', add_uid=add_uid)
file = open(os.path.join(directory, 'logs.csv'), 'a')
logger = csv_logger.CSVLogger(directory_or_file=file, add_uid=add_uid)
# Write data and close.
for inp in _TEST_INPUTS:
logger.write(inp)
logger.close()
# Logger doesn't close the file; caller must do this manually.
self.assertFalse(file.closed)
file.close()
# Read back data.
outputs = []
with open(logger.file_path) as f:
csv_reader = csv.DictReader(f)
for row in csv_reader:
outputs.append(dict(row))
self.assertEqual(outputs, _TEST_INPUTS)
def test_flush(self):
logger = csv_logger.CSVLogger(self.get_tempdir(), flush_every=1)
for inp in _TEST_INPUTS:
logger.write(inp)
# Read back data.
outputs = []
with open(logger.file_path) as f:
csv_reader = csv.DictReader(f)
for row in csv_reader:
outputs.append(dict(row))
self.assertEqual(outputs, _TEST_INPUTS)
if __name__ == '__main__':
absltest.main()
| acme-master | acme/utils/loggers/csv_test.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple CSV logger.
Warning: Does not support preemption.
"""
import csv
import os
import time
from typing import TextIO, Union
from absl import logging
from acme.utils import paths
from acme.utils.loggers import base
class CSVLogger(base.Logger):
"""Standard CSV logger.
The fields are inferred from the first call to write() and any additional
fields afterwards are ignored.
TODO(jaslanides): Consider making this stateless/robust to preemption.
"""
_open = open
def __init__(
self,
directory_or_file: Union[str, TextIO] = '~/acme',
label: str = '',
time_delta: float = 0.,
add_uid: bool = True,
flush_every: int = 30,
):
"""Instantiates the logger.
Args:
directory_or_file: Either a directory path as a string, or a file TextIO
object.
label: Extra label to add to logger. This is added as a suffix to the
directory.
time_delta: Interval in seconds between which writes are dropped to
throttle throughput.
add_uid: Whether to add a UID to the file path. See `paths.process_path`
for details.
flush_every: Interval (in writes) between flushes.
"""
if flush_every <= 0:
raise ValueError(
f'`flush_every` must be a positive integer (got {flush_every}).')
self._last_log_time = time.time() - time_delta
self._time_delta = time_delta
self._flush_every = flush_every
self._add_uid = add_uid
self._writer = None
self._file_owner = False
self._file = self._create_file(directory_or_file, label)
self._writes = 0
logging.info('Logging to %s', self.file_path)
def _create_file(
self,
directory_or_file: Union[str, TextIO],
label: str,
) -> TextIO:
"""Opens a file if input is a directory or use existing file."""
if isinstance(directory_or_file, str):
directory = paths.process_path(
directory_or_file, 'logs', label, add_uid=self._add_uid)
file_path = os.path.join(directory, 'logs.csv')
self._file_owner = True
return self._open(file_path, mode='a')
# TextIO instance.
file = directory_or_file
if label:
logging.info('File, not directory, passed to CSVLogger; label not used.')
if not file.mode.startswith('a'):
raise ValueError('File must be open in append mode; instead got '
f'mode="{file.mode}".')
return file
def write(self, data: base.LoggingData):
"""Writes a `data` into a row of comma-separated values."""
# Only log if `time_delta` seconds have passed since last logging event.
now = time.time()
# TODO(b/192227744): Remove this in favour of filters.TimeFilter.
elapsed = now - self._last_log_time
if elapsed < self._time_delta:
logging.debug('Not due to log for another %.2f seconds, dropping data.',
self._time_delta - elapsed)
return
self._last_log_time = now
# Append row to CSV.
data = base.to_numpy(data)
# Use fields from initial `data` to create the header. If extra fields are
# present in subsequent `data`, we ignore them.
if not self._writer:
fields = sorted(data.keys())
self._writer = csv.DictWriter(self._file, fieldnames=fields,
extrasaction='ignore')
# Write header only if the file is empty.
if not self._file.tell():
self._writer.writeheader()
self._writer.writerow(data)
# Flush every `flush_every` writes.
if self._writes % self._flush_every == 0:
self.flush()
self._writes += 1
def close(self):
self.flush()
if self._file_owner:
self._file.close()
def flush(self):
self._file.flush()
@property
def file_path(self) -> str:
return self._file.name
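A brief usage sketch for the CSV logger; the base directory here is only an example path, and `flush_every=1` is set so each row hits disk immediately:

```python
from acme.utils.loggers import csv as csv_logger

logger = csv_logger.CSVLogger('/tmp/acme_logs', label='experiment', flush_every=1)
logger.write({'step': 0, 'loss': 1.23})
logger.write({'step': 1, 'loss': 0.98})
logger.close()
print('Results written to', logger.file_path)  # Path of the generated logs.csv.
```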
| acme-master | acme/utils/loggers/csv.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger which self closes on exit if not closed yet."""
import weakref
from acme.utils.loggers import base
class AutoCloseLogger(base.Logger):
"""Logger which auto closes itself on exit if not already closed."""
def __init__(self, logger: base.Logger):
self._logger = logger
# The finalizer "logger.close" is invoked in one of the following scenarios:
# 1) the current logger is garbage collected;
# 2) as documented for weakref.finalize, when the program exits, each
# remaining live finalizer is called.
# Note that in the normal flow, where "close" is explicitly called,
# the finalizer is marked as dead using the detach function so that
# the underlying logger is not closed twice (once explicitly and once
# implicitly when the object is garbage collected or when the program exits).
self._finalizer = weakref.finalize(self, logger.close)
def write(self, values: base.LoggingData):
self._logger.write(values)
def close(self):
if self._finalizer.detach():
self._logger.close()
self._logger = None
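A tiny sketch of the auto-closing wrapper (assuming `acme` is installed); if `close()` is never called explicitly, the weakref finalizer closes the wrapped logger at garbage collection or interpreter exit:

```python
from acme.utils import loggers

logger = loggers.AutoCloseLogger(loggers.InMemoryLogger())
logger.write({'x': 1})
# Explicit close detaches the finalizer, so the wrapped logger is closed once.
logger.close()
```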
| acme-master | acme/utils/loggers/auto_close.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for logging to a tf.summary."""
import time
from typing import Optional
from absl import logging
from acme.utils.loggers import base
import tensorflow as tf
def _format_key(key: str) -> str:
"""Internal function for formatting keys in Tensorboard format."""
return key.title().replace('_', '')
class TFSummaryLogger(base.Logger):
"""Logs to a tf.summary created in a given logdir.
If multiple TFSummaryLoggers are created with the same logdir, results will
be categorized by label.
"""
def __init__(
self,
logdir: str,
label: str = 'Logs',
steps_key: Optional[str] = None
):
"""Initializes the logger.
Args:
logdir: directory to which we should log files.
label: label string to use when logging. Default to 'Logs'.
steps_key: key to use for steps. Must be in the values passed to write.
"""
self._time = time.time()
self.label = label
self._iter = 0
self.summary = tf.summary.create_file_writer(logdir)
self._steps_key = steps_key
def write(self, values: base.LoggingData):
if self._steps_key is not None and self._steps_key not in values:
logging.warning('steps key %s not found. Skip logging.', self._steps_key)
return
step = values[
self._steps_key] if self._steps_key is not None else self._iter
with self.summary.as_default():
# TODO(b/159065169): Remove this suppression once the bug is resolved.
# pytype: disable=unsupported-operands
for key in values.keys() - [self._steps_key]:
# pytype: enable=unsupported-operands
tf.summary.scalar(
f'{self.label}/{_format_key(key)}', data=values[key], step=step)
self._iter += 1
def close(self):
self.summary.close()
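A short sketch of logging to TensorBoard with this writer; the log directory is an arbitrary example, and `steps_key='step'` means the step value is read from the written dictionary:

```python
from acme.utils.loggers import tf_summary

logger = tf_summary.TFSummaryLogger('/tmp/tb', label='Learner', steps_key='step')
logger.write({'step': 10, 'loss': 0.5})  # Writes scalar 'Learner/Loss' at step 10.
logger.close()
```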
| acme-master | acme/utils/loggers/tf_summary.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger for values that remain constant."""
from acme.utils.loggers import base
class ConstantLogger(base.Logger):
"""Logger for values that remain constant throughout the experiment.
This logger is used to log additional values that do not change during an
experiment, e.g. level_name or hyperparameters. Having these values makes it
possible to group or facet plots when analysing data post-experiment.
"""
def __init__(
self,
constant_data: base.LoggingData,
to: base.Logger,
):
"""Initialise the extra info logger.
Args:
constant_data: Key-value pairs containing the constant info to be logged.
to: The logger to which this extra info is added.
"""
self._constant_data = constant_data
self._to = to
def write(self, data: base.LoggingData):
self._to.write({**self._constant_data, **data})
def close(self):
self._to.close()
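A minimal sketch showing constant experiment metadata being merged into every row (assuming `acme` is installed):

```python
from acme.utils import loggers

inner = loggers.InMemoryLogger()
logger = loggers.ConstantLogger({'level_name': 'cartpole', 'seed': 0}, to=inner)
logger.write({'episode_return': 200.0})
print(inner.data[-1])
# {'level_name': 'cartpole', 'seed': 0, 'episode_return': 200.0}
```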
| acme-master | acme/utils/loggers/constant.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timestamp logger."""
import time
from acme.utils.loggers import base
class TimestampLogger(base.Logger):
"""Logger which populates the timestamp key with the current timestamp."""
def __init__(self, logger: base.Logger, timestamp_key: str):
self._logger = logger
self._timestamp_key = timestamp_key
def write(self, values: base.LoggingData):
values = dict(values)
values[self._timestamp_key] = time.time()
self._logger.write(values)
def close(self):
self._logger.close()
| acme-master | acme/utils/loggers/timestamp.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An image logger, for writing out arrays to disk as PNG."""
import collections
import pathlib
from typing import Optional
from absl import logging
from acme.utils.loggers import base
from PIL import Image
class ImageLogger(base.Logger):
"""Logger for writing NumPy arrays as PNG images to disk.
Assumes that all data passed are NumPy arrays that can be converted to images.
TODO(jaslanides): Make this stateless/robust to preemptions.
"""
def __init__(
self,
directory: str,
*,
label: str = '',
mode: Optional[str] = None,
):
"""Initialises the writer.
Args:
directory: Base directory to which images are logged.
label: Optional subdirectory in which to save images.
mode: Image mode for use with Pillow. If `None` (default), mode is
determined by data type. See [0] for details.
[0] https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
"""
self._path = self._get_path(directory, label)
if not self._path.exists():
self._path.mkdir(parents=True)
self._mode = mode
self._indices = collections.defaultdict(int)
def write(self, data: base.LoggingData):
for k, v in data.items():
image = Image.fromarray(v, mode=self._mode)
path = self._path / f'{k}_{self._indices[k]:06}.png'
self._indices[k] += 1
with path.open(mode='wb') as f:
logging.info('Writing image to %s.', str(path))
image.save(f)
def close(self):
pass
@property
def directory(self) -> str:
return str(self._path)
def _get_path(self, *args, **kwargs) -> pathlib.Path:
return pathlib.Path(*args, **kwargs)
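A quick sketch of writing a single frame to disk with the image logger; the directory is an arbitrary example path:

```python
import numpy as np
from acme.utils.loggers import image

logger = image.ImageLogger('/tmp/acme_images', label='eval')
frame = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
logger.write({'frame': frame})  # Saved as /tmp/acme_images/eval/frame_000000.png
```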
| acme-master | acme/utils/loggers/image.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base logger."""
import abc
from typing import Any, Mapping, Optional
import numpy as np
import tree
from typing_extensions import Protocol
LoggingData = Mapping[str, Any]
class Logger(abc.ABC):
"""A logger has a `write` method."""
@abc.abstractmethod
def write(self, data: LoggingData) -> None:
"""Writes `data` to destination (file, terminal, database, etc)."""
@abc.abstractmethod
def close(self) -> None:
"""Closes the logger, not expecting any further write."""
TaskInstance = int
# TODO(stanczyk): Turn LoggerLabel into an enum of [Learner, Actor, Evaluator].
LoggerLabel = str
LoggerStepsKey = str
class LoggerFactory(Protocol):
def __call__(self,
label: LoggerLabel,
steps_key: Optional[LoggerStepsKey] = None,
instance: Optional[TaskInstance] = None) -> Logger:
...
class NoOpLogger(Logger):
"""Simple Logger which does nothing and outputs no logs.
This should be used sparingly, but it can prove useful if we want to quiet an
individual component and have it produce no logging whatsoever.
"""
def write(self, data: LoggingData):
pass
def close(self):
pass
def tensor_to_numpy(value: Any):
if hasattr(value, 'numpy'):
return value.numpy() # tf.Tensor (TF2).
if hasattr(value, 'device_buffer'):
return np.asarray(value) # jnp.DeviceArray.
return value
def to_numpy(values: Any):
"""Converts tensors in a nested structure to numpy.
Converts tensors from TensorFlow to Numpy if needed without importing TF
dependency.
Args:
values: nested structure with numpy and / or TF tensors.
Returns:
Same nested structure as values, but with numpy tensors.
"""
return tree.map_structure(tensor_to_numpy, values)
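For illustration, a hypothetical custom logger implementing the `base.Logger` interface above; `PrefixLogger` and its prefix argument are made up for this sketch:

```python
from acme.utils.loggers import base

class PrefixLogger(base.Logger):
  """Toy logger that prints every row with a fixed prefix."""

  def __init__(self, prefix: str):
    self._prefix = prefix

  def write(self, data: base.LoggingData) -> None:
    # Convert any TF/JAX tensors to numpy before printing.
    print(self._prefix, dict(base.to_numpy(data)))

  def close(self) -> None:
    pass

PrefixLogger('[eval]').write({'return': 1.0})
```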
| acme-master | acme/utils/loggers/base.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loggers which filter other loggers."""
import math
import time
from typing import Callable, Optional, Sequence
from acme.utils.loggers import base
class NoneFilter(base.Logger):
"""Logger which writes to another logger, filtering any `None` values."""
def __init__(self, to: base.Logger):
"""Initializes the logger.
Args:
to: A `Logger` object to which the current object will forward its results
when `write` is called.
"""
self._to = to
def write(self, values: base.LoggingData):
values = {k: v for k, v in values.items() if v is not None}
self._to.write(values)
def close(self):
self._to.close()
class TimeFilter(base.Logger):
"""Logger which writes to another logger at a given time interval."""
def __init__(self, to: base.Logger, time_delta: float):
"""Initializes the logger.
Args:
to: A `Logger` object to which the current object will forward its results
when `write` is called.
time_delta: How often to write values out in seconds.
Note that writes within `time_delta` are dropped.
"""
self._to = to
self._time = 0
self._time_delta = time_delta
if time_delta < 0:
raise ValueError(f'time_delta must be non-negative (got {time_delta}).')
def write(self, values: base.LoggingData):
now = time.time()
if (now - self._time) > self._time_delta:
self._to.write(values)
self._time = now
def close(self):
self._to.close()
class KeyFilter(base.Logger):
"""Logger which filters keys in logged data."""
def __init__(
self,
to: base.Logger,
*,
keep: Optional[Sequence[str]] = None,
drop: Optional[Sequence[str]] = None,
):
"""Creates the filter.
Args:
to: A `Logger` object to which the current object will forward its writes.
keep: Keys that are kept by the filter. Note that `keep` and `drop` cannot
be both set at once.
drop: Keys that are dropped by the filter. Note that `keep` and `drop`
cannot be both set at once.
"""
if bool(keep) == bool(drop):
raise ValueError('Exactly one of `keep` & `drop` arguments must be set.')
self._to = to
self._keep = keep
self._drop = drop
def write(self, data: base.LoggingData):
if self._keep:
data = {k: data[k] for k in self._keep}
if self._drop:
data = {k: v for k, v in data.items() if k not in self._drop}
self._to.write(data)
def close(self):
self._to.close()
class GatedFilter(base.Logger):
"""Logger which writes to another logger based on a gating function.
This logger tracks the number of times its `write` method is called, and uses
a gating function on this number to decide when to write.
"""
def __init__(self, to: base.Logger, gating_fn: Callable[[int], bool]):
"""Initialises the logger.
Args:
to: A `Logger` object to which the current object will forward its results
when `write` is called.
gating_fn: A function that takes an integer (number of calls) as input.
For example, to log every tenth call: gating_fn=lambda t: t % 10 == 0.
"""
self._to = to
self._gating_fn = gating_fn
self._calls = 0
def write(self, values: base.LoggingData):
if self._gating_fn(self._calls):
self._to.write(values)
self._calls += 1
def close(self):
self._to.close()
@classmethod
def logarithmic(cls, to: base.Logger, n: int = 10) -> 'GatedFilter':
"""Builds a logger for writing at logarithmically-spaced intervals.
This will log on a linear scale at each order of magnitude of `n`.
For example, with n=10, this will log at times:
[0, 1, 2, ..., 9, 10, 20, 30, ... 90, 100, 200, 300, ... 900, 1000]
Args:
to: The underlying logger to write to.
n: Base (default 10) on which to operate.
Returns:
A GatedFilter logger, which gates logarithmically as described above.
"""
def logarithmic_filter(t: int) -> bool:
magnitude = math.floor(math.log10(max(t, 1))/math.log10(n))
return t % (n**magnitude) == 0
return cls(to, gating_fn=logarithmic_filter)
@classmethod
def periodic(cls, to: base.Logger, interval: int = 10) -> 'GatedFilter':
"""Builds a logger for writing at linearly-spaced intervals.
Args:
to: The underlying logger to write to.
interval: The interval between writes.
Returns:
A GatedFilter logger, which gates periodically as described above.
"""
return cls(to, gating_fn=lambda t: t % interval == 0)
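A composed-filters sketch (assuming `acme` is installed): drop a noisy key, then only let every 100th write through:

```python
from acme.utils import loggers
from acme.utils.loggers import filters

inner = loggers.InMemoryLogger()
logger = filters.KeyFilter(inner, drop=('debug_info',))
logger = filters.GatedFilter.periodic(logger, interval=100)
for t in range(1000):
  logger.write({'t': t, 'debug_info': 'verbose'})
print(len(inner.data))  # 10 rows: t = 0, 100, ..., 900, without 'debug_info'.
```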
| acme-master | acme/utils/loggers/filters.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for measurement_metrics."""
import copy
from unittest import mock
from acme import specs
from acme.testing import fakes
from acme.utils.observers import measurement_metrics
import dm_env
import numpy as np
from absl.testing import absltest
def _make_fake_env() -> dm_env.Environment:
env_spec = specs.EnvironmentSpec(
observations=specs.Array(shape=(10, 5), dtype=np.float32),
actions=specs.BoundedArray(
shape=(1,), dtype=np.float32, minimum=-100., maximum=100.),
rewards=specs.Array(shape=(), dtype=np.float32),
discounts=specs.BoundedArray(
shape=(), dtype=np.float32, minimum=0., maximum=1.),
)
return fakes.Environment(env_spec, episode_length=10)
_FAKE_ENV = _make_fake_env()
_TIMESTEP = mock.MagicMock(spec=dm_env.TimeStep)
_TIMESTEP.observation = [1.0, -2.0]
class MeasurementMetricsTest(absltest.TestCase):
def test_observe_nothing(self):
observer = measurement_metrics.MeasurementObserver()
self.assertEqual({}, observer.get_metrics())
def test_observe_first(self):
observer = measurement_metrics.MeasurementObserver()
observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
self.assertEqual({}, observer.get_metrics())
def test_observe_single_step(self):
observer = measurement_metrics.MeasurementObserver()
observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([1]))
self.assertEqual(
{
'measurement[0]_max': 1.0,
'measurement[0]_mean': 1.0,
'measurement[0]_p25': 1.0,
'measurement[0]_p50': 1.0,
'measurement[0]_p75': 1.0,
'measurement[1]_max': -2.0,
'measurement[1]_mean': -2.0,
'measurement[1]_p25': -2.0,
'measurement[1]_p50': -2.0,
'measurement[1]_p75': -2.0,
'measurement[0]_min': 1.0,
'measurement[1]_min': -2.0,
},
observer.get_metrics(),
)
def test_observe_multiple_step_same_observation(self):
observer = measurement_metrics.MeasurementObserver()
observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([1]))
observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([4]))
observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([5]))
self.assertEqual(
{
'measurement[0]_max': 1.0,
'measurement[0]_mean': 1.0,
'measurement[0]_p25': 1.0,
'measurement[0]_p50': 1.0,
'measurement[0]_p75': 1.0,
'measurement[1]_max': -2.0,
'measurement[1]_mean': -2.0,
'measurement[1]_p25': -2.0,
'measurement[1]_p50': -2.0,
'measurement[1]_p75': -2.0,
'measurement[0]_min': 1.0,
'measurement[1]_min': -2.0,
},
observer.get_metrics(),
)
def test_observe_multiple_step(self):
observer = measurement_metrics.MeasurementObserver()
observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([1]))
first_obs_timestep = copy.deepcopy(_TIMESTEP)
first_obs_timestep.observation = [1000.0, -50.0]
observer.observe(
env=_FAKE_ENV, timestep=first_obs_timestep, action=np.array([4]))
second_obs_timestep = copy.deepcopy(_TIMESTEP)
second_obs_timestep.observation = [-1000.0, 500.0]
observer.observe(
env=_FAKE_ENV, timestep=second_obs_timestep, action=np.array([4]))
self.assertEqual(
{
'measurement[0]_max': 1000.0,
'measurement[0]_mean': 1.0/3,
'measurement[0]_p25': -499.5,
'measurement[0]_p50': 1.0,
'measurement[0]_p75': 500.5,
'measurement[1]_max': 500.0,
'measurement[1]_mean': 448.0/3.0,
'measurement[1]_p25': -26.0,
'measurement[1]_p50': -2.0,
'measurement[1]_p75': 249.0,
'measurement[0]_min': -1000.0,
'measurement[1]_min': -50.0,
},
observer.get_metrics(),
)
def test_observe_empty_observation(self):
observer = measurement_metrics.MeasurementObserver()
empty_timestep = copy.deepcopy(_TIMESTEP)
empty_timestep.observation = {}
observer.observe_first(env=_FAKE_ENV, timestep=empty_timestep)
self.assertEqual({}, observer.get_metrics())
def test_observe_single_dimensions(self):
observer = measurement_metrics.MeasurementObserver()
observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
single_obs_timestep = copy.deepcopy(_TIMESTEP)
single_obs_timestep.observation = [1000.0, -50.0]
observer.observe(
env=_FAKE_ENV,
timestep=single_obs_timestep,
action=np.array([[1, 2], [3, 4]]))
np.testing.assert_equal(
{
'measurement[0]_max': 1000.0,
'measurement[0]_min': 1000.0,
'measurement[0]_mean': 1000.0,
'measurement[0]_p25': 1000.0,
'measurement[0]_p50': 1000.0,
'measurement[0]_p75': 1000.0,
'measurement[1]_max': -50.0,
'measurement[1]_mean': -50.0,
'measurement[1]_p25': -50.0,
'measurement[1]_p50': -50.0,
'measurement[1]_p75': -50.0,
'measurement[1]_min': -50.0,
},
observer.get_metrics(),
)
if __name__ == '__main__':
absltest.main()
| acme-master | acme/utils/observers/measurement_metrics_test.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for action_metrics_observers."""
from acme import specs
from acme.testing import fakes
from acme.utils.observers import action_metrics
import dm_env
import numpy as np
from absl.testing import absltest
def _make_fake_env() -> dm_env.Environment:
env_spec = specs.EnvironmentSpec(
observations=specs.Array(shape=(10, 5), dtype=np.float32),
actions=specs.BoundedArray(
shape=(1,), dtype=np.float32, minimum=-100., maximum=100.),
rewards=specs.Array(shape=(), dtype=np.float32),
discounts=specs.BoundedArray(
shape=(), dtype=np.float32, minimum=0., maximum=1.),
)
return fakes.Environment(env_spec, episode_length=10)
_FAKE_ENV = _make_fake_env()
_TIMESTEP = _FAKE_ENV.reset()
class ActionMetricsTest(absltest.TestCase):
def test_observe_nothing(self):
observer = action_metrics.ContinuousActionObserver()
self.assertEqual({}, observer.get_metrics())
def test_observe_first(self):
observer = action_metrics.ContinuousActionObserver()
observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
self.assertEqual({}, observer.get_metrics())
def test_observe_single_step(self):
observer = action_metrics.ContinuousActionObserver()
observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([1]))
self.assertEqual(
{
'action[0]_max': 1,
'action[0]_min': 1,
'action[0]_mean': 1,
'action[0]_p50': 1,
},
observer.get_metrics(),
)
def test_observe_multiple_step(self):
observer = action_metrics.ContinuousActionObserver()
observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([1]))
observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([4]))
observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([5]))
self.assertEqual(
{
'action[0]_max': 5,
'action[0]_min': 1,
'action[0]_mean': 10 / 3,
'action[0]_p50': 4,
},
observer.get_metrics(),
)
def test_observe_zero_dimensions(self):
observer = action_metrics.ContinuousActionObserver()
observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array(1))
self.assertEqual(
{
'action[]_max': 1,
'action[]_min': 1,
'action[]_mean': 1,
'action[]_p50': 1,
},
observer.get_metrics(),
)
def test_observe_multiple_dimensions(self):
observer = action_metrics.ContinuousActionObserver()
observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
observer.observe(
env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([[1, 2], [3, 4]]))
np.testing.assert_equal(
{
'action[0, 0]_max': 1,
'action[0, 0]_min': 1,
'action[0, 0]_mean': 1,
'action[0, 0]_p50': 1,
'action[0, 1]_max': 2,
'action[0, 1]_min': 2,
'action[0, 1]_mean': 2,
'action[0, 1]_p50': 2,
'action[1, 0]_max': 3,
'action[1, 0]_min': 3,
'action[1, 0]_mean': 3,
'action[1, 0]_p50': 3,
'action[1, 1]_max': 4,
'action[1, 1]_min': 4,
'action[1, 1]_mean': 4,
'action[1, 1]_p50': 4,
},
observer.get_metrics(),
)
if __name__ == '__main__':
absltest.main()
| acme-master | acme/utils/observers/action_metrics_test.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Acme observers."""
from acme.utils.observers.action_metrics import ContinuousActionObserver
from acme.utils.observers.action_norm import ActionNormObserver
from acme.utils.observers.base import EnvLoopObserver
from acme.utils.observers.base import Number
from acme.utils.observers.env_info import EnvInfoObserver
from acme.utils.observers.measurement_metrics import MeasurementObserver
| acme-master | acme/utils/observers/__init__.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An observer that collects action norm stats.
"""
from typing import Dict
from acme.utils.observers import base
import dm_env
import numpy as np
class ActionNormObserver(base.EnvLoopObserver):
"""An observer that collects action norm stats."""
def __init__(self):
self._action_norms = None
def observe_first(self, env: dm_env.Environment, timestep: dm_env.TimeStep
) -> None:
"""Observes the initial state."""
self._action_norms = []
def observe(self, env: dm_env.Environment, timestep: dm_env.TimeStep,
action: np.ndarray) -> None:
"""Records one environment step."""
self._action_norms.append(np.linalg.norm(action))
def get_metrics(self) -> Dict[str, base.Number]:
"""Returns metrics collected for the current episode."""
return {'action_norm_avg': np.mean(self._action_norms),
'action_norm_min': np.min(self._action_norms),
'action_norm_max': np.max(self._action_norms)}
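A hand-driven sketch of the observer (normally it is called by an environment loop); `env` and `timestep` are passed as `None` here since this observer does not inspect them:

```python
import numpy as np
from acme.utils.observers import action_norm

observer = action_norm.ActionNormObserver()
observer.observe_first(env=None, timestep=None)
for a in ([1.0, 0.0], [0.0, 2.0]):
  observer.observe(env=None, timestep=None, action=np.array(a))
print(observer.get_metrics())  # action_norm_avg 1.5, min 1.0, max 2.0.
```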
| acme-master | acme/utils/observers/action_norm.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An observer that tracks statistics about the actions."""
from typing import Dict
from acme.utils.observers import base
import dm_env
import numpy as np
class ContinuousActionObserver(base.EnvLoopObserver):
"""Observer that tracks statstics of continuous actions taken by the agent.
Assumes the action is a np.ndarray, and for each dimension in the action,
calculates some useful statistics for a particular episode.
"""
def __init__(self):
self._actions = None
def observe_first(self, env: dm_env.Environment,
timestep: dm_env.TimeStep) -> None:
"""Observes the initial state."""
self._actions = []
def observe(self, env: dm_env.Environment, timestep: dm_env.TimeStep,
action: np.ndarray) -> None:
"""Records one environment step."""
self._actions.append(action)
def get_metrics(self) -> Dict[str, base.Number]:
"""Returns metrics collected for the current episode."""
aggregate_metrics = {}
if not self._actions:
return aggregate_metrics
metrics = {
'action_max': np.max(self._actions, axis=0),
'action_min': np.min(self._actions, axis=0),
'action_mean': np.mean(self._actions, axis=0),
'action_p50': np.percentile(self._actions, q=50., axis=0)
}
for index, sub_action_metric in np.ndenumerate(metrics['action_max']):
aggregate_metrics[f'action{list(index)}_max'] = sub_action_metric
aggregate_metrics[f'action{list(index)}_min'] = metrics['action_min'][
index]
aggregate_metrics[f'action{list(index)}_mean'] = metrics['action_mean'][
index]
aggregate_metrics[f'action{list(index)}_p50'] = metrics['action_p50'][
index]
return aggregate_metrics
| acme-master | acme/utils/observers/action_metrics.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An observer that returns env's info.
"""
from typing import Dict
from acme.utils.observers import base
import dm_env
import numpy as np
class EnvInfoObserver(base.EnvLoopObserver):
"""An observer that collects and accumulates scalars from env's info."""
def __init__(self):
self._metrics = None
def _accumulate_metrics(self, env: dm_env.Environment) -> None:
if not hasattr(env, 'get_info'):
return
info = getattr(env, 'get_info')()
if not info:
return
for k, v in info.items():
if np.isscalar(v):
self._metrics[k] = self._metrics.get(k, 0) + v
def observe_first(self, env: dm_env.Environment, timestep: dm_env.TimeStep
) -> None:
"""Observes the initial state."""
self._metrics = {}
self._accumulate_metrics(env)
def observe(self, env: dm_env.Environment, timestep: dm_env.TimeStep,
action: np.ndarray) -> None:
"""Records one environment step."""
self._accumulate_metrics(env)
def get_metrics(self) -> Dict[str, base.Number]:
"""Returns metrics collected for the current episode."""
return self._metrics
| acme-master | acme/utils/observers/env_info.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme.utils.observers.env_info."""
from acme.utils.observers import env_info
from acme.wrappers import gym_wrapper
import gym
from gym import spaces
import numpy as np
from absl.testing import absltest
class GymEnvWithInfo(gym.Env):
def __init__(self):
obs_space = np.ones((10,))
self.observation_space = spaces.Box(-obs_space, obs_space, dtype=np.float32)
act_space = np.ones((3,))
self.action_space = spaces.Box(-act_space, act_space, dtype=np.float32)
self._step = 0
def reset(self):
self._step = 0
return self.observation_space.sample()
def step(self, action: np.ndarray):
self._step += 1
info = {'survival_bonus': 1}
if self._step == 1 or self._step == 7:
info['found_checkpoint'] = 1
if self._step == 5:
info['picked_up_an_apple'] = 1
return self.observation_space.sample(), 0, False, info
class ActionNormTest(absltest.TestCase):
def test_basic(self):
env = GymEnvWithInfo()
env = gym_wrapper.GymWrapper(env)
observer = env_info.EnvInfoObserver()
timestep = env.reset()
observer.observe_first(env, timestep)
for _ in range(20):
action = np.zeros((3,))
timestep = env.step(action)
observer.observe(env, timestep, action)
metrics = observer.get_metrics()
self.assertLen(metrics, 3)
np.testing.assert_equal(metrics['found_checkpoint'], 2)
np.testing.assert_equal(metrics['picked_up_an_apple'], 1)
np.testing.assert_equal(metrics['survival_bonus'], 20)
if __name__ == '__main__':
absltest.main()
| acme-master | acme/utils/observers/env_info_test.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme.utils.observers.action_norm."""
from acme import specs
from acme.testing import fakes
from acme.utils.observers import action_norm
import dm_env
import numpy as np
from absl.testing import absltest
def _make_fake_env() -> dm_env.Environment:
env_spec = specs.EnvironmentSpec(
observations=specs.Array(shape=(10, 5), dtype=np.float32),
actions=specs.BoundedArray(
shape=(1,), dtype=np.float32, minimum=-10., maximum=10.),
rewards=specs.Array(shape=(), dtype=np.float32),
discounts=specs.BoundedArray(
shape=(), dtype=np.float32, minimum=0., maximum=1.),
)
return fakes.Environment(env_spec, episode_length=10)
class ActionNormTest(absltest.TestCase):
def test_basic(self):
env = _make_fake_env()
observer = action_norm.ActionNormObserver()
timestep = env.reset()
observer.observe_first(env, timestep)
for it in range(5):
action = np.ones((1,), dtype=np.float32) * it
timestep = env.step(action)
observer.observe(env, timestep, action)
metrics = observer.get_metrics()
self.assertLen(metrics, 3)
np.testing.assert_equal(metrics['action_norm_min'], 0)
np.testing.assert_equal(metrics['action_norm_max'], 4)
np.testing.assert_equal(metrics['action_norm_avg'], 2)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/utils/observers/action_norm_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An observer that tracks statistics about the observations."""
from typing import Mapping, List
from acme.utils.observers import base
import dm_env
import numpy as np
class MeasurementObserver(base.EnvLoopObserver):
"""Observer the provides statistics for measurements at every timestep.
This assumes the measurements is a multidimensional array with a static spec.
Warning! It is not intended to be used for high dimensional observations.
self._measurements: List[np.ndarray]
"""
def __init__(self):
self._measurements = []
def observe_first(self, env: dm_env.Environment,
timestep: dm_env.TimeStep) -> None:
"""Observes the initial state."""
self._measurements = []
def observe(self, env: dm_env.Environment, timestep: dm_env.TimeStep,
action: np.ndarray) -> None:
"""Records one environment step."""
self._measurements.append(timestep.observation)
def get_metrics(self) -> Mapping[str, List[base.Number]]: # pytype: disable=signature-mismatch # overriding-return-type-checks
"""Returns metrics collected for the current episode."""
aggregate_metrics = {}
if not self._measurements:
return aggregate_metrics
metrics = {
'measurement_max': np.max(self._measurements, axis=0),
'measurement_min': np.min(self._measurements, axis=0),
'measurement_mean': np.mean(self._measurements, axis=0),
'measurement_p25': np.percentile(self._measurements, q=25., axis=0),
'measurement_p50': np.percentile(self._measurements, q=50., axis=0),
'measurement_p75': np.percentile(self._measurements, q=75., axis=0),
}
for index, sub_observation_metric in np.ndenumerate(
metrics['measurement_max']):
aggregate_metrics[
f'measurement{list(index)}_max'] = sub_observation_metric
aggregate_metrics[f'measurement{list(index)}_min'] = metrics[
'measurement_min'][index]
aggregate_metrics[f'measurement{list(index)}_mean'] = metrics[
'measurement_mean'][index]
aggregate_metrics[f'measurement{list(index)}_p50'] = metrics[
'measurement_p50'][index]
aggregate_metrics[f'measurement{list(index)}_p25'] = metrics[
'measurement_p25'][index]
aggregate_metrics[f'measurement{list(index)}_p75'] = metrics[
'measurement_p75'][index]
return aggregate_metrics
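# Editorial usage sketch (not part of the original module): driving the
# observer above with a fake environment from acme.testing.fakes, assuming
# that module is available. The action values are arbitrary.
def _example_measurement_observer() -> Mapping[str, List[base.Number]]:
  from acme.testing import fakes  # Deferred import; test-only dependency.
  env = fakes.ContinuousEnvironment(
      action_dim=2, observation_dim=3, episode_length=5)
  observer = MeasurementObserver()
  timestep = env.reset()
  observer.observe_first(env, timestep)
  while not timestep.last():
    action = np.zeros((2,), dtype=np.float32)
    timestep = env.step(action)
    observer.observe(env, timestep, action)
  # Keys look like 'measurement[0]_max', 'measurement[1]_p50', etc.
  return observer.get_metrics()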
|
acme-master
|
acme/utils/observers/measurement_metrics.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics observers."""
import abc
from typing import Dict, Union
import dm_env
import numpy as np
Number = Union[int, float]
class EnvLoopObserver(abc.ABC):
"""An interface for collecting metrics/counters in EnvironmentLoop."""
@abc.abstractmethod
def observe_first(self, env: dm_env.Environment, timestep: dm_env.TimeStep
) -> None:
"""Observes the initial state."""
@abc.abstractmethod
def observe(self, env: dm_env.Environment, timestep: dm_env.TimeStep,
action: np.ndarray) -> None:
"""Records one environment step."""
@abc.abstractmethod
def get_metrics(self) -> Dict[str, Number]:
"""Returns metrics collected for the current episode."""
|
acme-master
|
acme/utils/observers/base.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An OpenSpiel multi-agent/environment training loop."""
import operator
import time
from typing import Optional, Sequence
from acme import core
from acme.utils import counting
from acme.utils import loggers
from acme.wrappers import open_spiel_wrapper
import dm_env
from dm_env import specs
import numpy as np
import tree
# pytype: disable=import-error
import pyspiel
# pytype: enable=import-error
class OpenSpielEnvironmentLoop(core.Worker):
"""An OpenSpiel RL environment loop.
  This takes an `Environment` and a list of `Actor` instances and coordinates
  their
interaction. Agents are updated if `should_update=True`. This can be used as:
loop = EnvironmentLoop(environment, actors)
loop.run(num_episodes)
A `Counter` instance can optionally be given in order to maintain counts
  between different Acme components. If not given, a local Counter will be
created to maintain counts between calls to the `run` method.
A `Logger` instance can also be passed in order to control the output of the
  loop. If not given, a platform-specific default logger will be used as defined
by utils.loggers.make_default_logger. A string `label` can be passed to easily
change the label associated with the default logger; this is ignored if a
`Logger` instance is given.
"""
def __init__(
self,
environment: open_spiel_wrapper.OpenSpielWrapper,
actors: Sequence[core.Actor],
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
should_update: bool = True,
label: str = 'open_spiel_environment_loop',
):
# Internalize agent and environment.
self._environment = environment
self._actors = actors
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger(label)
self._should_update = should_update
# Track information necessary to coordinate updates among multiple actors.
self._observed_first = [False] * len(self._actors)
self._prev_actions = [pyspiel.INVALID_ACTION] * len(self._actors)
def _send_observation(self, timestep: dm_env.TimeStep, player: int):
# If terminal all actors must update
if player == pyspiel.PlayerId.TERMINAL:
for player_id in range(len(self._actors)):
# Note: we must account for situations where the first observation
# is a terminal state, e.g. if an opponent folds in poker before we get
# to act.
if self._observed_first[player_id]:
player_timestep = self._get_player_timestep(timestep, player_id)
self._actors[player_id].observe(self._prev_actions[player_id],
player_timestep)
if self._should_update:
self._actors[player_id].update()
self._observed_first = [False] * len(self._actors)
self._prev_actions = [pyspiel.INVALID_ACTION] * len(self._actors)
else:
if not self._observed_first[player]:
player_timestep = dm_env.TimeStep(
observation=timestep.observation[player],
reward=None,
discount=None,
step_type=dm_env.StepType.FIRST)
self._actors[player].observe_first(player_timestep)
self._observed_first[player] = True
else:
player_timestep = self._get_player_timestep(timestep, player)
self._actors[player].observe(self._prev_actions[player],
player_timestep)
if self._should_update:
self._actors[player].update()
def _get_action(self, timestep: dm_env.TimeStep, player: int) -> int:
self._prev_actions[player] = self._actors[player].select_action(
timestep.observation[player])
return self._prev_actions[player]
def _get_player_timestep(self, timestep: dm_env.TimeStep,
player: int) -> dm_env.TimeStep:
return dm_env.TimeStep(observation=timestep.observation[player],
reward=timestep.reward[player],
discount=timestep.discount[player],
step_type=timestep.step_type)
def run_episode(self) -> loggers.LoggingData:
"""Run one episode.
Each episode is a loop which interacts first with the environment to get an
    observation and then gives that observation to the agent in order to
    retrieve
an action.
Returns:
An instance of `loggers.LoggingData`.
"""
# Reset any counts and start the environment.
start_time = time.time()
episode_steps = 0
# For evaluation, this keeps track of the total undiscounted reward
# for each player accumulated during the episode.
multiplayer_reward_spec = specs.BoundedArray(
(self._environment.game.num_players(),),
np.float32,
minimum=self._environment.game.min_utility(),
maximum=self._environment.game.max_utility())
episode_return = tree.map_structure(_generate_zeros_from_spec,
multiplayer_reward_spec)
timestep = self._environment.reset()
# Make the first observation.
self._send_observation(timestep, self._environment.current_player)
# Run an episode.
while not timestep.last():
# Generate an action from the agent's policy and step the environment.
if self._environment.is_turn_based:
action_list = [
self._get_action(timestep, self._environment.current_player)
]
else:
# FIXME: Support simultaneous move games.
raise ValueError('Currently only supports sequential games.')
timestep = self._environment.step(action_list)
# Have the agent observe the timestep and let the actor update itself.
self._send_observation(timestep, self._environment.current_player)
# Book-keeping.
episode_steps += 1
# Equivalent to: episode_return += timestep.reward
# We capture the return value because if timestep.reward is a JAX
# DeviceArray, episode_return will not be mutated in-place. (In all other
# cases, the returned episode_return will be the same object as the
# argument episode_return.)
episode_return = tree.map_structure(operator.iadd,
episode_return,
timestep.reward)
# Record counts.
counts = self._counter.increment(episodes=1, steps=episode_steps)
# Collect the results and combine with counts.
steps_per_second = episode_steps / (time.time() - start_time)
result = {
'episode_length': episode_steps,
'episode_return': episode_return,
'steps_per_second': steps_per_second,
}
result.update(counts)
return result
def run(self,
num_episodes: Optional[int] = None,
num_steps: Optional[int] = None):
"""Perform the run loop.
Run the environment loop either for `num_episodes` episodes or for at
least `num_steps` steps (the last episode is always run until completion,
so the total number of steps may be slightly more than `num_steps`).
At least one of these two arguments has to be None.
Upon termination of an episode a new episode will be started. If the number
of episodes and the number of steps are not given then this will interact
with the environment infinitely.
Args:
num_episodes: number of episodes to run the loop for.
num_steps: minimal number of steps to run the loop for.
Raises:
ValueError: If both 'num_episodes' and 'num_steps' are not None.
"""
if not (num_episodes is None or num_steps is None):
raise ValueError('Either "num_episodes" or "num_steps" should be None.')
def should_terminate(episode_count: int, step_count: int) -> bool:
return ((num_episodes is not None and episode_count >= num_episodes) or
(num_steps is not None and step_count >= num_steps))
episode_count, step_count = 0, 0
while not should_terminate(episode_count, step_count):
result = self.run_episode()
episode_count += 1
step_count += result['episode_length']
# Log the given results.
self._logger.write(result)
def _generate_zeros_from_spec(spec: specs.Array) -> np.ndarray:
return np.zeros(spec.shape, spec.dtype)
|
acme-master
|
acme/environment_loops/open_spiel_environment_loop.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for OpenSpiel environment loop."""
import unittest
import acme
from acme import core
from acme import specs
from acme import types
from acme import wrappers
import dm_env
import numpy as np
import tree
from absl.testing import absltest
from absl.testing import parameterized
SKIP_OPEN_SPIEL_TESTS = False
SKIP_OPEN_SPIEL_MESSAGE = 'open_spiel not installed.'
try:
# pylint: disable=g-import-not-at-top
# pytype: disable=import-error
from acme.environment_loops import open_spiel_environment_loop
from acme.wrappers import open_spiel_wrapper
from open_spiel.python import rl_environment
# pytype: disable=import-error
class RandomActor(core.Actor):
"""Fake actor which generates random actions and validates specs."""
def __init__(self, spec: specs.EnvironmentSpec):
self._spec = spec
self.num_updates = 0
def select_action(self, observation: open_spiel_wrapper.OLT) -> int:
_validate_spec(self._spec.observations, observation)
legals = np.array(np.nonzero(observation.legal_actions), dtype=np.int32)
return np.random.choice(legals[0])
def observe_first(self, timestep: dm_env.TimeStep):
_validate_spec(self._spec.observations, timestep.observation)
def observe(self, action: types.NestedArray,
next_timestep: dm_env.TimeStep):
_validate_spec(self._spec.actions, action)
_validate_spec(self._spec.rewards, next_timestep.reward)
_validate_spec(self._spec.discounts, next_timestep.discount)
_validate_spec(self._spec.observations, next_timestep.observation)
def update(self, wait: bool = False):
self.num_updates += 1
except ModuleNotFoundError:
SKIP_OPEN_SPIEL_TESTS = True
def _validate_spec(spec: types.NestedSpec, value: types.NestedArray):
"""Validate a value from a potentially nested spec."""
tree.assert_same_structure(value, spec)
tree.map_structure(lambda s, v: s.validate(v), spec, value)
@unittest.skipIf(SKIP_OPEN_SPIEL_TESTS, SKIP_OPEN_SPIEL_MESSAGE)
class OpenSpielEnvironmentLoopTest(parameterized.TestCase):
def test_loop_run(self):
raw_env = rl_environment.Environment('tic_tac_toe')
env = open_spiel_wrapper.OpenSpielWrapper(raw_env)
env = wrappers.SinglePrecisionWrapper(env)
environment_spec = acme.make_environment_spec(env)
actors = []
for _ in range(env.num_players):
actors.append(RandomActor(environment_spec))
loop = open_spiel_environment_loop.OpenSpielEnvironmentLoop(env, actors)
result = loop.run_episode()
self.assertIn('episode_length', result)
self.assertIn('episode_return', result)
self.assertIn('steps_per_second', result)
loop.run(num_episodes=10)
loop.run(num_steps=100)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/environment_loops/open_spiel_environment_loop_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Specialized environment loops."""
try:
# pylint: disable=g-import-not-at-top
from acme.environment_loops.open_spiel_environment_loop import OpenSpielEnvironmentLoop
except ImportError:
pass
|
acme-master
|
acme/environment_loops/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake (mock) components for multiagent testing."""
from typing import Dict, List
from acme import specs
import numpy as np
def _make_multiagent_spec(agent_indices: List[str]) -> Dict[str, specs.Array]:
"""Returns dummy multiagent sub-spec (e.g., observation or action spec).
Args:
agent_indices: a list of agent indices.
"""
return {
agent_id: specs.BoundedArray((1,), np.float32, 0, 1)
for agent_id in agent_indices
}
def make_multiagent_environment_spec(
agent_indices: List[str]) -> specs.EnvironmentSpec:
"""Returns dummy multiagent environment spec.
Args:
agent_indices: a list of agent indices.
"""
action_spec = _make_multiagent_spec(agent_indices)
discount_spec = specs.BoundedArray((), np.float32, 0.0, 1.0)
observation_spec = _make_multiagent_spec(agent_indices)
reward_spec = _make_multiagent_spec(agent_indices)
return specs.EnvironmentSpec(
actions=action_spec,
discounts=discount_spec,
observations=observation_spec,
rewards=reward_spec)
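# Editorial usage sketch (not part of the original module): building a dummy
# two-agent spec and inspecting the per-agent action sub-specs.
def _example_multiagent_spec() -> specs.EnvironmentSpec:
  env_spec = make_multiagent_environment_spec(agent_indices=['0', '1'])
  assert set(env_spec.actions.keys()) == {'0', '1'}
  assert env_spec.actions['0'].shape == (1,)
  return env_spec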
|
acme-master
|
acme/testing/multiagent_fakes.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing utilities."""
import sys
from typing import Optional
from absl import flags
from absl.testing import parameterized
class TestCase(parameterized.TestCase):
"""A custom TestCase which handles FLAG parsing for pytest compatibility."""
def get_tempdir(self, name: Optional[str] = None) -> str:
try:
flags.FLAGS.test_tmpdir
except flags.UnparsedFlagAccessError:
# Need to initialize flags when running `pytest`.
flags.FLAGS(sys.argv, known_only=True)
return self.create_tempdir(name).full_path
|
acme-master
|
acme/testing/test_utils.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing helpers."""
|
acme-master
|
acme/testing/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake (mock) components.
Minimal implementations of fake Acme components which can be instantiated in
order to test or interact with other components.
"""
import threading
from typing import List, Mapping, Optional, Sequence, Callable, Iterator
from acme import core
from acme import specs
from acme import types
from acme import wrappers
import dm_env
import numpy as np
import reverb
from rlds import rlds_types
import tensorflow as tf
import tree
class Actor(core.Actor):
"""Fake actor which generates random actions and validates specs."""
def __init__(self, spec: specs.EnvironmentSpec):
self._spec = spec
self.num_updates = 0
def select_action(self, observation: types.NestedArray) -> types.NestedArray:
_validate_spec(self._spec.observations, observation)
return _generate_from_spec(self._spec.actions)
def observe_first(self, timestep: dm_env.TimeStep):
_validate_spec(self._spec.observations, timestep.observation)
def observe(
self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
):
_validate_spec(self._spec.actions, action)
_validate_spec(self._spec.rewards, next_timestep.reward)
_validate_spec(self._spec.discounts, next_timestep.discount)
_validate_spec(self._spec.observations, next_timestep.observation)
def update(self, wait: bool = False):
self.num_updates += 1
class VariableSource(core.VariableSource):
"""Fake variable source."""
def __init__(self,
variables: Optional[types.NestedArray] = None,
barrier: Optional[threading.Barrier] = None,
use_default_key: bool = True):
# Add dummy variables so we can expose them in get_variables.
if use_default_key:
self._variables = {'policy': [] if variables is None else variables}
else:
self._variables = variables
self._barrier = barrier
def get_variables(self, names: Sequence[str]) -> List[types.NestedArray]:
if self._barrier is not None:
self._barrier.wait()
return [self._variables[name] for name in names]
class Learner(core.Learner, VariableSource):
"""Fake Learner."""
def __init__(self,
variables: Optional[types.NestedArray] = None,
barrier: Optional[threading.Barrier] = None):
super().__init__(variables=variables, barrier=barrier)
self.step_counter = 0
def step(self):
self.step_counter += 1
class Environment(dm_env.Environment):
"""A fake environment with a given spec."""
def __init__(
self,
spec: specs.EnvironmentSpec,
*,
episode_length: int = 25,
):
# Assert that the discount spec is a BoundedArray with range [0, 1].
def check_discount_spec(path, discount_spec):
if (not isinstance(discount_spec, specs.BoundedArray) or
not np.isclose(discount_spec.minimum, 0) or
not np.isclose(discount_spec.maximum, 1)):
if path:
          path_str = '/'.join(str(p) for p in path) + ' '
else:
path_str = ''
raise ValueError(
'discount_spec {}isn\'t a BoundedArray in [0, 1].'.format(path_str))
tree.map_structure_with_path(check_discount_spec, spec.discounts)
self._spec = spec
self._episode_length = episode_length
self._step = 0
def _generate_fake_observation(self):
return _generate_from_spec(self._spec.observations)
def _generate_fake_reward(self):
return _generate_from_spec(self._spec.rewards)
def _generate_fake_discount(self):
return _generate_from_spec(self._spec.discounts)
def reset(self) -> dm_env.TimeStep:
observation = self._generate_fake_observation()
self._step = 1
return dm_env.restart(observation)
def step(self, action) -> dm_env.TimeStep:
# Return a reset timestep if we haven't touched the environment yet.
if not self._step:
return self.reset()
_validate_spec(self._spec.actions, action)
observation = self._generate_fake_observation()
reward = self._generate_fake_reward()
discount = self._generate_fake_discount()
if self._episode_length and (self._step == self._episode_length):
self._step = 0
# We can't use dm_env.termination directly because then the discount
# wouldn't necessarily conform to the spec (if eg. we want float32).
return dm_env.TimeStep(dm_env.StepType.LAST, reward, discount,
observation)
else:
self._step += 1
return dm_env.transition(
reward=reward, observation=observation, discount=discount)
def action_spec(self):
return self._spec.actions
def observation_spec(self):
return self._spec.observations
def reward_spec(self):
return self._spec.rewards
def discount_spec(self):
return self._spec.discounts
class _BaseDiscreteEnvironment(Environment):
"""Discrete action fake environment."""
def __init__(self,
*,
num_actions: int = 1,
action_dtype=np.int32,
observation_spec: types.NestedSpec,
discount_spec: Optional[types.NestedSpec] = None,
reward_spec: Optional[types.NestedSpec] = None,
**kwargs):
"""Initialize the environment."""
if reward_spec is None:
reward_spec = specs.Array((), np.float32)
if discount_spec is None:
discount_spec = specs.BoundedArray((), np.float32, 0.0, 1.0)
actions = specs.DiscreteArray(num_actions, dtype=action_dtype)
super().__init__(
spec=specs.EnvironmentSpec(
observations=observation_spec,
actions=actions,
rewards=reward_spec,
discounts=discount_spec),
**kwargs)
class DiscreteEnvironment(_BaseDiscreteEnvironment):
"""Discrete state and action fake environment."""
def __init__(self,
*,
num_actions: int = 1,
num_observations: int = 1,
action_dtype=np.int32,
obs_dtype=np.int32,
obs_shape: Sequence[int] = (),
discount_spec: Optional[types.NestedSpec] = None,
reward_spec: Optional[types.NestedSpec] = None,
**kwargs):
"""Initialize the environment."""
observations_spec = specs.BoundedArray(
shape=obs_shape,
dtype=obs_dtype,
minimum=obs_dtype(0),
maximum=obs_dtype(num_observations - 1))
super().__init__(
num_actions=num_actions,
action_dtype=action_dtype,
observation_spec=observations_spec,
discount_spec=discount_spec,
reward_spec=reward_spec,
**kwargs)
class NestedDiscreteEnvironment(_BaseDiscreteEnvironment):
"""Discrete action fake environment with nested discrete state."""
def __init__(self,
*,
num_observations: Mapping[str, int],
num_actions: int = 1,
action_dtype=np.int32,
obs_dtype=np.int32,
obs_shape: Sequence[int] = (),
discount_spec: Optional[types.NestedSpec] = None,
reward_spec: Optional[types.NestedSpec] = None,
**kwargs):
"""Initialize the environment."""
observations_spec = {}
for key in num_observations:
observations_spec[key] = specs.BoundedArray(
shape=obs_shape,
dtype=obs_dtype,
minimum=obs_dtype(0),
maximum=obs_dtype(num_observations[key] - 1))
super().__init__(
num_actions=num_actions,
action_dtype=action_dtype,
observation_spec=observations_spec,
discount_spec=discount_spec,
reward_spec=reward_spec,
**kwargs)
class ContinuousEnvironment(Environment):
"""Continuous state and action fake environment."""
def __init__(self,
*,
action_dim: int = 1,
observation_dim: int = 1,
bounded: bool = False,
dtype=np.float32,
reward_dtype=np.float32,
**kwargs):
"""Initialize the environment.
Args:
action_dim: number of action dimensions.
observation_dim: number of observation dimensions.
bounded: whether or not the actions are bounded in [-1, 1].
dtype: dtype of the action and observation spaces.
reward_dtype: dtype of the reward and discounts.
**kwargs: additional kwargs passed to the Environment base class.
"""
action_shape = () if action_dim == 0 else (action_dim,)
observation_shape = () if observation_dim == 0 else (observation_dim,)
observations = specs.Array(observation_shape, dtype)
rewards = specs.Array((), reward_dtype)
discounts = specs.BoundedArray((), reward_dtype, 0.0, 1.0)
if bounded:
actions = specs.BoundedArray(action_shape, dtype, -1.0, 1.0)
else:
actions = specs.Array(action_shape, dtype)
super().__init__(
spec=specs.EnvironmentSpec(
observations=observations,
actions=actions,
rewards=rewards,
discounts=discounts),
**kwargs)
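# Editorial usage sketch (not part of the original module): pairing the fake
# Actor and a fake environment defined above in a hand-rolled interaction
# loop, as one might do in a smoke test.
def _example_fake_interaction(num_steps: int = 5) -> None:
  env = DiscreteEnvironment(
      num_actions=3, num_observations=4, episode_length=10)
  actor = Actor(specs.make_environment_spec(env))
  timestep = env.reset()
  actor.observe_first(timestep)
  for _ in range(num_steps):
    action = actor.select_action(timestep.observation)
    timestep = env.step(action)
    actor.observe(action, next_timestep=timestep)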
def _validate_spec(spec: types.NestedSpec, value: types.NestedArray):
"""Validate a value from a potentially nested spec."""
tree.assert_same_structure(value, spec)
tree.map_structure(lambda s, v: s.validate(v), spec, value)
def _normalize_array(array: specs.Array) -> specs.Array:
"""Converts bounded arrays with (-inf,+inf) bounds to unbounded arrays.
The returned array should be mostly equivalent to the input, except that
`generate_value()` returns -infs on arrays bounded to (-inf,+inf) and zeros
on unbounded arrays.
Args:
array: the array to be normalized.
Returns:
normalized array.
"""
if isinstance(array, specs.DiscreteArray):
return array
if not isinstance(array, specs.BoundedArray):
return array
if not (array.minimum == float('-inf')).all():
return array
if not (array.maximum == float('+inf')).all():
return array
return specs.Array(array.shape, array.dtype, array.name)
def _generate_from_spec(spec: types.NestedSpec) -> types.NestedArray:
"""Generate a value from a potentially nested spec."""
return tree.map_structure(lambda s: _normalize_array(s).generate_value(),
spec)
def transition_dataset_from_spec(
spec: specs.EnvironmentSpec) -> tf.data.Dataset:
"""Constructs fake dataset of Reverb N-step transition samples.
Args:
spec: Constructed fake transitions match the provided specification.
Returns:
tf.data.Dataset that produces the same fake N-step transition ReverbSample
object indefinitely.
"""
observation = _generate_from_spec(spec.observations)
action = _generate_from_spec(spec.actions)
reward = _generate_from_spec(spec.rewards)
discount = _generate_from_spec(spec.discounts)
data = types.Transition(observation, action, reward, discount, observation)
info = tree.map_structure(
lambda tf_dtype: tf.ones([], tf_dtype.as_numpy_dtype),
reverb.SampleInfo.tf_dtypes())
sample = reverb.ReplaySample(info=info, data=data)
return tf.data.Dataset.from_tensors(sample).repeat()
def transition_dataset(environment: dm_env.Environment) -> tf.data.Dataset:
"""Constructs fake dataset of Reverb N-step transition samples.
Args:
environment: Constructed fake transitions will match the specification of
this environment.
Returns:
tf.data.Dataset that produces the same fake N-step transition ReverbSample
object indefinitely.
"""
return transition_dataset_from_spec(specs.make_environment_spec(environment))
def transition_iterator_from_spec(
spec: specs.EnvironmentSpec) -> Callable[[int], Iterator[types.Transition]]:
"""Constructs fake iterator of transitions.
Args:
    spec: Constructed fake transitions match the provided specification.
Returns:
A callable that given a batch_size returns an iterator of transitions.
"""
observation = _generate_from_spec(spec.observations)
action = _generate_from_spec(spec.actions)
reward = _generate_from_spec(spec.rewards)
discount = _generate_from_spec(spec.discounts)
data = types.Transition(observation, action, reward, discount, observation)
dataset = tf.data.Dataset.from_tensors(data).repeat()
return lambda batch_size: dataset.batch(batch_size).as_numpy_iterator()
def transition_iterator(
environment: dm_env.Environment
) -> Callable[[int], Iterator[types.Transition]]:
"""Constructs fake iterator of transitions.
Args:
environment: Constructed fake transitions will match the specification of
this environment.
Returns:
A callable that given a batch_size returns an iterator of transitions.
"""
return transition_iterator_from_spec(specs.make_environment_spec(environment))
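# Editorial usage sketch (not part of the original module): consuming the
# iterator factory above, e.g. to feed a learner in a test.
def _example_transition_batches(batch_size: int = 4) -> types.Transition:
  env = ContinuousEnvironment(action_dim=2, observation_dim=3)
  iterator = transition_iterator(env)(batch_size)
  batch = next(iterator)
  assert batch.observation.shape == (batch_size, 3)
  return batch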
def fake_atari_wrapped(episode_length: int = 10,
oar_wrapper: bool = False) -> dm_env.Environment:
"""Builds fake version of the environment to be used by tests.
Args:
episode_length: The length of episodes produced by this environment.
oar_wrapper: Should ObservationActionRewardWrapper be applied.
Returns:
Fake version of the environment equivalent to the one returned by
env_loader.load_atari_wrapped
"""
env = DiscreteEnvironment(
num_actions=18,
num_observations=2,
obs_shape=(84, 84, 4),
obs_dtype=np.float32,
episode_length=episode_length)
if oar_wrapper:
env = wrappers.ObservationActionRewardWrapper(env)
return env
def rlds_dataset_from_env_spec(
spec: specs.EnvironmentSpec,
*,
episode_count: int = 10,
episode_length: int = 25,
) -> tf.data.Dataset:
"""Constructs a fake RLDS dataset with the given spec.
Args:
spec: specification to use for generation of fake steps.
episode_count: number of episodes in the dataset.
episode_length: length of the episode in the dataset.
Returns:
a fake RLDS dataset.
"""
fake_steps = {
rlds_types.OBSERVATION:
([_generate_from_spec(spec.observations)] * episode_length),
rlds_types.ACTION: ([_generate_from_spec(spec.actions)] * episode_length),
rlds_types.REWARD: ([_generate_from_spec(spec.rewards)] * episode_length),
rlds_types.DISCOUNT:
([_generate_from_spec(spec.discounts)] * episode_length),
rlds_types.IS_TERMINAL: [False] * (episode_length - 1) + [True],
rlds_types.IS_FIRST: [True] + [False] * (episode_length - 1),
rlds_types.IS_LAST: [False] * (episode_length - 1) + [True],
}
steps_dataset = tf.data.Dataset.from_tensor_slices(fake_steps)
return tf.data.Dataset.from_tensor_slices(
{rlds_types.STEPS: [steps_dataset] * episode_count})
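# Editorial usage sketch (not part of the original module): iterating over the
# fake RLDS dataset above, episode by episode and step by step.
def _example_rlds_iteration() -> int:
  env_spec = specs.EnvironmentSpec(
      observations=specs.Array((2,), np.float32),
      actions=specs.Array((1,), np.float32),
      rewards=specs.Array((), np.float32),
      discounts=specs.BoundedArray((), np.float32, 0., 1.))
  dataset = rlds_dataset_from_env_spec(
      env_spec, episode_count=2, episode_length=3)
  num_steps = 0
  for episode in dataset:
    num_steps += sum(1 for _ in episode[rlds_types.STEPS])
  return num_steps  # 2 episodes x 3 steps = 6.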
|
acme-master
|
acme/testing/fakes.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for snapshotter."""
import os
import time
from typing import Any, Sequence
from acme import core
from acme.jax import snapshotter
from acme.jax import types
from acme.testing import test_utils
import jax.numpy as jnp
from absl.testing import absltest
def _model0(params, x1, x2):
return params['w0'] * jnp.sin(x1) + params['w1'] * jnp.cos(x2)
def _model1(params, x):
return params['p0'] * jnp.log(x)
class _DummyVariableSource(core.VariableSource):
def __init__(self):
self._params_model0 = {
'w0': jnp.ones([2, 3], dtype=jnp.float32),
'w1': 2 * jnp.ones([2, 3], dtype=jnp.float32),
}
self._params_model1 = {
'p0': jnp.ones([3, 1], dtype=jnp.float32),
}
def get_variables(self, names: Sequence[str]) -> Sequence[Any]: # pytype: disable=signature-mismatch # overriding-return-type-checks
variables = []
for n in names:
if n == 'params_model0':
variables.append(self._params_model0)
elif n == 'params_model1':
variables.append(self._params_model1)
else:
        raise ValueError(f'Unknown variable name: {n}')
return variables
def _get_model0(variable_source: core.VariableSource) -> types.ModelToSnapshot:
return types.ModelToSnapshot(
model=_model0,
params=variable_source.get_variables(['params_model0'])[0],
dummy_kwargs={
'x1': jnp.ones([2, 3], dtype=jnp.float32),
'x2': jnp.ones([2, 3], dtype=jnp.float32),
},
)
def _get_model1(variable_source: core.VariableSource) -> types.ModelToSnapshot:
return types.ModelToSnapshot(
model=_model1,
params=variable_source.get_variables(['params_model1'])[0],
dummy_kwargs={
'x': jnp.ones([3, 1], dtype=jnp.float32),
},
)
class SnapshotterTest(test_utils.TestCase):
def setUp(self):
super().setUp()
self._test_models = {'model0': _get_model0, 'model1': _get_model1}
def _check_snapshot(self, directory: str, name: str):
self.assertTrue(os.path.exists(os.path.join(directory, name, 'model0')))
self.assertTrue(os.path.exists(os.path.join(directory, name, 'model1')))
def test_snapshotter(self):
"""Checks that the Snapshotter class saves as expected."""
directory = self.get_tempdir()
models_snapshotter = snapshotter.JAXSnapshotter(
variable_source=_DummyVariableSource(),
models=self._test_models,
path=directory,
max_to_keep=2,
add_uid=False,
)
models_snapshotter._save()
# The snapshots are written in a folder of the form:
# PATH/{time.strftime}/MODEL_NAME
first_snapshots = os.listdir(directory)
self.assertEqual(len(first_snapshots), 1)
self._check_snapshot(directory, first_snapshots[0])
# Make sure that the second snapshot is constructed.
time.sleep(1.1)
models_snapshotter._save()
snapshots = os.listdir(directory)
self.assertEqual(len(snapshots), 2)
self._check_snapshot(directory, snapshots[0])
self._check_snapshot(directory, snapshots[1])
# Make sure that new snapshotter deletes the oldest snapshot upon _save().
time.sleep(1.1)
models_snapshotter2 = snapshotter.JAXSnapshotter(
variable_source=_DummyVariableSource(),
models=self._test_models,
path=directory,
max_to_keep=2,
add_uid=False,
)
self.assertEqual(snapshots, os.listdir(directory))
time.sleep(1.1)
models_snapshotter2._save()
snapshots = os.listdir(directory)
self.assertNotIn(first_snapshots[0], snapshots)
self.assertEqual(len(snapshots), 2)
self._check_snapshot(directory, snapshots[0])
self._check_snapshot(directory, snapshots[1])
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/jax/snapshotter_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to compute running statistics."""
import dataclasses
from typing import Any, Optional, Tuple, Union
from acme import types
from acme.utils import tree_utils
import chex
import jax
import jax.numpy as jnp
import numpy as np
import tree
Path = Tuple[Any, ...]
"""Path in a nested structure.
A path is a tuple of indices (normally strings for maps and integers for
arrays and tuples) that uniquely identifies a subtree in the nested structure.
See
https://tree.readthedocs.io/en/latest/api.html#tree.map_structure_with_path
for more details.
"""
def _is_prefix(a: Path, b: Path) -> bool:
"""Returns whether `a` is a prefix of `b`."""
return b[:len(a)] == a
def _zeros_like(nest: types.Nest, dtype=None) -> types.NestedArray:
return jax.tree_map(lambda x: jnp.zeros(x.shape, dtype or x.dtype), nest)
def _ones_like(nest: types.Nest, dtype=None) -> types.NestedArray:
return jax.tree_map(lambda x: jnp.ones(x.shape, dtype or x.dtype), nest)
@chex.dataclass(frozen=True)
class NestedMeanStd:
"""A container for running statistics (mean, std) of possibly nested data."""
mean: types.NestedArray
std: types.NestedArray
@chex.dataclass(frozen=True)
class RunningStatisticsState(NestedMeanStd):
"""Full state of running statistics computation."""
count: Union[int, jnp.ndarray]
summed_variance: types.NestedArray
@dataclasses.dataclass(frozen=True)
class NestStatisticsConfig:
"""Specifies how to compute statistics for Nests with the same structure.
Attributes:
paths: A sequence of Nest paths to compute statistics for. If there is a
collision between paths (one is a prefix of the other), the shorter path
takes precedence.
"""
paths: Tuple[Path, ...] = ((),)
def _is_path_included(config: NestStatisticsConfig, path: Path) -> bool:
"""Returns whether the path is included in the config."""
# A path is included in the config if it corresponds to a tree node that
# belongs to a subtree rooted at the node corresponding to some path in
# the config.
return any(_is_prefix(config_path, path) for config_path in config.paths)
def init_state(nest: types.Nest) -> RunningStatisticsState:
"""Initializes the running statistics for the given nested structure."""
dtype = jnp.float64 if jax.config.jax_enable_x64 else jnp.float32
return RunningStatisticsState( # pytype: disable=wrong-arg-types # jax-ndarray
count=0.,
mean=_zeros_like(nest, dtype=dtype),
summed_variance=_zeros_like(nest, dtype=dtype),
# Initialize with ones to make sure normalization works correctly
# in the initial state.
std=_ones_like(nest, dtype=dtype))
def _validate_batch_shapes(batch: types.NestedArray,
reference_sample: types.NestedArray,
batch_dims: Tuple[int, ...]) -> None:
"""Verifies shapes of the batch leaves against the reference sample.
Checks that batch dimensions are the same in all leaves in the batch.
Checks that non-batch dimensions for all leaves in the batch are the same
as in the reference sample.
Arguments:
batch: the nested batch of data to be verified.
reference_sample: the nested array to check non-batch dimensions.
batch_dims: a Tuple of indices of batch dimensions in the batch shape.
Returns:
None.
"""
def validate_node_shape(reference_sample: jnp.ndarray,
batch: jnp.ndarray) -> None:
expected_shape = batch_dims + reference_sample.shape
assert batch.shape == expected_shape, f'{batch.shape} != {expected_shape}'
tree_utils.fast_map_structure(validate_node_shape, reference_sample, batch)
def update(state: RunningStatisticsState,
batch: types.NestedArray,
*,
config: NestStatisticsConfig = NestStatisticsConfig(),
weights: Optional[jnp.ndarray] = None,
std_min_value: float = 1e-6,
std_max_value: float = 1e6,
pmap_axis_name: Optional[str] = None,
validate_shapes: bool = True) -> RunningStatisticsState:
"""Updates the running statistics with the given batch of data.
Note: data batch and state elements (mean, etc.) must have the same structure.
Note: by default will use int32 for counts and float32 for accumulated
variance. This results in an integer overflow after 2^31 data points and
degrading precision after 2^24 batch updates or even earlier if variance
updates have large dynamic range.
To improve precision, consider setting jax_enable_x64 to True, see
https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
Arguments:
state: The running statistics before the update.
batch: The data to be used to update the running statistics.
    config: The config that specifies for which leaves of the nested structure
      the running statistics should be computed.
weights: Weights of the batch data. Should match the batch dimensions.
Passing a weight of 2. should be equivalent to updating on the
corresponding data point twice.
std_min_value: Minimum value for the standard deviation.
std_max_value: Maximum value for the standard deviation.
pmap_axis_name: Name of the pmapped axis, if any.
validate_shapes: If true, the shapes of all leaves of the batch will be
validated. Enabled by default. Doesn't impact performance when jitted.
Returns:
Updated running statistics.
"""
# We require exactly the same structure to avoid issues when flattened
# batch and state have different order of elements.
tree.assert_same_structure(batch, state.mean)
batch_shape = tree.flatten(batch)[0].shape
# We assume the batch dimensions always go first.
batch_dims = batch_shape[:len(batch_shape) - tree.flatten(state.mean)[0].ndim]
batch_axis = range(len(batch_dims))
if weights is None:
step_increment = np.prod(batch_dims)
else:
step_increment = jnp.sum(weights)
if pmap_axis_name is not None:
step_increment = jax.lax.psum(step_increment, axis_name=pmap_axis_name)
count = state.count + step_increment
# Validation is important. If the shapes don't match exactly, but are
# compatible, arrays will be silently broadcasted resulting in incorrect
# statistics.
if validate_shapes:
if weights is not None:
if weights.shape != batch_dims:
raise ValueError(f'{weights.shape} != {batch_dims}')
_validate_batch_shapes(batch, state.mean, batch_dims)
def _compute_node_statistics(
path: Path, mean: jnp.ndarray, summed_variance: jnp.ndarray,
batch: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
assert isinstance(mean, jnp.ndarray), type(mean)
assert isinstance(summed_variance, jnp.ndarray), type(summed_variance)
if not _is_path_included(config, path):
# Return unchanged.
return mean, summed_variance
# The mean and the sum of past variances are updated with Welford's
# algorithm using batches (see https://stackoverflow.com/q/56402955).
diff_to_old_mean = batch - mean
if weights is not None:
expanded_weights = jnp.reshape(
weights,
list(weights.shape) + [1] * (batch.ndim - weights.ndim))
diff_to_old_mean = diff_to_old_mean * expanded_weights
mean_update = jnp.sum(diff_to_old_mean, axis=batch_axis) / count
if pmap_axis_name is not None:
mean_update = jax.lax.psum(
mean_update, axis_name=pmap_axis_name)
mean = mean + mean_update
diff_to_new_mean = batch - mean
variance_update = diff_to_old_mean * diff_to_new_mean
variance_update = jnp.sum(variance_update, axis=batch_axis)
if pmap_axis_name is not None:
variance_update = jax.lax.psum(variance_update, axis_name=pmap_axis_name)
summed_variance = summed_variance + variance_update
return mean, summed_variance
updated_stats = tree_utils.fast_map_structure_with_path(
_compute_node_statistics, state.mean, state.summed_variance, batch)
# map_structure_up_to is slow, so shortcut if we know the input is not
# structured.
if isinstance(state.mean, jnp.ndarray):
mean, summed_variance = updated_stats
else:
# Reshape the updated stats from `nest(mean, summed_variance)` to
# `nest(mean), nest(summed_variance)`.
mean, summed_variance = [
tree.map_structure_up_to(
state.mean, lambda s, i=idx: s[i], updated_stats)
for idx in range(2)
]
def compute_std(path: Path, summed_variance: jnp.ndarray,
std: jnp.ndarray) -> jnp.ndarray:
assert isinstance(summed_variance, jnp.ndarray)
if not _is_path_included(config, path):
return std
# Summed variance can get negative due to rounding errors.
summed_variance = jnp.maximum(summed_variance, 0)
std = jnp.sqrt(summed_variance / count)
std = jnp.clip(std, std_min_value, std_max_value)
return std
std = tree_utils.fast_map_structure_with_path(compute_std, summed_variance,
state.std)
return RunningStatisticsState(
count=count, mean=mean, summed_variance=summed_variance, std=std)
def normalize(batch: types.NestedArray,
mean_std: NestedMeanStd,
max_abs_value: Optional[float] = None) -> types.NestedArray:
"""Normalizes data using running statistics."""
def normalize_leaf(data: jnp.ndarray, mean: jnp.ndarray,
std: jnp.ndarray) -> jnp.ndarray:
# Only normalize inexact types.
if not jnp.issubdtype(data.dtype, jnp.inexact):
return data
data = (data - mean) / std
if max_abs_value is not None:
# TODO(b/124318564): remove pylint directive
data = jnp.clip(data, -max_abs_value, +max_abs_value) # pylint: disable=invalid-unary-operand-type
return data
return tree_utils.fast_map_structure(normalize_leaf, batch, mean_std.mean,
mean_std.std)
def denormalize(batch: types.NestedArray,
mean_std: NestedMeanStd) -> types.NestedArray:
"""Denormalizes values in a nested structure using the given mean/std.
Only values of inexact types are denormalized.
See https://numpy.org/doc/stable/_images/dtype-hierarchy.png for Numpy type
hierarchy.
Args:
batch: a nested structure containing batch of data.
mean_std: mean and standard deviation used for denormalization.
Returns:
Nested structure with denormalized values.
"""
def denormalize_leaf(data: jnp.ndarray, mean: jnp.ndarray,
std: jnp.ndarray) -> jnp.ndarray:
# Only denormalize inexact types.
if not np.issubdtype(data.dtype, np.inexact):
return data
return data * std + mean
return tree_utils.fast_map_structure(denormalize_leaf, batch, mean_std.mean,
mean_std.std)
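# Editorial usage sketch (not part of the original module): a typical
# init/update/normalize cycle over a flat batch of observations. The shapes
# and values here are arbitrary.
def _example_running_statistics() -> types.NestedArray:
  dummy_observation = jnp.zeros((3,), dtype=jnp.float32)
  state = init_state(dummy_observation)
  batch = jnp.arange(12, dtype=jnp.float32).reshape((4, 3))
  state = update(state, batch)
  # Mean/std are now per-dimension statistics over the 4 batch elements.
  return normalize(batch, state)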
@dataclasses.dataclass(frozen=True)
class NestClippingConfig:
"""Specifies how to clip Nests with the same structure.
Attributes:
path_map: A map that specifies how to clip values in Nests with the same
structure. Keys correspond to paths in the nest. Values are maximum
absolute values to use for clipping. If there is a collision between paths
(one path is a prefix of the other), the behavior is undefined.
"""
path_map: Tuple[Tuple[Path, float], ...] = ()
def get_clip_config_for_path(config: NestClippingConfig,
path: Path) -> NestClippingConfig:
"""Returns the config for a subtree from the leaf defined by the path."""
# Start with an empty config.
path_map = []
for map_path, max_abs_value in config.path_map:
if _is_prefix(map_path, path):
return NestClippingConfig(path_map=(((), max_abs_value),))
if _is_prefix(path, map_path):
path_map.append((map_path[len(path):], max_abs_value))
return NestClippingConfig(path_map=tuple(path_map))
def clip(batch: types.NestedArray,
clipping_config: NestClippingConfig) -> types.NestedArray:
"""Clips the batch."""
def max_abs_value_for_path(path: Path, x: jnp.ndarray) -> Optional[float]:
del x # Unused, needed by interface.
return next((max_abs_value
for clipping_path, max_abs_value in clipping_config.path_map
if _is_prefix(clipping_path, path)), None)
max_abs_values = tree_utils.fast_map_structure_with_path(
max_abs_value_for_path, batch)
def clip_leaf(data: jnp.ndarray,
max_abs_value: Optional[float]) -> jnp.ndarray:
if max_abs_value is not None:
# TODO(b/124318564): remove pylint directive
data = jnp.clip(data, -max_abs_value, +max_abs_value) # pylint: disable=invalid-unary-operand-type
return data
return tree_utils.fast_map_structure(clip_leaf, batch, max_abs_values)
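# Editorial usage sketch (not part of the original module): clipping only the
# 'reward' leaf of a nested batch to [-1, 1] while leaving other leaves
# untouched.
def _example_clip() -> types.NestedArray:
  batch = {
      'observation': 3. * jnp.ones((2, 4)),
      'reward': 5. * jnp.ones((2,)),
  }
  config = NestClippingConfig(path_map=((('reward',), 1.0),))
  return clip(batch, config)  # 'reward' becomes 1.0 everywhere.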
@dataclasses.dataclass(frozen=True)
class NestNormalizationConfig:
"""Specifies how to normalize Nests with the same structure.
Attributes:
stats_config: A config that defines how to compute running statistics to be
used for normalization.
clip_config: A config that defines how to clip normalized values.
"""
stats_config: NestStatisticsConfig = NestStatisticsConfig()
clip_config: NestClippingConfig = NestClippingConfig()
|
acme-master
|
acme/jax/running_statistics.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility classes for saving model checkpoints."""
import datetime
import os
import pickle
from typing import Any
from absl import logging
from acme import core
from acme.tf import savers as tf_savers
import jax
import numpy as np
import tree
# Internal imports.
CheckpointState = Any
_DEFAULT_CHECKPOINT_TTL = int(datetime.timedelta(days=5).total_seconds())
_ARRAY_NAME = 'array_nest'
_EXEMPLAR_NAME = 'nest_exemplar'
def restore_from_path(ckpt_dir: str) -> CheckpointState:
"""Restore the state stored in ckpt_dir."""
array_path = os.path.join(ckpt_dir, _ARRAY_NAME)
exemplar_path = os.path.join(ckpt_dir, _EXEMPLAR_NAME)
with open(exemplar_path, 'rb') as f:
exemplar = pickle.load(f)
with open(array_path, 'rb') as f:
files = np.load(f, allow_pickle=True)
flat_state = [files[key] for key in files.files]
unflattened_tree = tree.unflatten_as(exemplar, flat_state)
def maybe_convert_to_python(value, numpy):
return value if numpy else value.item()
return tree.map_structure(maybe_convert_to_python, unflattened_tree, exemplar)
def save_to_path(ckpt_dir: str, state: CheckpointState):
"""Save the state in ckpt_dir."""
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
is_numpy = lambda x: isinstance(x, (np.ndarray, jax.Array))
flat_state = tree.flatten(state)
nest_exemplar = tree.map_structure(is_numpy, state)
array_path = os.path.join(ckpt_dir, _ARRAY_NAME)
logging.info('Saving flattened array nest to %s', array_path)
def _disabled_seek(*_):
raise AttributeError('seek() is disabled on this object.')
with open(array_path, 'wb') as f:
setattr(f, 'seek', _disabled_seek)
np.savez(f, *flat_state)
exemplar_path = os.path.join(ckpt_dir, _EXEMPLAR_NAME)
logging.info('Saving nest exemplar to %s', exemplar_path)
with open(exemplar_path, 'wb') as f:
pickle.dump(nest_exemplar, f)
# Use TF checkpointer.
class Checkpointer(tf_savers.Checkpointer):
def __init__(
self,
object_to_save: core.Saveable,
directory: str = '~/acme',
subdirectory: str = 'default',
**tf_checkpointer_kwargs):
super().__init__(dict(saveable=object_to_save),
directory=directory,
subdirectory=subdirectory,
**tf_checkpointer_kwargs)
CheckpointingRunner = tf_savers.CheckpointingRunner
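# Editorial usage sketch (not part of the original module): wiring a Saveable
# learner into the Checkpointer above. `my_learner` is a hypothetical object
# implementing core.Saveable; keyword arguments are forwarded to the
# underlying TF checkpointer.
#
#   checkpointer = Checkpointer(
#       my_learner, directory='~/acme', subdirectory='learner',
#       time_delta_minutes=5)
#   checkpointer.save()     # Saves at most once per time_delta_minutes.
#   checkpointer.restore()  # Restores the latest checkpoint, if any.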
|
acme-master
|
acme/jax/savers.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils."""
from acme.jax import utils
import chex
import jax
import jax.numpy as jnp
import numpy as np
from absl.testing import absltest
chex.set_n_cpu_devices(4)
class JaxUtilsTest(absltest.TestCase):
def test_batch_concat(self):
batch_size = 32
inputs = [
jnp.zeros(shape=(batch_size, 2)),
{
'foo': jnp.zeros(shape=(batch_size, 5, 3))
},
[jnp.zeros(shape=(batch_size, 1))],
jnp.zeros(shape=(batch_size,)),
]
output_shape = utils.batch_concat(inputs).shape
expected_shape = [batch_size, 2 + 5 * 3 + 1 + 1]
self.assertSequenceEqual(output_shape, expected_shape)
def test_mapreduce(self):
@utils.mapreduce
def f(y, x):
return jnp.square(x + y)
z = f(jnp.ones(shape=(32,)), jnp.ones(shape=(32,)))
z = jax.device_get(z)
self.assertEqual(z, 4)
def test_get_from_first_device(self):
sharded = {
'a':
jax.device_put_sharded(
list(jnp.arange(16).reshape([jax.local_device_count(), 4])),
jax.local_devices()),
'b':
jax.device_put_sharded(
list(jnp.arange(8).reshape([jax.local_device_count(), 2])),
jax.local_devices(),
),
}
want = {
'a': jnp.arange(4),
'b': jnp.arange(2),
}
# Get zeroth device content as DeviceArray.
device_arrays = utils.get_from_first_device(sharded, as_numpy=False)
jax.tree_map(
lambda x: self.assertIsInstance(x, jax.Array),
device_arrays)
jax.tree_map(np.testing.assert_array_equal, want, device_arrays)
# Get the zeroth device content as numpy arrays.
numpy_arrays = utils.get_from_first_device(sharded, as_numpy=True)
jax.tree_map(lambda x: self.assertIsInstance(x, np.ndarray), numpy_arrays)
jax.tree_map(np.testing.assert_array_equal, want, numpy_arrays)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/jax/utils_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for savers."""
from unittest import mock
from acme import core
from acme.jax import savers
from acme.testing import test_utils
from acme.utils import paths
import jax.numpy as jnp
import numpy as np
import tree
from absl.testing import absltest
class DummySaveable(core.Saveable):
def __init__(self, state):
self.state = state
def save(self):
return self.state
def restore(self, state):
self.state = state
def nest_assert_equal(a, b):
tree.map_structure(np.testing.assert_array_equal, a, b)
class SaverTest(test_utils.TestCase):
def setUp(self):
super().setUp()
self._test_state = {
'foo': jnp.ones(shape=(8, 4), dtype=jnp.float32),
'bar': [jnp.zeros(shape=(3, 2), dtype=jnp.int32)],
'baz': 3,
}
def test_save_restore(self):
"""Checks that we can save and restore state."""
directory = self.get_tempdir()
savers.save_to_path(directory, self._test_state)
result = savers.restore_from_path(directory)
nest_assert_equal(result, self._test_state)
def test_checkpointer(self):
"""Checks that the Checkpointer class saves and restores as expected."""
with mock.patch.object(paths, 'get_unique_id') as mock_unique_id:
mock_unique_id.return_value = ('test',)
# Given a path and some stateful object...
directory = self.get_tempdir()
x = DummySaveable(self._test_state)
# If we checkpoint it...
checkpointer = savers.Checkpointer(x, directory, time_delta_minutes=0)
checkpointer.save()
# The checkpointer should restore the object's state.
x.state = None
checkpointer.restore()
nest_assert_equal(x.state, self._test_state)
# Checkpointers should also attempt a restore at construction time.
x.state = None
savers.Checkpointer(x, directory, time_delta_minutes=0)
nest_assert_equal(x.state, self._test_state)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/jax/savers_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility classes for snapshotting models."""
import os
import time
from typing import Callable, Dict, List, Optional, Sequence, Tuple
from absl import logging
from acme import core
from acme.jax import types
from acme.utils import paths
from acme.utils import signals
from jax.experimental import jax2tf
import tensorflow as tf
# Internal imports.
class JAXSnapshotter(core.Worker):
"""Periodically fetches new version of params and stores tf.saved_models."""
  # NOTE: External contributors, please refrain from modifying the high-level
  # API defined here.
def __init__(self,
variable_source: core.VariableSource,
models: Dict[str, Callable[[core.VariableSource],
types.ModelToSnapshot]],
path: str,
subdirectory: Optional[str] = None,
max_to_keep: Optional[int] = None,
add_uid: bool = False):
self._variable_source = variable_source
self._models = models
if subdirectory is not None:
self._path = paths.process_path(path, subdirectory, add_uid=add_uid)
else:
self._path = paths.process_path(path, add_uid=add_uid)
self._max_to_keep = max_to_keep
self._snapshot_paths: Optional[List[str]] = None
# Handle preemption signal. Note that this must happen in the main thread.
def _signal_handler(self):
logging.info('Caught SIGTERM: forcing models save.')
self._save()
def _save(self):
if not self._snapshot_paths:
# Lazy discovery of already existing snapshots.
self._snapshot_paths = os.listdir(self._path)
self._snapshot_paths.sort(reverse=True)
snapshot_location = os.path.join(self._path, time.strftime('%Y%m%d-%H%M%S'))
if self._snapshot_paths and self._snapshot_paths[0] == snapshot_location:
logging.info('Snapshot for the current time already exists.')
return
    # To make sure the models are captured as close as possible to the same
    # time, we gather all the `ModelToSnapshot` objects in a first loop. We
    # then convert and save them in a second loop, as that operation can be
    # slow.
models_and_paths = self._get_models_and_paths(path=snapshot_location)
self._snapshot_paths.insert(0, snapshot_location)
for model, saving_path in models_and_paths:
self._snapshot_model(model=model, saving_path=saving_path)
# Delete any excess snapshots.
while self._max_to_keep and len(self._snapshot_paths) > self._max_to_keep:
paths.rmdir(os.path.join(self._path, self._snapshot_paths.pop()))
def _get_models_and_paths(
self, path: str) -> Sequence[Tuple[types.ModelToSnapshot, str]]:
"""Gets the models to save asssociated with their saving path."""
models_and_paths = []
for name, model_fn in self._models.items():
model = model_fn(self._variable_source)
model_path = os.path.join(path, name)
models_and_paths.append((model, model_path))
return models_and_paths
def _snapshot_model(self, model: types.ModelToSnapshot,
saving_path: str) -> None:
module = model_to_tf_module(model)
tf.saved_model.save(module, saving_path)
def run(self):
"""Runs the saver."""
with signals.runtime_terminator(self._signal_handler):
while True:
self._save()
time.sleep(5 * 60)
def model_to_tf_module(model: types.ModelToSnapshot) -> tf.Module:
def jax_fn_to_save(**kwargs):
return model.model(model.params, **kwargs)
module = tf.Module()
module.f = tf.function(jax2tf.convert(jax_fn_to_save), autograph=False)
# Traces input to ensure the model has the correct shapes.
module.f(**model.dummy_kwargs)
return module
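# Example usage on the loading side (a minimal, hedged sketch; `saving_path`
# and `dummy_kwargs` stand in for whatever was used when snapshotting):
#   loaded = tf.saved_model.load(saving_path)
#   outputs = loaded.f(**dummy_kwargs)  # Same signature as model.model(params, ...).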
|
acme-master
|
acme/jax/snapshotter.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
acme-master
|
acme/jax/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common JAX type definitions."""
import dataclasses
from typing import Any, Callable, Dict, Generic, Mapping, TypeVar
from acme import types
import chex
import dm_env
import jax
import jax.numpy as jnp
PRNGKey = jax.random.KeyArray
Networks = TypeVar('Networks')
"""Container for all agent network components."""
Policy = TypeVar('Policy')
"""Function or container for agent policy functions."""
Sample = TypeVar('Sample')
"""Sample from the demonstrations or replay buffer."""
TrainingState = TypeVar('TrainingState')
TrainingMetrics = Mapping[str, jnp.ndarray]
"""Metrics returned by the training step.
Typically these are logged, so the values are expected to be scalars.
"""
Variables = Mapping[str, types.NestedArray]
"""Mapping of variable collections.
A mapping of variable collections, as defined by Learner.get_variables.
The keys are the collection names, the values are nested arrays representing
the values of the corresponding collection variables.
"""
@chex.dataclass(frozen=True, mappable_dataclass=False)
class TrainingStepOutput(Generic[TrainingState]):
state: TrainingState
metrics: TrainingMetrics
Seed = int
EnvironmentFactory = Callable[[Seed], dm_env.Environment]
@dataclasses.dataclass
class ModelToSnapshot:
"""Stores all necessary info to be able to save a model.
Attributes:
model: a jax function to be saved.
params: fixed params to be passed to the function.
dummy_kwargs: arguments to be passed to the function.
"""
model: Any # Callable[params, **dummy_kwargs]
params: Any
dummy_kwargs: Dict[str, Any]
|
acme-master
|
acme/jax/types.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX type definitions for imitation and apprenticeship learning algorithms."""
from typing import TypeVar
# Common TypeVars that correspond to various aspects of the direct RL algorithm.
DirectPolicyNetwork = TypeVar('DirectPolicyNetwork')
DirectRLNetworks = TypeVar('DirectRLNetworks')
DirectRLTrainingState = TypeVar('DirectRLTrainingState')
|
acme-master
|
acme/jax/imitation_learning_types.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for JAX."""
import functools
import itertools
import queue
import threading
from typing import Callable, Iterable, Iterator, NamedTuple, Optional, Sequence, Tuple, TypeVar
from absl import logging
from acme import core
from acme import types
from acme.jax import types as jax_types
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import reverb
import tree
F = TypeVar('F', bound=Callable)
N = TypeVar('N', bound=types.NestedArray)
T = TypeVar('T')
NUM_PREFETCH_THREADS = 1
def add_batch_dim(values: types.Nest) -> types.NestedArray:
return jax.tree_map(lambda x: jnp.expand_dims(x, axis=0), values)
def _flatten(x: jnp.ndarray, num_batch_dims: int) -> jnp.ndarray:
"""Flattens the input, preserving the first ``num_batch_dims`` dimensions.
If the input has fewer than ``num_batch_dims`` dimensions, it is returned
unchanged.
If the input has exactly ``num_batch_dims`` dimensions, an extra dimension
is added. This is needed to handle batched scalars.
  Args:
x: the input array to flatten.
num_batch_dims: number of dimensions to preserve.
Returns:
flattened input.
"""
# TODO(b/173492429): consider throwing an error instead.
if x.ndim < num_batch_dims:
return x
return jnp.reshape(x, list(x.shape[:num_batch_dims]) + [-1])
def batch_concat(
values: types.NestedArray,
num_batch_dims: int = 1,
) -> jnp.ndarray:
"""Flatten and concatenate nested array structure, keeping batch dims."""
flatten_fn = lambda x: _flatten(x, num_batch_dims)
flat_leaves = tree.map_structure(flatten_fn, values)
return jnp.concatenate(tree.flatten(flat_leaves), axis=-1)
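# Example (a minimal sketch, not part of the original module): with the default
# num_batch_dims=1 every leaf is flattened past the batch axis and the results
# are concatenated along the final axis, e.g.
#   batch_concat({'a': jnp.zeros((4, 2, 3)), 'b': jnp.zeros((4, 5))}).shape
#   == (4, 11)  # 2 * 3 + 5 features per batch element.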
def zeros_like(nest: types.Nest, dtype=None) -> types.NestedArray:
return jax.tree_map(lambda x: jnp.zeros(x.shape, dtype or x.dtype), nest)
def ones_like(nest: types.Nest, dtype=None) -> types.NestedArray:
return jax.tree_map(lambda x: jnp.ones(x.shape, dtype or x.dtype), nest)
def squeeze_batch_dim(nest: types.Nest) -> types.NestedArray:
return jax.tree_map(lambda x: jnp.squeeze(x, axis=0), nest)
def to_numpy_squeeze(values: types.Nest) -> types.NestedArray:
"""Converts to numpy and squeezes out dummy batch dimension."""
return jax.tree_map(lambda x: np.asarray(x).squeeze(axis=0), values)
def to_numpy(values: types.Nest) -> types.NestedArray:
return jax.tree_map(np.asarray, values)
def fetch_devicearray(values: types.Nest) -> types.Nest:
"""Fetches and converts any DeviceArrays to np.ndarrays."""
return tree.map_structure(_fetch_devicearray, values)
def _fetch_devicearray(x):
if isinstance(x, jax.Array):
return np.asarray(x)
return x
def batch_to_sequence(values: types.Nest) -> types.NestedArray:
return jax.tree_map(
lambda x: jnp.transpose(x, axes=(1, 0, *range(2, len(x.shape)))), values)
def tile_array(array: jnp.ndarray, multiple: int) -> jnp.ndarray:
"""Tiles `multiple` copies of `array` along a new leading axis."""
return jnp.stack([array] * multiple)
def tile_nested(inputs: types.Nest, multiple: int) -> types.Nest:
"""Tiles tensors in a nested structure along a new leading axis."""
tile = functools.partial(tile_array, multiple=multiple)
return jax.tree_map(tile, inputs)
def maybe_recover_lstm_type(state: types.NestedArray) -> types.NestedArray:
"""Recovers the type hk.LSTMState if LSTMState is in the type name.
When the recurrent state of recurrent neural networks (RNN) is deserialized,
for example when it is sampled from replay, it is sometimes repacked in a type
that is identical to the source type but not the correct type itself. When
using this state as the initial state in an hk.dynamic_unroll, this will
cause hk.dynamic_unroll to raise an error as it requires its input and output
states to be identical.
Args:
state: a nested structure of arrays representing the state of an RNN.
Returns:
Either the state unchanged if it is anything but an LSTMState, otherwise
returns the state arrays properly contained in an hk.LSTMState.
"""
return hk.LSTMState(*state) if type(state).__name__ == 'LSTMState' else state
def prefetch(
iterable: Iterable[T],
buffer_size: int = 5,
device: Optional[jax.Device] = None,
num_threads: int = NUM_PREFETCH_THREADS,
) -> core.PrefetchingIterator[T]:
"""Returns prefetching iterator with additional 'ready' method."""
return PrefetchIterator(iterable, buffer_size, device, num_threads)
class PrefetchingSplit(NamedTuple):
host: types.NestedArray
device: types.NestedArray
_SplitFunction = Callable[[types.NestedArray], PrefetchingSplit]
def keep_key_on_host(sample: reverb.ReplaySample) -> PrefetchingSplit:
"""Returns PrefetchingSplit which keeps uint64 reverb key on the host.
We want to avoid truncation of the uint64 reverb key by JAX.
Args:
sample: a sample from a Reverb replay buffer.
Returns:
PrefetchingSplit with device having the reverb sample, and key on host.
"""
return PrefetchingSplit(host=sample.info.key, device=sample)
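# Example (a hedged sketch, assuming the underlying iterator yields
# reverb.ReplaySample items): pass this as `split_fn` to device_put or
# multi_device_put so the uint64 key stays on the host while the sample goes
# to the device(s):
#   it = multi_device_put(dataset, jax.local_devices(), split_fn=keep_key_on_host)
#   split = next(iter(it))  # split.host: keys, split.device: sharded samples.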
def device_put(
iterable: Iterable[types.NestedArray],
device: jax.Device,
split_fn: Optional[_SplitFunction] = None,
):
"""Returns iterator that samples an item and places it on the device."""
return PutToDevicesIterable(
iterable=iterable,
pmapped_user=False,
devices=[device],
split_fn=split_fn)
def multi_device_put(
iterable: Iterable[types.NestedArray],
devices: Sequence[jax.Device],
split_fn: Optional[_SplitFunction] = None,
):
"""Returns iterator that, per device, samples an item and places on device."""
return PutToDevicesIterable(
iterable=iterable, pmapped_user=True, devices=devices, split_fn=split_fn)
class PutToDevicesIterable(Iterable[types.NestedArray]):
"""Per device, samples an item from iterator and places on device.
if pmapped_user:
Items from the resulting generator are intended to be used in a pmapped
function. Every element is a ShardedDeviceArray or (nested) Python container
thereof. A single next() call to this iterator results in len(devices)
calls to the underlying iterator. The returned items are put one on each
device.
if not pmapped_user:
Places a sample from the iterator on the given device.
Yields:
If no split_fn is specified:
DeviceArray/ShardedDeviceArray or (nested) Python container thereof
representing the elements of shards stacked together, with each shard
backed by physical device memory specified by the corresponding entry in
devices.
If split_fn is specified:
PrefetchingSplit where the .host element is a stacked numpy array or
      (nested) Python container thereof. The .device element is a
DeviceArray/ShardedDeviceArray or (nested) Python container thereof.
Raises:
StopIteration: if there are not enough items left in the iterator to place
one sample on each device.
Any error thrown by the iterable_function. Note this is not raised inside
the producer, but after it finishes executing.
"""
def __init__(
self,
iterable: Iterable[types.NestedArray],
pmapped_user: bool,
devices: Sequence[jax.Device],
split_fn: Optional[_SplitFunction] = None,
):
"""Constructs PutToDevicesIterable.
Args:
iterable: A python iterable. This is used to build the python prefetcher.
Note that each iterable should only be passed to this function once as
iterables aren't thread safe.
pmapped_user: whether the user of data from this iterator is implemented
using pmapping.
      devices: Devices used for prefetching.
      split_fn: Optional function applied to every element from the iterable to
        split the parts of it that will be kept on the host from the parts that
        will be sent to the device.
Raises:
      ValueError: If the devices list is empty, or if pmapped_user=False and
        more than one device is provided.
"""
self.num_devices = len(devices)
if self.num_devices == 0:
raise ValueError('At least one device must be specified.')
if (not pmapped_user) and (self.num_devices != 1):
raise ValueError('User is not implemented with pmapping but len(devices) '
f'= {len(devices)} is not equal to 1! Devices given are:'
f'\n{devices}')
self.iterable = iterable
self.pmapped_user = pmapped_user
self.split_fn = split_fn
self.devices = devices
self.iterator = iter(self.iterable)
def __iter__(self) -> Iterator[types.NestedArray]:
# It is important to structure the Iterable like this, because in
# JustPrefetchIterator we must build a new iterable for each thread.
# This is crucial if working with tensorflow datasets because tf.Graph
# objects are thread local.
self.iterator = iter(self.iterable)
return self
def __next__(self) -> types.NestedArray:
try:
if not self.pmapped_user:
item = next(self.iterator)
if self.split_fn is None:
return jax.device_put(item, self.devices[0])
item_split = self.split_fn(item)
return PrefetchingSplit(
host=item_split.host,
device=jax.device_put(item_split.device, self.devices[0]))
items = itertools.islice(self.iterator, self.num_devices)
items = tuple(items)
if len(items) < self.num_devices:
raise StopIteration
if self.split_fn is None:
return jax.device_put_sharded(tuple(items), self.devices)
else:
# ((host: x1, device: y1), ..., (host: xN, device: yN)).
items_split = (self.split_fn(item) for item in items)
# (host: (x1, ..., xN), device: (y1, ..., yN)).
split = tree.map_structure_up_to(
PrefetchingSplit(None, None), lambda *x: x, *items_split)
return PrefetchingSplit(
host=np.stack(split.host),
device=jax.device_put_sharded(split.device, self.devices))
except StopIteration:
raise
except Exception: # pylint: disable=broad-except
logging.exception('Error for %s', self.iterable)
raise
def sharded_prefetch(
iterable: Iterable[types.NestedArray],
buffer_size: int = 5,
num_threads: int = 1,
split_fn: Optional[_SplitFunction] = None,
devices: Optional[Sequence[jax.Device]] = None,
) -> core.PrefetchingIterator:
"""Performs sharded prefetching from an iterable in separate threads.
Elements from the resulting generator are intended to be used in a jax.pmap
call. Every element is a sharded prefetched array with an additional replica
dimension and corresponds to jax.local_device_count() elements from the
original iterable.
Args:
iterable: A python iterable. This is used to build the python prefetcher.
Note that each iterable should only be passed to this function once as
iterables aren't thread safe.
buffer_size (int): Number of elements to keep in the prefetch buffer.
num_threads (int): Number of threads.
    split_fn: Optional function applied to every element from the iterable to
      split the parts of it that will be kept on the host from the parts that
      will be sent to the device.
    devices: Devices used for prefetching. Optional, jax.local_devices() by
      default.
Returns:
Prefetched elements from the original iterable with additional replica
dimension.
Raises:
    ValueError: if the buffer_size < 1.
Any error thrown by the iterable_function. Note this is not raised inside
the producer, but after it finishes executing.
"""
devices = devices or jax.local_devices()
iterable = PutToDevicesIterable(
iterable=iterable, pmapped_user=True, devices=devices, split_fn=split_fn)
return prefetch(iterable, buffer_size, device=None, num_threads=num_threads)
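# Example (a hedged sketch; `dataset` is an assumed tf.data.Dataset of batches):
#   iterator = sharded_prefetch(dataset.as_numpy_iterator())
#   batch = next(iterator)  # Leaves have a leading [num_local_devices] axis,
#                           # ready to be consumed by a jax.pmap-ed update.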
def replicate_in_all_devices(
nest: N, devices: Optional[Sequence[jax.Device]] = None
) -> N:
"""Replicate array nest in all available devices."""
devices = devices or jax.local_devices()
return jax.device_put_sharded([nest] * len(devices), devices)
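# Example (a minimal sketch): replication adds a leading device axis, and
# get_from_first_device (below) undoes it, e.g. for params with leaves of
# shape [H]:
#   replicated = replicate_in_all_devices(params)  # leaves: [num_devices, H]
#   params_0 = get_from_first_device(replicated)   # leaves: [H]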
def get_from_first_device(nest: N, as_numpy: bool = True) -> N:
"""Gets the first array of a nest of `jax.Array`s.
Args:
nest: A nest of `jax.Array`s.
as_numpy: If `True` then each `DeviceArray` that is retrieved is transformed
(and copied if not on the host machine) into a `np.ndarray`.
Returns:
The first array of a nest of `jax.Array`s. Note that if
`as_numpy=False` then the array will be a `DeviceArray` (which will live on
the same device as the sharded device array). If `as_numpy=True` then the
array will be copied to the host machine and converted into a `np.ndarray`.
"""
zeroth_nest = jax.tree_map(lambda x: x[0], nest)
return jax.device_get(zeroth_nest) if as_numpy else zeroth_nest
def mapreduce(
f: F,
reduce_fn: Optional[Callable[[jax.Array], jax.Array]] = None,
**vmap_kwargs,
) -> F:
"""A simple decorator that transforms `f` into (`reduce_fn` o vmap o f).
By default, we vmap over axis 0, and the `reduce_fn` is jnp.mean over axis 0.
Note that the call signature of `f` is invariant under this transformation.
If, for example, f has shape signature [H, W] -> [N], then mapreduce(f)
(with the default arguments) will have shape signature [B, H, W] -> [N].
Args:
f: A pure function over examples.
reduce_fn: A pure function that reduces DeviceArrays -> DeviceArrays.
**vmap_kwargs: Keyword arguments to forward to `jax.vmap`.
Returns:
g: A pure function over batches of examples.
"""
if reduce_fn is None:
reduce_fn = lambda x: jnp.mean(x, axis=0)
vmapped_f = jax.vmap(f, **vmap_kwargs)
def g(*args, **kwargs):
return jax.tree_map(reduce_fn, vmapped_f(*args, **kwargs))
return g
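# Example (a minimal sketch mirroring the docstring): a per-example loss keeps
# its call signature but now accepts a batch and returns the mean loss.
#   loss = lambda x, y: jnp.square(x - y).sum()   # [D], [D] -> scalar
#   batched_loss = mapreduce(loss)                # [B, D], [B, D] -> scalar mean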
_TrainingState = TypeVar('_TrainingState')
_TrainingData = TypeVar('_TrainingData')
_TrainingAux = TypeVar('_TrainingAux')
# TODO(b/192806089): migrate all callers to process_many_batches and remove this
# method.
def process_multiple_batches(
process_one_batch: Callable[[_TrainingState, _TrainingData],
Tuple[_TrainingState, _TrainingAux]],
num_batches: int,
postprocess_aux: Optional[Callable[[_TrainingAux], _TrainingAux]] = None
) -> Callable[[_TrainingState, _TrainingData], Tuple[_TrainingState,
_TrainingAux]]:
"""Makes 'process_one_batch' process multiple batches at once.
Args:
process_one_batch: a function that takes 'state' and 'data', and returns
'new_state' and 'aux' (for example 'metrics').
    num_batches: how many batches to process at once.
postprocess_aux: how to merge the extra information, defaults to taking the
mean.
Returns:
A function with the same interface as 'process_one_batch' which processes
multiple batches at once.
"""
assert num_batches >= 1
if num_batches == 1:
if not postprocess_aux:
return process_one_batch
def _process_one_batch(state, data):
state, aux = process_one_batch(state, data)
return state, postprocess_aux(aux)
return _process_one_batch
if postprocess_aux is None:
postprocess_aux = lambda x: jax.tree_map(jnp.mean, x)
def _process_multiple_batches(state, data):
data = jax.tree_map(
lambda a: jnp.reshape(a, (num_batches, -1, *a.shape[1:])), data)
state, aux = jax.lax.scan(
process_one_batch, state, data, length=num_batches)
return state, postprocess_aux(aux)
return _process_multiple_batches
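# Example (a hedged sketch; `sgd_step` is an assumed per-batch update of the
# form (state, batch) -> (state, metrics)):
#   multi_step = process_multiple_batches(sgd_step, num_batches=4)
#   state, metrics = multi_step(state, big_batch)  # Leading dim divisible by 4.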
def process_many_batches(
process_one_batch: Callable[[_TrainingState, _TrainingData],
jax_types.TrainingStepOutput[_TrainingState]],
num_batches: int,
postprocess_aux: Optional[Callable[[jax_types.TrainingMetrics],
jax_types.TrainingMetrics]] = None
) -> Callable[[_TrainingState, _TrainingData],
jax_types.TrainingStepOutput[_TrainingState]]:
"""The version of 'process_multiple_batches' with stronger typing."""
def _process_one_batch(
state: _TrainingState,
data: _TrainingData) -> Tuple[_TrainingState, jax_types.TrainingMetrics]:
result = process_one_batch(state, data)
return result.state, result.metrics
func = process_multiple_batches(_process_one_batch, num_batches,
postprocess_aux)
def _process_many_batches(
state: _TrainingState,
data: _TrainingData) -> jax_types.TrainingStepOutput[_TrainingState]:
state, aux = func(state, data)
return jax_types.TrainingStepOutput(state, aux)
return _process_many_batches
def weighted_softmax(x: jnp.ndarray, weights: jnp.ndarray, axis: int = 0):
  """Computes a softmax along `axis` with each term scaled by `weights`."""
  # Subtract the max for numerical stability. `keepdims=True` is needed so the
  # subtraction broadcasts correctly for any choice of `axis`.
  x = x - jnp.max(x, axis=axis, keepdims=True)
  return weights * jnp.exp(x) / jnp.sum(weights * jnp.exp(x),
                                        axis=axis, keepdims=True)
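# Sanity check (a minimal sketch): with unit weights this reduces to a plain
# softmax along `axis`:
#   weighted_softmax(x, jnp.ones_like(x), axis=-1) ~ jax.nn.softmax(x, axis=-1)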
def sample_uint32(random_key: jax_types.PRNGKey) -> int:
"""Returns an integer uniformly distributed in 0..2^32-1."""
iinfo = jnp.iinfo(jnp.int32)
# randint only accepts int32 values as min and max.
jax_random = jax.random.randint(
random_key, shape=(), minval=iinfo.min, maxval=iinfo.max, dtype=jnp.int32)
return np.uint32(jax_random).item()
class PrefetchIterator(core.PrefetchingIterator):
"""Performs prefetching from an iterable in separate threads.
  Its interface is additionally extended with a `ready` method, which tells
  whether there is any data waiting for processing, and a `retrieved_elements`
  method, which reports the number of elements retrieved from the iterator.
Yields:
Prefetched elements from the original iterable.
Raises:
ValueError: if the buffer_size < 1.
StopIteration: If the iterable contains no more items.
Any error thrown by the iterable_function. Note this is not raised inside
the producer, but after it finishes executing.
"""
def __init__(
self,
iterable: Iterable[types.NestedArray],
buffer_size: int = 5,
device: Optional[jax.Device] = None,
num_threads: int = NUM_PREFETCH_THREADS,
):
"""Constructs PrefetchIterator.
Args:
iterable: A python iterable. This is used to build the python prefetcher.
Note that each iterable should only be passed to this function once as
iterables aren't thread safe.
buffer_size (int): Number of elements to keep in the prefetch buffer.
      device (deprecated): Optionally place items from the iterable on the given
        device. If None, the items are returned as given by the iterable. This
        argument is deprecated; the recommended usage is to wrap the iterable
        using utils.device_put or utils.multi_device_put before calling
        utils.prefetch.
num_threads (int): Number of threads.
"""
if buffer_size < 1:
raise ValueError('the buffer_size should be >= 1')
self.buffer = queue.Queue(maxsize=buffer_size)
self.producer_error = []
self.end = object()
self.iterable = iterable
self.device = device
self.count = 0
# Start producer threads.
for _ in range(num_threads):
threading.Thread(target=self.producer, daemon=True).start()
def producer(self):
"""Enqueues items from `iterable` on a given thread."""
try:
# Build a new iterable for each thread. This is crucial if working with
# tensorflow datasets because tf.Graph objects are thread local.
      for item in self.iterable:
        if self.device:
          # Keep the result of device_put; otherwise the transfer would be
          # discarded and the buffered item would stay on its original device.
          item = jax.device_put(item, self.device)
        self.buffer.put(item)
except Exception as e: # pylint: disable=broad-except
logging.exception('Error in producer thread for %s', self.iterable)
self.producer_error.append(e)
finally:
self.buffer.put(self.end)
def __iter__(self):
return self
def ready(self):
return not self.buffer.empty()
def retrieved_elements(self):
return self.count
def __next__(self):
value = self.buffer.get()
if value is self.end:
if self.producer_error:
        raise self.producer_error[0]
raise StopIteration
self.count += 1
return value
|
acme-master
|
acme/jax/utils.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines Inference Server class used for centralised inference."""
import dataclasses
import datetime
import threading
from typing import Any, Callable, Generic, Optional, Sequence, TypeVar
import acme
from acme.jax import variable_utils
import jax
import launchpad as lp
@dataclasses.dataclass
class InferenceServerConfig:
"""Configuration options for centralised inference.
Attributes:
batch_size: How many elements to batch together per single inference call.
Auto-computed when not specified.
update_period: Frequency of updating variables from the variable source.
It is passed to VariableClient. Auto-computed when not specified.
    timeout: Time after which an incomplete batch is executed (the batch is
      padded, so the batch handler is always called with batch_size elements).
      By default the timeout is effectively disabled (set to 30 days).
"""
batch_size: Optional[int] = None
update_period: Optional[int] = None
timeout: datetime.timedelta = datetime.timedelta(days=30)
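# Example (a hedged sketch): leaving batch_size/update_period unset lets the
# server auto-compute them, while a short timeout forces padded execution of
# incomplete batches:
#   config = InferenceServerConfig(timeout=datetime.timedelta(milliseconds=50))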
InferenceServerHandler = TypeVar('InferenceServerHandler')
class InferenceServer(Generic[InferenceServerHandler]):
"""Centralised, batched inference server."""
def __init__(
self,
handler: InferenceServerHandler,
variable_source: acme.VariableSource,
devices: Sequence[jax.Device],
config: InferenceServerConfig,
):
"""Constructs an inference server object.
Args:
handler: A callable or a mapping of callables to be exposed
through the inference server.
      variable_source: Source of variables.
devices: Devices used for executing handlers. All devices are used in
parallel.
config: Inference Server configuration.
"""
self._variable_source = variable_source
self._variable_client = None
self._keys = []
self._devices = devices
self._config = config
self._call_cnt = 0
self._device_params = [None] * len(self._devices)
self._device_params_ids = [None] * len(self._devices)
self._mutex = threading.Lock()
self._handler = jax.tree_map(self._build_handler, handler, is_leaf=callable)
@property
def handler(self) -> InferenceServerHandler:
return self._handler
def _dereference_params(self, arg):
"""Replaces VariableReferences with their corresponding param values."""
if not isinstance(arg, variable_utils.VariableReference):
# All arguments but VariableReference are returned without modifications.
return arg
# Due to batching dimension we take the first element.
variable_name = arg.variable_name[0]
if variable_name not in self._keys:
# Create a new VariableClient which also serves new variables.
self._keys.append(variable_name)
self._variable_client = variable_utils.VariableClient(
client=self._variable_source,
key=self._keys,
update_period=self._config.update_period)
params = self._variable_client.params
device_idx = self._call_cnt % len(self._devices)
# Select device via round robin, and update its params if they changed.
if self._device_params_ids[device_idx] != id(params):
self._device_params_ids[device_idx] = id(params)
self._device_params[device_idx] = jax.device_put(
params, self._devices[device_idx])
# Return the params that are located on the chosen device.
device_params = self._device_params[device_idx]
if len(self._keys) == 1:
return device_params
return device_params[self._keys.index(variable_name)]
def _build_handler(self, handler: Callable[..., Any]) -> Callable[..., Any]:
"""Builds a batched handler for a given callable handler and its name."""
def dereference_params_and_call_handler(*args, **kwargs):
with self._mutex:
# Dereference args corresponding to params, leaving others unchanged.
args_with_dereferenced_params = [
self._dereference_params(arg) for arg in args
]
kwargs_with_dereferenced_params = {
key: self._dereference_params(value)
for key, value in kwargs.items()
}
self._call_cnt += 1
# Maybe update params, depending on client configuration.
if self._variable_client is not None:
self._variable_client.update()
return handler(*args_with_dereferenced_params,
**kwargs_with_dereferenced_params)
return lp.batched_handler(
batch_size=self._config.batch_size,
timeout=self._config.timeout,
pad_batch=True,
max_parallelism=2 * len(self._devices))(
dereference_params_and_call_handler)
|
acme-master
|
acme/jax/inference_server.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for running statistics utilities."""
import functools
import math
from typing import NamedTuple
from acme import specs
from acme.jax import running_statistics
import jax
from jax.config import config as jax_config
import jax.numpy as jnp
import numpy as np
import tree
from absl.testing import absltest
update_and_validate = functools.partial(
running_statistics.update, validate_shapes=True)
class TestNestedSpec(NamedTuple):
# Note: the fields are intentionally in reverse order to test ordering.
a: specs.Array
b: specs.Array
class RunningStatisticsTest(absltest.TestCase):
def setUp(self):
super().setUp()
jax_config.update('jax_enable_x64', False)
def assert_allclose(self,
actual: jnp.ndarray,
desired: jnp.ndarray,
err_msg: str = '') -> None:
np.testing.assert_allclose(
actual, desired, atol=1e-5, rtol=1e-5, err_msg=err_msg)
def test_normalize(self):
state = running_statistics.init_state(specs.Array((5,), jnp.float32))
x = jnp.arange(200, dtype=jnp.float32).reshape(20, 2, 5)
x1, x2, x3, x4 = jnp.split(x, 4, axis=0)
state = update_and_validate(state, x1)
state = update_and_validate(state, x2)
state = update_and_validate(state, x3)
state = update_and_validate(state, x4)
normalized = running_statistics.normalize(x, state)
mean = jnp.mean(normalized)
std = jnp.std(normalized)
self.assert_allclose(mean, jnp.zeros_like(mean))
self.assert_allclose(std, jnp.ones_like(std))
def test_init_normalize(self):
state = running_statistics.init_state(specs.Array((5,), jnp.float32))
x = jnp.arange(200, dtype=jnp.float32).reshape(20, 2, 5)
normalized = running_statistics.normalize(x, state)
self.assert_allclose(normalized, x)
def test_one_batch_dim(self):
state = running_statistics.init_state(specs.Array((5,), jnp.float32))
x = jnp.arange(10, dtype=jnp.float32).reshape(2, 5)
state = update_and_validate(state, x)
normalized = running_statistics.normalize(x, state)
mean = jnp.mean(normalized, axis=0)
std = jnp.std(normalized, axis=0)
self.assert_allclose(mean, jnp.zeros_like(mean))
self.assert_allclose(std, jnp.ones_like(std))
def test_clip(self):
state = running_statistics.init_state(specs.Array((), jnp.float32))
x = jnp.arange(5, dtype=jnp.float32)
state = update_and_validate(state, x)
normalized = running_statistics.normalize(x, state, max_abs_value=1.0)
mean = jnp.mean(normalized)
std = jnp.std(normalized)
self.assert_allclose(mean, jnp.zeros_like(mean))
self.assert_allclose(std, jnp.ones_like(std) * math.sqrt(0.6))
def test_nested_normalize(self):
state = running_statistics.init_state({
'a': specs.Array((5,), jnp.float32),
'b': specs.Array((2,), jnp.float32)
})
x1 = {
'a': jnp.arange(20, dtype=jnp.float32).reshape(2, 2, 5),
'b': jnp.arange(8, dtype=jnp.float32).reshape(2, 2, 2)
}
x2 = {
'a': jnp.arange(20, dtype=jnp.float32).reshape(2, 2, 5) + 20,
'b': jnp.arange(8, dtype=jnp.float32).reshape(2, 2, 2) + 8
}
x3 = {
'a': jnp.arange(40, dtype=jnp.float32).reshape(4, 2, 5),
'b': jnp.arange(16, dtype=jnp.float32).reshape(4, 2, 2)
}
state = update_and_validate(state, x1)
state = update_and_validate(state, x2)
state = update_and_validate(state, x3)
normalized = running_statistics.normalize(x3, state)
mean = tree.map_structure(lambda x: jnp.mean(x, axis=(0, 1)), normalized)
std = tree.map_structure(lambda x: jnp.std(x, axis=(0, 1)), normalized)
tree.map_structure(
lambda x: self.assert_allclose(x, jnp.zeros_like(x)),
mean)
tree.map_structure(
lambda x: self.assert_allclose(x, jnp.ones_like(x)),
std)
def test_validation(self):
state = running_statistics.init_state(specs.Array((1, 2, 3), jnp.float32))
x = jnp.arange(12, dtype=jnp.float32).reshape(2, 2, 3)
with self.assertRaises(AssertionError):
update_and_validate(state, x)
x = jnp.arange(3, dtype=jnp.float32).reshape(1, 1, 3)
with self.assertRaises(AssertionError):
update_and_validate(state, x)
def test_int_not_normalized(self):
state = running_statistics.init_state(specs.Array((), jnp.int32))
x = jnp.arange(5, dtype=jnp.int32)
state = update_and_validate(state, x)
normalized = running_statistics.normalize(x, state)
np.testing.assert_array_equal(normalized, x)
def test_pmap_update_nested(self):
local_device_count = jax.local_device_count()
state = running_statistics.init_state({
'a': specs.Array((5,), jnp.float32),
'b': specs.Array((2,), jnp.float32)
})
x = {
'a': (jnp.arange(15 * local_device_count,
dtype=jnp.float32)).reshape(local_device_count, 3, 5),
'b': (jnp.arange(6 * local_device_count,
dtype=jnp.float32)).reshape(local_device_count, 3, 2),
}
devices = jax.local_devices()
state = jax.device_put_replicated(state, devices)
pmap_axis_name = 'i'
state = jax.pmap(
functools.partial(update_and_validate, pmap_axis_name=pmap_axis_name),
pmap_axis_name)(state, x)
state = jax.pmap(
functools.partial(update_and_validate, pmap_axis_name=pmap_axis_name),
pmap_axis_name)(state, x)
normalized = jax.pmap(running_statistics.normalize)(x, state)
mean = tree.map_structure(lambda x: jnp.mean(x, axis=(0, 1)), normalized)
std = tree.map_structure(lambda x: jnp.std(x, axis=(0, 1)), normalized)
tree.map_structure(
lambda x: self.assert_allclose(x, jnp.zeros_like(x)), mean)
tree.map_structure(
lambda x: self.assert_allclose(x, jnp.ones_like(x)), std)
def test_different_structure_normalize(self):
spec = TestNestedSpec(
a=specs.Array((5,), jnp.float32), b=specs.Array((2,), jnp.float32))
state = running_statistics.init_state(spec)
x = {
'a': jnp.arange(20, dtype=jnp.float32).reshape(2, 2, 5),
'b': jnp.arange(8, dtype=jnp.float32).reshape(2, 2, 2)
}
with self.assertRaises(TypeError):
state = update_and_validate(state, x)
def test_weights(self):
state = running_statistics.init_state(specs.Array((), jnp.float32))
x = jnp.arange(5, dtype=jnp.float32)
x_weights = jnp.ones_like(x)
y = 2 * x + 5
y_weights = 2 * x_weights
z = jnp.concatenate([x, y])
weights = jnp.concatenate([x_weights, y_weights])
state = update_and_validate(state, z, weights=weights)
self.assertEqual(state.mean, (jnp.mean(x) + 2 * jnp.mean(y)) / 3)
big_z = jnp.concatenate([x, y, y])
normalized = running_statistics.normalize(big_z, state)
self.assertAlmostEqual(jnp.mean(normalized), 0., places=6)
self.assertAlmostEqual(jnp.std(normalized), 1., places=6)
def test_normalize_config(self):
x = jnp.arange(200, dtype=jnp.float32).reshape(20, 2, 5)
x_split = jnp.split(x, 5, axis=0)
y = jnp.arange(160, dtype=jnp.float32).reshape(20, 2, 4)
y_split = jnp.split(y, 5, axis=0)
z = {'a': x, 'b': y}
z_split = [{'a': xx, 'b': yy} for xx, yy in zip(x_split, y_split)]
update = jax.jit(running_statistics.update, static_argnames=('config',))
config = running_statistics.NestStatisticsConfig((('a',),))
state = running_statistics.init_state({
'a': specs.Array((5,), jnp.float32),
'b': specs.Array((4,), jnp.float32)
})
# Test initialization from the first element.
state = update(state, z_split[0], config=config)
state = update(state, z_split[1], config=config)
state = update(state, z_split[2], config=config)
state = update(state, z_split[3], config=config)
state = update(state, z_split[4], config=config)
normalize = jax.jit(running_statistics.normalize)
normalized = normalize(z, state)
for key in normalized:
mean = jnp.mean(normalized[key], axis=(0, 1))
std = jnp.std(normalized[key], axis=(0, 1))
if key == 'a':
self.assert_allclose(
mean,
jnp.zeros_like(mean),
err_msg=f'key:{key} mean:{mean} normalized:{normalized[key]}')
self.assert_allclose(
std,
jnp.ones_like(std),
err_msg=f'key:{key} std:{std} normalized:{normalized[key]}')
else:
assert key == 'b'
np.testing.assert_array_equal(
normalized[key],
z[key],
err_msg=f'z:{z[key]} normalized:{normalized[key]}')
def test_clip_config(self):
x = jnp.arange(10, dtype=jnp.float32) - 5
y = jnp.arange(8, dtype=jnp.float32) - 4
z = {'x': x, 'y': y}
max_abs_x = 2
config = running_statistics.NestClippingConfig(((('x',), max_abs_x),))
clipped_z = running_statistics.clip(z, config)
clipped_x = jnp.clip(a=x, a_min=-max_abs_x, a_max=max_abs_x)
np.testing.assert_array_equal(clipped_z['x'], clipped_x)
np.testing.assert_array_equal(clipped_z['y'], z['y'])
def test_denormalize(self):
state = running_statistics.init_state(specs.Array((5,), jnp.float32))
x = jnp.arange(100, dtype=jnp.float32).reshape(10, 2, 5)
x1, x2 = jnp.split(x, 2, axis=0)
state = update_and_validate(state, x1)
state = update_and_validate(state, x2)
normalized = running_statistics.normalize(x, state)
mean = jnp.mean(normalized)
std = jnp.std(normalized)
self.assert_allclose(mean, jnp.zeros_like(mean))
self.assert_allclose(std, jnp.ones_like(std))
denormalized = running_statistics.denormalize(normalized, state)
self.assert_allclose(denormalized, x)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/jax/running_statistics_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ActorCore wrapper to use observation stacking."""
from typing import Any, Mapping, NamedTuple, Tuple
from acme import specs
from acme import types as acme_types
from acme.agents.jax import actor_core as actor_core_lib
from acme.jax import networks as networks_lib
from acme.jax import types as jax_types
from acme.jax import utils as jax_utils
from acme.tf import utils as tf_utils
import jax
import jax.numpy as jnp
import reverb
import tensorflow as tf
import tree
ActorState = Any
Observation = networks_lib.Observation
Action = networks_lib.Action
Params = networks_lib.Params
class StackerState(NamedTuple):
stack: jax.Array # Observations stacked along the final dimension.
needs_reset: jax.Array # A scalar boolean.
class StackingActorState(NamedTuple):
actor_state: ActorState
stacker_state: StackerState
# TODO(bshahr): Consider moving to jax_utils, extending current tiling function.
def tile_nested_array(nest: acme_types.NestedArray, num: int, axis: int):
def _tile_array(array: jnp.ndarray) -> jnp.ndarray:
reps = [1] * array.ndim
reps[axis] = num
return jnp.tile(array, reps)
return jax.tree_map(_tile_array, nest)
class ObservationStacker:
"""Class used to handle agent-side observation stacking.
Once an ObservationStacker is initialized and an initial_state is obtained
from it, one can stack nested observations by simply calling the
ObservationStacker and passing it the new observation and current state of its
observation stack.
See also observation_stacking.wrap_actor_core for hints on how to use it.
"""
def __init__(self,
observation_spec: acme_types.NestedSpec,
stack_size: int = 4):
def _repeat_observation(state: StackerState,
first_observation: Observation) -> StackerState:
return state._replace(
needs_reset=jnp.array(False),
stack=tile_nested_array(first_observation, stack_size - 1, axis=-1))
self._zero_stack = tile_nested_array(
jax_utils.zeros_like(observation_spec), stack_size - 1, axis=-1)
self._repeat_observation = _repeat_observation
def __call__(self, inputs: Observation,
state: StackerState) -> Tuple[Observation, StackerState]:
# If this is a first observation, initialize the stack by repeating it,
# otherwise leave it intact.
state = jax.lax.cond(
state.needs_reset,
self._repeat_observation,
lambda state, *args: state, # No-op on state.
state,
inputs)
# Concatenate frames along the final axis (assumed to be for channels).
output = jax.tree_map(lambda *x: jnp.concatenate(x, axis=-1),
state.stack, inputs)
# Update the frame stack by adding the input and dropping the first
# observation in the stack. Note that we use the final dimension as each
# leaf in the nested observation may have a different last dim.
new_state = state._replace(
stack=jax.tree_map(lambda x, y: y[..., x.shape[-1]:], inputs, output))
return output, new_state
def initial_state(self) -> StackerState:
return StackerState(stack=self._zero_stack, needs_reset=jnp.array(True))
def get_adjusted_environment_spec(environment_spec: specs.EnvironmentSpec,
stack_size: int) -> specs.EnvironmentSpec:
"""Returns a spec where the observation spec accounts for stacking."""
def stack_observation_spec(obs_spec: specs.Array) -> specs.Array:
"""Adjusts last axis shape to account for observation stacking."""
new_shape = obs_spec.shape[:-1] + (obs_spec.shape[-1] * stack_size,)
return obs_spec.replace(shape=new_shape)
adjusted_observation_spec = jax.tree_map(stack_observation_spec,
environment_spec.observations)
return environment_spec._replace(observations=adjusted_observation_spec)
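# Example (a minimal sketch): for an RGB observation spec of shape (84, 84, 3)
# and stack_size=4, the adjusted observation spec has shape (84, 84, 12); only
# the final (channel) dimension is multiplied.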
def wrap_actor_core(
actor_core: actor_core_lib.ActorCore,
observation_spec: specs.Array,
num_stacked_observations: int = 1) -> actor_core_lib.ActorCore:
"""Wraps an actor core so that it performs observation stacking."""
if num_stacked_observations <= 0:
raise ValueError(
'Number of stacked observations must be strictly positive.'
f' Received num_stacked_observations={num_stacked_observations}.')
if num_stacked_observations == 1:
# Return unwrapped core when a trivial stack size is requested.
return actor_core
obs_stacker = ObservationStacker(
observation_spec=observation_spec, stack_size=num_stacked_observations)
def init(key: jax_types.PRNGKey) -> StackingActorState:
return StackingActorState(
actor_state=actor_core.init(key),
stacker_state=obs_stacker.initial_state())
def select_action(
params: Params,
observations: Observation,
state: StackingActorState,
) -> Tuple[Action, StackingActorState]:
stacked_observations, stacker_state = obs_stacker(observations,
state.stacker_state)
actions, actor_state = actor_core.select_action(params,
stacked_observations,
state.actor_state)
new_state = StackingActorState(
actor_state=actor_state, stacker_state=stacker_state)
return actions, new_state
def get_extras(state: StackingActorState) -> Mapping[str, jnp.ndarray]:
return actor_core.get_extras(state.actor_state)
return actor_core_lib.ActorCore(
init=init, select_action=select_action, get_extras=get_extras)
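# Example (a hedged sketch; `actor_core` and `env_spec` are assumed to exist):
#   stacked_core = wrap_actor_core(actor_core, env_spec.observations,
#                                  num_stacked_observations=4)
# Use get_adjusted_environment_spec(env_spec, 4) when building networks so
# their input shapes match the stacked observations.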
def stack_reverb_observation(sample: reverb.ReplaySample,
stack_size: int) -> reverb.ReplaySample:
"""Stacks observations in a Reverb sample.
This function is meant to be used on the dataset creation side as a
post-processing function before batching.
Warnings!
* Only works if SequenceAdder is in end_of_episode_behavior=CONTINUE mode.
* Only tested on RGB and scalar (shape = (1,)) observations.
* At episode starts, this function repeats the first observation to form a
stack. Could consider using zeroed observations instead.
* At episode starts, this function always selects the latest possible
stacked trajectory. Could consider randomizing the start index of the
sequence.
Args:
sample: A sample coming from a Reverb replay table. Should be an unbatched
sequence so that sample.data.observation is a nested structure of
time-major tensors.
stack_size: Number of observations to stack.
Returns:
A new sample where sample.data.observation has the same nested structure as
the incoming sample but with every tensor having its final dimension
multiplied by `stack_size`.
"""
def _repeat_first(sequence: tf.Tensor) -> tf.Tensor:
repeated_first_step = tf_utils.tile_tensor(sequence[0], stack_size - 1)
return tf.concat([repeated_first_step, sequence], 0)[:-(stack_size - 1)]
def _stack_observation(observation: tf.Tensor) -> tf.Tensor:
stack = [tf.roll(observation, i, axis=0) for i in range(stack_size)]
stack.reverse() # Reverse stack order to be chronological.
return tf.concat(stack, axis=-1)
# Maybe repeat the first observation, if at the start of an episode.
data = tf.cond(sample.data.start_of_episode[0],
lambda: tree.map_structure(_repeat_first, sample.data),
lambda: sample.data)
# Stack observation in the sample's data.
data_with_stacked_obs = data._replace(
observation=tree.map_structure(_stack_observation, data.observation))
# Truncate the start of the sequence due to the first stacks containing the
# final observations that were rolled over to the start.
data = tree.map_structure(lambda x: x[stack_size - 1:], data_with_stacked_obs)
return reverb.ReplaySample(info=sample.info, data=data)
|
acme-master
|
acme/jax/observation_stacking.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for variable utilities."""
from acme.jax import variable_utils
from acme.testing import fakes
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import tree
from absl.testing import absltest
def dummy_network(x):
return hk.nets.MLP([50, 10])(x)
class VariableClientTest(absltest.TestCase):
def test_update(self):
init_fn, _ = hk.without_apply_rng(
hk.transform(dummy_network))
params = init_fn(jax.random.PRNGKey(1), jnp.zeros(shape=(1, 32)))
variable_source = fakes.VariableSource(params)
variable_client = variable_utils.VariableClient(
variable_source, key='policy')
variable_client.update_and_wait()
tree.map_structure(np.testing.assert_array_equal, variable_client.params,
params)
def test_multiple_keys(self):
init_fn, _ = hk.without_apply_rng(
hk.transform(dummy_network))
params = init_fn(jax.random.PRNGKey(1), jnp.zeros(shape=(1, 32)))
steps = jnp.zeros(shape=1)
variables = {'network': params, 'steps': steps}
variable_source = fakes.VariableSource(variables, use_default_key=False)
variable_client = variable_utils.VariableClient(
variable_source, key=['network', 'steps'])
variable_client.update_and_wait()
tree.map_structure(np.testing.assert_array_equal, variable_client.params[0],
params)
tree.map_structure(np.testing.assert_array_equal, variable_client.params[1],
steps)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/jax/variable_utils_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Variable utilities for JAX."""
from concurrent import futures
import datetime
import time
from typing import List, NamedTuple, Optional, Sequence, Union
from acme import core
from acme.jax import networks as network_types
import jax
class VariableReference(NamedTuple):
variable_name: str
class ReferenceVariableSource(core.VariableSource):
"""Variable source which returns references instead of values.
This is passed to each actor when using a centralized inference server. The
actor uses this special variable source to get references rather than values.
These references are then passed to calls to the inference server, which will
dereference them to obtain the value of the corresponding variables at
inference time. This avoids passing around copies of variables from each
actor to the inference server.
"""
def get_variables(self, names: Sequence[str]) -> List[VariableReference]:
return [VariableReference(name) for name in names]
class VariableClient:
"""A variable client for updating variables from a remote source."""
def __init__(
self,
client: core.VariableSource,
key: Union[str, Sequence[str]],
update_period: Union[int, datetime.timedelta] = 1,
device: Optional[Union[str, jax.Device]] = None,
):
"""Initializes the variable client.
Args:
client: A variable source from which we fetch variables.
      key: Which variables to request. When multiple keys are used, the params
        property will return a list of params.
update_period: Interval between fetches, specified as either (int) a
number of calls to update() between actual fetches or (timedelta) a time
interval that has to pass since the last fetch.
device: The name of a JAX device to put variables on. If None (default),
VariableClient won't put params on any device.
"""
self._update_period = update_period
self._call_counter = 0
self._last_call = time.time()
self._client = client
    self._params: Optional[Sequence[network_types.Params]] = None
self._device = device
if isinstance(self._device, str):
self._device = jax.devices(device)[0]
self._executor = futures.ThreadPoolExecutor(max_workers=1)
if isinstance(key, str):
key = [key]
self._key = key
self._request = lambda k=key: client.get_variables(k)
self._future: Optional[futures.Future] = None # pylint: disable=g-bare-generic
self._async_request = lambda: self._executor.submit(self._request)
def update(self, wait: bool = False) -> None:
"""Periodically updates the variables with the latest copy from the source.
If wait is True, a blocking request is executed. Any active request will be
cancelled.
If wait is False, this method makes an asynchronous request for variables.
Args:
wait: Whether to execute asynchronous (False) or blocking updates (True).
Defaults to False.
"""
# Track calls (we only update periodically).
self._call_counter += 1
# Return if it's not time to fetch another update.
if isinstance(self._update_period, datetime.timedelta):
if self._update_period.total_seconds() + self._last_call > time.time():
return
else:
if self._call_counter < self._update_period:
return
if wait:
if self._future is not None:
if self._future.running():
self._future.cancel()
self._future = None
self._call_counter = 0
self._last_call = time.time()
self.update_and_wait()
return
# Return early if we are still waiting for a previous request to come back.
if self._future and not self._future.done():
return
# Get a future and add the copy function as a callback.
self._call_counter = 0
self._last_call = time.time()
self._future = self._async_request()
self._future.add_done_callback(lambda f: self._callback(f.result()))
def update_and_wait(self):
"""Immediately update and block until we get the result."""
self._callback(self._request())
def _callback(self, params_list: List[network_types.Params]):
if self._device and not isinstance(self._client, ReferenceVariableSource):
# Move variables to a proper device.
self._params = jax.device_put(params_list, self._device)
else:
self._params = params_list
@property
def device(self) -> Optional[jax.Device]:
return self._device
@property
def params(self) -> Union[network_types.Params, List[network_types.Params]]:
"""Returns the first params for one key, otherwise the whole params list."""
if self._params is None:
self.update_and_wait()
if len(self._params) == 1:
return self._params[0]
else:
return self._params
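# Example (a hedged sketch; `learner` is an assumed core.VariableSource):
#   client = VariableClient(learner, key='policy', update_period=100)
#   client.update()          # Non-blocking; actually fetches every 100th call.
#   params = client.params   # First access triggers a blocking fetch.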
|
acme-master
|
acme/jax/variable_utils.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the IMPALA loss function."""
from acme.adders import reverb as adders
from acme.jax.losses import impala
from acme.utils.tree_utils import tree_map
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import reverb
from absl.testing import absltest
class ImpalaTest(absltest.TestCase):
def test_shapes(self):
#
batch_size = 2
sequence_len = 3
num_actions = 5
hidden_size = 7
# Define a trivial recurrent actor-critic network.
@hk.without_apply_rng
@hk.transform
def unroll_fn_transformed(observations, state):
lstm = hk.LSTM(hidden_size)
embedding, state = hk.dynamic_unroll(lstm, observations, state)
logits = hk.Linear(num_actions)(embedding)
values = jnp.squeeze(hk.Linear(1)(embedding), axis=-1)
return (logits, values), state
@hk.without_apply_rng
@hk.transform
def initial_state_fn():
return hk.LSTM(hidden_size).initial_state(None)
# Initial recurrent network state.
initial_state = initial_state_fn.apply(None)
# Make some fake data.
observations = np.ones(shape=(sequence_len, 50))
actions = np.random.randint(num_actions, size=sequence_len)
rewards = np.random.rand(sequence_len)
discounts = np.ones(shape=(sequence_len,))
batch_tile = tree_map(lambda x: np.tile(x, [batch_size, *([1] * x.ndim)]))
seq_tile = tree_map(lambda x: np.tile(x, [sequence_len, *([1] * x.ndim)]))
extras = {
'logits': np.random.rand(sequence_len, num_actions),
'core_state': seq_tile(initial_state),
}
# Package up the data into a ReverbSample.
data = adders.Step(
observations,
actions,
rewards,
discounts,
extras=extras,
start_of_episode=())
data = batch_tile(data)
sample = reverb.ReplaySample(info=None, data=data)
# Initialise parameters.
rng = hk.PRNGSequence(1)
params = unroll_fn_transformed.init(next(rng), observations, initial_state)
# Make loss function.
loss_fn = impala.impala_loss(
unroll_fn_transformed.apply, discount=0.99)
# Return value should be scalar.
loss, metrics = loss_fn(params, sample)
loss = jax.device_get(loss)
self.assertEqual(loss.shape, ())
for value in metrics.values():
value = jax.device_get(value)
self.assertEqual(value.shape, ())
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/jax/losses/impala_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the MPO loss.
The MPO loss uses MPOParams, which can be initialized using init_params,
to track the temperature and the dual variables.
Tensor shapes are annotated, where helpful, as follow:
B: batch size,
N: number of sampled actions, see MPO paper for more details,
D: dimensionality of the action space.
"""
from typing import NamedTuple, Optional, Tuple, Union
import jax
import jax.numpy as jnp
import tensorflow_probability
tfp = tensorflow_probability.substrates.jax
tfd = tensorflow_probability.substrates.jax.distributions
_MPO_FLOAT_EPSILON = 1e-8
_MIN_LOG_TEMPERATURE = -18.0
_MIN_LOG_ALPHA = -18.0
Shape = Tuple[int]
DType = type(jnp.float32) # _ScalarMeta, a private type.
class MPOParams(NamedTuple):
"""NamedTuple to store trainable loss parameters."""
log_temperature: jnp.ndarray
log_alpha_mean: jnp.ndarray
log_alpha_stddev: jnp.ndarray
log_penalty_temperature: Optional[jnp.ndarray] = None
class MPOStats(NamedTuple):
"""NamedTuple to store loss statistics."""
dual_alpha_mean: Union[float, jnp.ndarray]
dual_alpha_stddev: Union[float, jnp.ndarray]
dual_temperature: Union[float, jnp.ndarray]
loss_policy: Union[float, jnp.ndarray]
loss_alpha: Union[float, jnp.ndarray]
loss_temperature: Union[float, jnp.ndarray]
kl_q_rel: Union[float, jnp.ndarray]
kl_mean_rel: Union[float, jnp.ndarray]
kl_stddev_rel: Union[float, jnp.ndarray]
q_min: Union[float, jnp.ndarray]
q_max: Union[float, jnp.ndarray]
pi_stddev_min: Union[float, jnp.ndarray]
pi_stddev_max: Union[float, jnp.ndarray]
pi_stddev_cond: Union[float, jnp.ndarray]
penalty_kl_q_rel: Optional[float] = None
class MPO:
"""MPO loss with decoupled KL constraints as in (Abdolmaleki et al., 2018).
This implementation of the MPO loss includes the following features, as
options:
- Satisfying the KL-constraint on a per-dimension basis (on by default);
- Penalizing actions that fall outside of [-1, 1] (on by default) as a
special case of multi-objective MPO (MO-MPO; Abdolmaleki et al., 2020).
For best results on the control suite, keep both of these on.
(Abdolmaleki et al., 2018): https://arxiv.org/pdf/1812.02256.pdf
(Abdolmaleki et al., 2020): https://arxiv.org/pdf/2005.07513.pdf
"""
def __init__(self,
epsilon: float,
epsilon_mean: float,
epsilon_stddev: float,
init_log_temperature: float,
init_log_alpha_mean: float,
init_log_alpha_stddev: float,
per_dim_constraining: bool = True,
action_penalization: bool = True,
epsilon_penalty: float = 0.001):
"""Initialize and configure the MPO loss.
Args:
epsilon: KL constraint on the non-parametric auxiliary policy, the one
associated with the dual variable called temperature.
epsilon_mean: KL constraint on the mean of the Gaussian policy, the one
associated with the dual variable called alpha_mean.
epsilon_stddev: KL constraint on the stddev of the Gaussian policy, the
one associated with the dual variable called alpha_stddev.
init_log_temperature: initial value for the temperature in log-space, note
a softplus (rather than an exp) will be used to transform this.
init_log_alpha_mean: initial value for the alpha_mean in log-space, note a
softplus (rather than an exp) will be used to transform this.
init_log_alpha_stddev: initial value for the alpha_stddev in log-space,
note a softplus (rather than an exp) will be used to transform this.
per_dim_constraining: whether to enforce the KL constraint on each
dimension independently; this is the default. Otherwise the overall KL
is constrained, which allows some dimensions to change more at the
expense of others staying put.
action_penalization: whether to use a KL constraint to penalize actions
via the MO-MPO algorithm.
epsilon_penalty: KL constraint on the probability of violating the action
constraint.
"""
# MPO constraint thresholds.
self._epsilon = epsilon
self._epsilon_mean = epsilon_mean
self._epsilon_stddev = epsilon_stddev
# Initial values for the constraints' dual variables.
self._init_log_temperature = init_log_temperature
self._init_log_alpha_mean = init_log_alpha_mean
self._init_log_alpha_stddev = init_log_alpha_stddev
# Whether to penalize out-of-bound actions via MO-MPO and its corresponding
# constraint threshold.
self._action_penalization = action_penalization
self._epsilon_penalty = epsilon_penalty
# Whether to ensure per-dimension KL constraint satisfaction.
self._per_dim_constraining = per_dim_constraining
@property
def per_dim_constraining(self):
return self._per_dim_constraining
def init_params(self, action_dim: int, dtype: DType = jnp.float32):
"""Creates an initial set of parameters."""
if self._per_dim_constraining:
dual_variable_shape = [action_dim]
else:
dual_variable_shape = [1]
log_temperature = jnp.full([1], self._init_log_temperature, dtype=dtype)
log_alpha_mean = jnp.full(
dual_variable_shape, self._init_log_alpha_mean, dtype=dtype)
log_alpha_stddev = jnp.full(
dual_variable_shape, self._init_log_alpha_stddev, dtype=dtype)
if self._action_penalization:
log_penalty_temperature = jnp.full([1],
self._init_log_temperature,
dtype=dtype)
else:
log_penalty_temperature = None
return MPOParams(
log_temperature=log_temperature,
log_alpha_mean=log_alpha_mean,
log_alpha_stddev=log_alpha_stddev,
log_penalty_temperature=log_penalty_temperature)
def __call__(
self,
params: MPOParams,
online_action_distribution: Union[tfd.MultivariateNormalDiag,
tfd.Independent],
target_action_distribution: Union[tfd.MultivariateNormalDiag,
tfd.Independent],
actions: jnp.ndarray, # Shape [N, B, D].
q_values: jnp.ndarray, # Shape [N, B].
) -> Tuple[jnp.ndarray, MPOStats]:
"""Computes the decoupled MPO loss.
Args:
params: parameters tracking the temperature and the dual variables.
online_action_distribution: online distribution returned by the online
policy network; expects batch_dims of [B] and event_dims of [D].
target_action_distribution: target distribution returned by the target
policy network; expects same shapes as online distribution.
actions: actions sampled from the target policy; expects shape [N, B, D].
q_values: Q-values associated with each action; expects shape [N, B].
Returns:
Loss, combining the policy loss, KL penalty, and dual losses required to
adapt the dual variables.
Stats, for diagnostics and tracking performance.
"""
# Cast `MultivariateNormalDiag`s to Independent Normals.
# The latter allows us to satisfy KL constraints per-dimension.
if isinstance(target_action_distribution, tfd.MultivariateNormalDiag):
target_action_distribution = tfd.Independent(
tfd.Normal(target_action_distribution.mean(),
target_action_distribution.stddev()))
online_action_distribution = tfd.Independent(
tfd.Normal(online_action_distribution.mean(),
online_action_distribution.stddev()))
# Transform dual variables from log-space.
# Note: using softplus instead of exponential for numerical stability.
temperature = jax.nn.softplus(params.log_temperature) + _MPO_FLOAT_EPSILON
alpha_mean = jax.nn.softplus(params.log_alpha_mean) + _MPO_FLOAT_EPSILON
alpha_stddev = jax.nn.softplus(params.log_alpha_stddev) + _MPO_FLOAT_EPSILON
# Get online and target means and stddevs in preparation for decomposition.
online_mean = online_action_distribution.distribution.mean()
online_scale = online_action_distribution.distribution.stddev()
target_mean = target_action_distribution.distribution.mean()
target_scale = target_action_distribution.distribution.stddev()
# Compute normalized importance weights, used to compute expectations with
# respect to the non-parametric policy; and the temperature loss, used to
# adapt the tempering of Q-values.
normalized_weights, loss_temperature = compute_weights_and_temperature_loss(
q_values, self._epsilon, temperature)
# Only needed for diagnostics: Compute estimated actualized KL between the
# non-parametric and current target policies.
kl_nonparametric = compute_nonparametric_kl_from_normalized_weights(
normalized_weights)
if self._action_penalization:
# Transform action penalization temperature.
penalty_temperature = jax.nn.softplus(
params.log_penalty_temperature) + _MPO_FLOAT_EPSILON
# Compute action penalization cost.
# Note: the cost is zero inside [-1, 1] and equal to the negative L2 norm of
# the out-of-bound component beyond.
diff_out_of_bound = actions - jnp.clip(actions, -1.0, 1.0)
cost_out_of_bound = -jnp.linalg.norm(diff_out_of_bound, axis=-1)
penalty_normalized_weights, loss_penalty_temperature = compute_weights_and_temperature_loss(
cost_out_of_bound, self._epsilon_penalty, penalty_temperature)
# Only needed for diagnostics: Compute estimated actualized KL between the
# non-parametric and current target policies.
penalty_kl_nonparametric = compute_nonparametric_kl_from_normalized_weights(
penalty_normalized_weights)
# Combine normalized weights.
normalized_weights += penalty_normalized_weights
loss_temperature += loss_penalty_temperature
# Decompose the online policy into fixed-mean & fixed-stddev distributions.
# This has been documented as having better performance in bandit settings,
# see e.g. https://arxiv.org/pdf/1812.02256.pdf.
fixed_stddev_distribution = tfd.Independent(
tfd.Normal(loc=online_mean, scale=target_scale))
fixed_mean_distribution = tfd.Independent(
tfd.Normal(loc=target_mean, scale=online_scale))
# Compute the decomposed policy losses.
loss_policy_mean = compute_cross_entropy_loss(actions, normalized_weights,
fixed_stddev_distribution)
loss_policy_stddev = compute_cross_entropy_loss(actions, normalized_weights,
fixed_mean_distribution)
# Compute the decomposed KL between the target and online policies.
if self._per_dim_constraining:
kl_mean = target_action_distribution.distribution.kl_divergence(
fixed_stddev_distribution.distribution) # Shape [B, D].
kl_stddev = target_action_distribution.distribution.kl_divergence(
fixed_mean_distribution.distribution) # Shape [B, D].
else:
kl_mean = target_action_distribution.kl_divergence(
fixed_stddev_distribution) # Shape [B].
kl_stddev = target_action_distribution.kl_divergence(
fixed_mean_distribution) # Shape [B].
# Compute the alpha-weighted KL-penalty and dual losses to adapt the alphas.
loss_kl_mean, loss_alpha_mean = compute_parametric_kl_penalty_and_dual_loss(
kl_mean, alpha_mean, self._epsilon_mean)
loss_kl_stddev, loss_alpha_stddev = compute_parametric_kl_penalty_and_dual_loss(
kl_stddev, alpha_stddev, self._epsilon_stddev)
# Combine losses.
loss_policy = loss_policy_mean + loss_policy_stddev
loss_kl_penalty = loss_kl_mean + loss_kl_stddev
loss_dual = loss_alpha_mean + loss_alpha_stddev + loss_temperature
loss = loss_policy + loss_kl_penalty + loss_dual
# Create statistics.
pi_stddev = online_action_distribution.distribution.stddev()
stats = MPOStats(
# Dual Variables.
dual_alpha_mean=jnp.mean(alpha_mean),
dual_alpha_stddev=jnp.mean(alpha_stddev),
dual_temperature=jnp.mean(temperature),
# Losses.
loss_policy=jnp.mean(loss),
loss_alpha=jnp.mean(loss_alpha_mean + loss_alpha_stddev),
loss_temperature=jnp.mean(loss_temperature),
# KL measurements.
kl_q_rel=jnp.mean(kl_nonparametric) / self._epsilon,
penalty_kl_q_rel=((jnp.mean(penalty_kl_nonparametric) /
self._epsilon_penalty)
if self._action_penalization else None),
kl_mean_rel=jnp.mean(kl_mean, axis=0) / self._epsilon_mean,
kl_stddev_rel=jnp.mean(kl_stddev, axis=0) / self._epsilon_stddev,
# Q measurements.
q_min=jnp.mean(jnp.min(q_values, axis=0)),
q_max=jnp.mean(jnp.max(q_values, axis=0)),
# If the policy has stddev, log summary stats for this as well.
pi_stddev_min=jnp.mean(jnp.min(pi_stddev, axis=-1)),
pi_stddev_max=jnp.mean(jnp.max(pi_stddev, axis=-1)),
# Condition number of the diagonal covariance (actually, stddev) matrix.
pi_stddev_cond=jnp.mean(
jnp.max(pi_stddev, axis=-1) / jnp.min(pi_stddev, axis=-1)),
)
return loss, stats
def compute_weights_and_temperature_loss(
q_values: jnp.ndarray,
epsilon: float,
temperature: jnp.ndarray,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Computes normalized importance weights for the policy optimization.
Args:
q_values: Q-values associated with the actions sampled from the target
policy; expected shape [N, B].
epsilon: Desired constraint on the KL between the target and non-parametric
policies.
temperature: Scalar used to temper the Q-values before computing normalized
importance weights from them. This is really the Lagrange dual variable in
the constrained optimization problem, the solution of which is the
non-parametric policy targeted by the policy loss.
Returns:
Normalized importance weights, used for policy optimization.
Temperature loss, used to adapt the temperature.
"""
# Temper the given Q-values using the current temperature.
tempered_q_values = jax.lax.stop_gradient(q_values) / temperature
# Compute the normalized importance weights used to compute expectations with
# respect to the non-parametric policy.
normalized_weights = jax.nn.softmax(tempered_q_values, axis=0)
normalized_weights = jax.lax.stop_gradient(normalized_weights)
# Compute the temperature loss (dual of the E-step optimization problem).
q_logsumexp = jax.scipy.special.logsumexp(tempered_q_values, axis=0)
log_num_actions = jnp.log(q_values.shape[0] / 1.)
loss_temperature = epsilon + jnp.mean(q_logsumexp) - log_num_actions
loss_temperature = temperature * loss_temperature
return normalized_weights, loss_temperature
def compute_nonparametric_kl_from_normalized_weights(
normalized_weights: jnp.ndarray) -> jnp.ndarray:
"""Estimate the actualized KL between the non-parametric and target policies."""
# Compute integrand.
num_action_samples = normalized_weights.shape[0] / 1.
integrand = jnp.log(num_action_samples * normalized_weights + 1e-8)
# Return the expectation with respect to the non-parametric policy.
return jnp.sum(normalized_weights * integrand, axis=0)
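  # Editor's note: with w denoting the normalized weights over the N sampled
  # actions, the estimator above is KL(q || pi) ~= E_q[log(N * w)]
  # = sum_i w_i * log(N * w_i), since the self-normalized importance ratio
  # q(a_i) / pi(a_i) is approximately N * w_i.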
def compute_cross_entropy_loss(
sampled_actions: jnp.ndarray,
normalized_weights: jnp.ndarray,
online_action_distribution: tfd.Distribution,
) -> jnp.ndarray:
"""Compute cross-entropy online and the reweighted target policy.
Args:
sampled_actions: samples used in the Monte Carlo integration in the policy
loss. Expected shape is [N, B, ...], where N is the number of sampled
actions and B is the number of sampled states.
normalized_weights: target policy multiplied by the exponentiated Q values
and normalized; expected shape is [N, B].
online_action_distribution: policy to be optimized.
Returns:
loss_policy_gradient: the cross-entropy loss that, when differentiated,
produces the policy gradient.
"""
# Compute the M-step loss.
log_prob = online_action_distribution.log_prob(sampled_actions)
# Compute the weighted average log-prob using the normalized weights.
loss_policy_gradient = -jnp.sum(log_prob * normalized_weights, axis=0)
# Return the mean loss over the batch of states.
return jnp.mean(loss_policy_gradient, axis=0)
def compute_parametric_kl_penalty_and_dual_loss(
kl: jnp.ndarray,
alpha: jnp.ndarray,
epsilon: float,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Computes the KL cost to be added to the Lagragian and its dual loss.
The KL cost is simply the alpha-weighted KL divergence and it is added as a
regularizer to the policy loss. The dual variable alpha itself has a loss that
can be minimized to adapt the strength of the regularizer to keep the KL
between consecutive updates at the desired target value of epsilon.
Args:
kl: KL divergence between the target and online policies.
alpha: Lagrange multipliers (dual variables) for the KL constraints.
epsilon: Desired value for the KL.
Returns:
loss_kl: alpha-weighted KL regularization to be added to the policy loss.
loss_alpha: The Lagrange dual loss minimized to adapt alpha.
"""
# Compute the mean KL over the batch.
mean_kl = jnp.mean(kl, axis=0)
# Compute the regularization.
loss_kl = jnp.sum(jax.lax.stop_gradient(alpha) * mean_kl)
# Compute the dual loss.
loss_alpha = jnp.sum(alpha * (epsilon - jax.lax.stop_gradient(mean_kl)))
return loss_kl, loss_alpha
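  # Editor's note (formulas implemented above, with sg = stop_gradient and
  # mean_kl the batch-averaged KL):
  #   loss_kl    = sum( sg(alpha) * mean_kl )
  #   loss_alpha = sum( alpha * (epsilon - sg(mean_kl)) )
  # Under gradient descent alpha therefore grows when the measured KL exceeds
  # epsilon and shrinks when it is below, adapting the penalty strength.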
def clip_mpo_params(params: MPOParams, per_dim_constraining: bool) -> MPOParams:
clipped_params = MPOParams(
log_temperature=jnp.maximum(_MIN_LOG_TEMPERATURE, params.log_temperature),
log_alpha_mean=jnp.maximum(_MIN_LOG_ALPHA, params.log_alpha_mean),
log_alpha_stddev=jnp.maximum(_MIN_LOG_ALPHA, params.log_alpha_stddev))
if not per_dim_constraining:
return clipped_params
else:
return clipped_params._replace(
log_penalty_temperature=jnp.maximum(_MIN_LOG_TEMPERATURE,
params.log_penalty_temperature))
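# A minimal, self-contained usage sketch (editor's illustration, not part of
# the original module). Shapes follow the docstring conventions above:
# N sampled actions, B batch states, D action dimensions. All hyperparameter
# values below are arbitrary and chosen only for illustration.
def _example_mpo_loss_usage():
  num_samples, batch_size, action_dim = 8, 4, 2  # N, B, D.
  mpo = MPO(
      epsilon=0.1,
      epsilon_mean=0.01,
      epsilon_stddev=1e-4,
      init_log_temperature=10.,
      init_log_alpha_mean=10.,
      init_log_alpha_stddev=1000.)
  params = mpo.init_params(action_dim)
  online = tfd.MultivariateNormalDiag(
      loc=jnp.zeros([batch_size, action_dim]),
      scale_diag=jnp.ones([batch_size, action_dim]))
  target = tfd.MultivariateNormalDiag(
      loc=jnp.zeros([batch_size, action_dim]),
      scale_diag=jnp.ones([batch_size, action_dim]))
  actions = jnp.zeros([num_samples, batch_size, action_dim])  # [N, B, D].
  q_values = jnp.ones([num_samples, batch_size])              # [N, B].
  loss, stats = mpo(params, online, target, actions, q_values)
  return loss, stats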
|
acme-master
|
acme/jax/losses/mpo.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common loss functions."""
from acme.jax.losses.impala import impala_loss
from acme.jax.losses.mpo import MPO
from acme.jax.losses.mpo import MPOParams
from acme.jax.losses.mpo import MPOStats
|
acme-master
|
acme/jax/losses/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss function for IMPALA (Espeholt et al., 2018) [1].
[1] https://arxiv.org/abs/1802.01561
"""
from typing import Callable, Mapping, Tuple
from acme.agents.jax.impala import types
from acme.jax import utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import reverb
import rlax
import tree
def impala_loss(
unroll_fn: types.PolicyValueFn,
*,
discount: float,
max_abs_reward: float = np.inf,
baseline_cost: float = 1.0,
entropy_cost: float = 0.0,
) -> Callable[[hk.Params, reverb.ReplaySample], jax.Array]:
"""Builds the standard entropy-regularised IMPALA loss function.
Args:
unroll_fn: A `hk.Transformed` object containing a callable which maps
(params, observations_sequence, initial_state) -> ((logits, value), state)
discount: The standard geometric discount rate to apply.
max_abs_reward: Optional symmetric reward clipping to apply.
baseline_cost: Weighting of the critic loss relative to the policy loss.
entropy_cost: Weighting of the entropy regulariser relative to policy loss.
Returns:
A loss function with signature (params, data) -> (loss_scalar, metrics).
"""
def loss_fn(
params: hk.Params,
sample: reverb.ReplaySample,
) -> Tuple[jax.Array, Mapping[str, jax.Array]]:
"""Batched, entropy-regularised actor-critic loss with V-trace."""
# Extract the data.
data = sample.data
observations, actions, rewards, discounts, extra = (data.observation,
data.action,
data.reward,
data.discount,
data.extras)
initial_state = tree.map_structure(lambda s: s[0], extra['core_state'])
behaviour_logits = extra['logits']
# Apply reward clipping.
rewards = jnp.clip(rewards, -max_abs_reward, max_abs_reward)
# Unroll current policy over observations.
(logits, values), _ = unroll_fn(params, observations, initial_state)
# Compute importance sampling weights: current policy / behavior policy.
rhos = rlax.categorical_importance_sampling_ratios(logits[:-1],
behaviour_logits[:-1],
actions[:-1])
# Critic loss.
vtrace_returns = rlax.vtrace_td_error_and_advantage(
v_tm1=values[:-1],
v_t=values[1:],
r_t=rewards[:-1],
discount_t=discounts[:-1] * discount,
rho_tm1=rhos)
critic_loss = jnp.square(vtrace_returns.errors)
# Policy gradient loss.
policy_gradient_loss = rlax.policy_gradient_loss(
logits_t=logits[:-1],
a_t=actions[:-1],
adv_t=vtrace_returns.pg_advantage,
w_t=jnp.ones_like(rewards[:-1]))
# Entropy regulariser.
entropy_loss = rlax.entropy_loss(logits[:-1], jnp.ones_like(rewards[:-1]))
# Combine weighted sum of actor & critic losses, averaged over the sequence.
mean_loss = jnp.mean(policy_gradient_loss + baseline_cost * critic_loss +
entropy_cost * entropy_loss) # []
metrics = {
'policy_loss': jnp.mean(policy_gradient_loss),
'critic_loss': jnp.mean(baseline_cost * critic_loss),
'entropy_loss': jnp.mean(entropy_cost * entropy_loss),
'entropy': jnp.mean(entropy_loss),
}
return mean_loss, metrics
return utils.mapreduce(loss_fn, in_axes=(None, 0)) # pytype: disable=bad-return-type # jax-devicearray
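# A minimal gradient sketch (editor's illustration, not part of the original
# module): the callable returned above has signature
# (params, sample) -> (scalar_loss, metrics), so it composes directly with
# jax.grad. The `unroll_fn`, `params` and `sample` arguments are assumed to
# follow the shapes documented in `impala_loss`.
def _example_gradient_step(unroll_fn, params, sample):
  loss_fn = impala_loss(unroll_fn, discount=0.99)
  grads, metrics = jax.grad(loss_fn, has_aux=True)(params, sample)
  return grads, metrics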
|
acme-master
|
acme/jax/losses/impala.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing of acme.jax.experiments functions."""
from acme.jax import experiments
from acme.tf import savers
from acme.utils import counting
def restore_counter(
checkpointing_config: experiments.CheckpointingConfig) -> counting.Counter:
"""Restores a counter from the latest checkpoint saved with this config."""
counter = counting.Counter()
savers.Checkpointer(
objects_to_save={'counter': counter},
directory=checkpointing_config.directory,
add_uid=checkpointing_config.add_uid,
max_to_keep=checkpointing_config.max_to_keep)
return counter
|
acme-master
|
acme/jax/experiments/test_utils.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runner used for executing local offline RL agents."""
import acme
from acme import specs
from acme.jax.experiments import config
from acme.tf import savers
from acme.utils import counting
import jax
def run_offline_experiment(experiment: config.OfflineExperimentConfig,
eval_every: int = 100,
num_eval_episodes: int = 1):
"""Runs a simple, single-threaded training loop using the default evaluators.
It prioritizes simplicity of the code, so only the basic features of the
OfflineExperimentConfig are supported.
Arguments:
experiment: Definition and configuration of the agent to run.
eval_every: After how many learner steps to perform evaluation.
num_eval_episodes: How many evaluation episodes to execute at each
evaluation step.
"""
key = jax.random.PRNGKey(experiment.seed)
# Create the environment and get its spec.
environment = experiment.environment_factory(experiment.seed)
environment_spec = experiment.environment_spec or specs.make_environment_spec(
environment)
# Create the networks and policy.
networks = experiment.network_factory(environment_spec)
# The parent counter allows sharing step counts between the train and eval
# loops and the learner, so that it is possible to plot, for example, the
# evaluator's return as a function of the number of training episodes.
parent_counter = counting.Counter(time_delta=0.)
# Create the demonstrations dataset.
dataset_key, key = jax.random.split(key)
dataset = experiment.demonstration_dataset_factory(dataset_key)
# Create the learner.
learner_key, key = jax.random.split(key)
learner = experiment.builder.make_learner(
random_key=learner_key,
networks=networks,
dataset=dataset,
logger_fn=experiment.logger_factory,
environment_spec=environment_spec,
counter=counting.Counter(parent_counter, prefix='learner', time_delta=0.))
# Define the evaluation loop.
eval_loop = None
if num_eval_episodes > 0:
# Create the evaluation actor and loop.
eval_counter = counting.Counter(
parent_counter, prefix='evaluator', time_delta=0.)
eval_logger = experiment.logger_factory('evaluator',
eval_counter.get_steps_key(), 0)
eval_key, key = jax.random.split(key)
eval_actor = experiment.builder.make_actor(
random_key=eval_key,
policy=experiment.builder.make_policy(networks, environment_spec, True),
environment_spec=environment_spec,
variable_source=learner)
eval_loop = acme.EnvironmentLoop(
environment,
eval_actor,
counter=eval_counter,
logger=eval_logger,
observers=experiment.observers)
checkpointer = None
if experiment.checkpointing is not None:
checkpointing = experiment.checkpointing
checkpointer = savers.Checkpointer(
objects_to_save={'learner': learner, 'counter': parent_counter},
time_delta_minutes=checkpointing.time_delta_minutes,
directory=checkpointing.directory,
add_uid=checkpointing.add_uid,
max_to_keep=checkpointing.max_to_keep,
keep_checkpoint_every_n_hours=checkpointing.keep_checkpoint_every_n_hours,
checkpoint_ttl_seconds=checkpointing.checkpoint_ttl_seconds,
)
max_num_learner_steps = (
experiment.max_num_learner_steps -
parent_counter.get_counts().get('learner_steps', 0))
# Run the training loop.
if eval_loop:
eval_loop.run(num_eval_episodes)
steps = 0
while steps < max_num_learner_steps:
learner_steps = min(eval_every, max_num_learner_steps - steps)
for _ in range(learner_steps):
learner.step()
if checkpointer is not None:
checkpointer.save()
if eval_loop:
eval_loop.run(num_eval_episodes)
steps += learner_steps
|
acme-master
|
acme/jax/experiments/run_offline_experiment.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX experiment config."""
import dataclasses
import datetime
from typing import Any, Callable, Dict, Generic, Iterator, Optional, Sequence
from acme import core
from acme import environment_loop
from acme import specs
from acme.agents.jax import builders
from acme.jax import types
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
from acme.utils import observers as observers_lib
from acme.utils import experiment_utils
import jax
from typing_extensions import Protocol
class MakeActorFn(Protocol, Generic[builders.Policy]):
def __call__(self, random_key: types.PRNGKey, policy: builders.Policy,
environment_spec: specs.EnvironmentSpec,
variable_source: core.VariableSource) -> core.Actor:
...
class NetworkFactory(Protocol, Generic[builders.Networks]):
def __call__(self,
environment_spec: specs.EnvironmentSpec) -> builders.Networks:
...
class DeprecatedPolicyFactory(Protocol, Generic[builders.Networks,
builders.Policy]):
def __call__(self, networks: builders.Networks) -> builders.Policy:
...
class PolicyFactory(Protocol, Generic[builders.Networks, builders.Policy]):
def __call__(self, networks: builders.Networks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool) -> builders.Policy:
...
class EvaluatorFactory(Protocol, Generic[builders.Policy]):
def __call__(self, random_key: types.PRNGKey,
variable_source: core.VariableSource, counter: counting.Counter,
make_actor_fn: MakeActorFn[builders.Policy]) -> core.Worker:
...
class SnapshotModelFactory(Protocol, Generic[builders.Networks]):
def __call__(
self, networks: builders.Networks, environment_spec: specs.EnvironmentSpec
) -> Dict[str, Callable[[core.VariableSource], types.ModelToSnapshot]]:
...
@dataclasses.dataclass(frozen=True)
class CheckpointingConfig:
"""Configuration options for checkpointing.
Attributes:
max_to_keep: Maximum number of checkpoints to keep. Unless preserved by
keep_checkpoint_every_n_hours, checkpoints will be deleted from the active
set, oldest first, until only max_to_keep checkpoints remain. Does not
apply to replay checkpointing.
directory: Where to store the checkpoints.
add_uid: Whether or not to add a unique identifier, see
`paths.get_unique_id()` for how it is generated.
time_delta_minutes: How often to save the checkpoint, in minutes.
keep_checkpoint_every_n_hours: Upon removal from the active set, a
checkpoint will be preserved if it has been at least
keep_checkpoint_every_n_hours since the last preserved checkpoint. The
default setting of None does not preserve any checkpoints in this way.
replay_checkpointing_time_delta_minutes: How frequently to write replay
checkpoints; defaults to None, which disables periodic checkpointing.
Warning! These are written asynchronously so as not to interrupt other
replay duties; however, this does pose a risk of OOM since items that would
otherwise be removed are temporarily kept alive for checkpointing
purposes.
Note: Since replay buffers tend to be quite large O(100GiB), writing can
take up to 10 minutes so keep that in mind when setting this frequency.
checkpoint_ttl_seconds: TTL (time to live) in seconds for checkpoints.
Indefinite if set to None.
"""
max_to_keep: int = 1
directory: str = '~/acme'
add_uid: bool = True
time_delta_minutes: int = 5
keep_checkpoint_every_n_hours: Optional[int] = None
replay_checkpointing_time_delta_minutes: Optional[int] = None
checkpoint_ttl_seconds: Optional[int] = int(
datetime.timedelta(days=5).total_seconds()
)
@dataclasses.dataclass(frozen=True)
class ExperimentConfig(Generic[builders.Networks, builders.Policy,
builders.Sample]):
"""Config which defines aspects of constructing an experiment.
Attributes:
builder: Builds components of an RL agent (Learner, Actor...).
network_factory: Builds networks used by the agent.
environment_factory: Returns an instance of an environment.
max_num_actor_steps: How many environment steps to perform.
seed: Seed used for agent initialization.
policy_network_factory: Policy network factory which is used by actors to
perform inference.
evaluator_factories: Factories of policy evaluators. When not specified the
default evaluators are constructed using eval_policy_network_factory. Set
to an empty list to disable evaluators.
eval_policy_network_factory: Policy network factory used by evaluators.
Should be specified to use the default evaluators (when
evaluator_factories is not provided).
environment_spec: Specification of the environment. Can be specified to
reduce the number of times environment_factory is invoked (for performance
or resource usage reasons).
observers: Observers used for extending logs with custom information.
logger_factory: Loggers factory used to construct loggers for learner,
actors and evaluators.
checkpointing: Configuration options for checkpointing. If None,
checkpointing and snapshotting is disabled.
"""
# Below fields must be explicitly specified for any Agent.
builder: builders.ActorLearnerBuilder[builders.Networks, builders.Policy,
builders.Sample]
network_factory: NetworkFactory[builders.Networks]
environment_factory: types.EnvironmentFactory
max_num_actor_steps: int
seed: int
# policy_network_factory is deprecated. Use builder.make_policy to
# create the policy.
policy_network_factory: Optional[DeprecatedPolicyFactory[
builders.Networks, builders.Policy]] = None
# Fields below are optional. If you just started with Acme do not worry about
# them. You might need them later when you want to customize your RL agent.
# TODO(stanczyk): Introduce a marker for the default value (instead of None).
evaluator_factories: Optional[Sequence[EvaluatorFactory[
builders.Policy]]] = None
# eval_policy_network_factory is deprecated. Use builder.make_policy to
# create the policy.
eval_policy_network_factory: Optional[DeprecatedPolicyFactory[
builders.Networks, builders.Policy]] = None
environment_spec: Optional[specs.EnvironmentSpec] = None
observers: Sequence[observers_lib.EnvLoopObserver] = ()
logger_factory: loggers.LoggerFactory = dataclasses.field(
default_factory=experiment_utils.create_experiment_logger_factory)
checkpointing: Optional[CheckpointingConfig] = CheckpointingConfig()
# TODO(stanczyk): Make get_evaluator_factories a standalone function.
def get_evaluator_factories(self):
"""Constructs the evaluator factories."""
if self.evaluator_factories is not None:
return self.evaluator_factories
def eval_policy_factory(networks: builders.Networks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool) -> builders.Policy:
del evaluation
# The config factory has precedence until all agents are migrated to use
# builder.make_policy
if self.eval_policy_network_factory is not None:
return self.eval_policy_network_factory(networks)
else:
return self.builder.make_policy(
networks=networks,
environment_spec=environment_spec,
evaluation=True)
return [
default_evaluator_factory(
environment_factory=self.environment_factory,
network_factory=self.network_factory,
policy_factory=eval_policy_factory,
logger_factory=self.logger_factory,
observers=self.observers)
]
@dataclasses.dataclass
class OfflineExperimentConfig(Generic[builders.Networks, builders.Policy,
builders.Sample]):
"""Config which defines aspects of constructing an offline RL experiment.
This class is similar to the ExperimentConfig, but is tailored to offline RL
setting, so it excludes attributes related to training via interaction with
the environment (max_num_actor_steps, policy_network_factory) and instead
includes attributes specific to learning from demonstration.
Attributes:
builder: Builds components of an offline RL agent (Learner and Evaluator).
network_factory: Builds networks used by the agent.
demonstration_dataset_factory: Function that returns an iterator over
demonstrations.
environment_spec: Specification of the environment.
max_num_learner_steps: How many learner steps to perform.
seed: Seed used for agent initialization.
evaluator_factories: Factories of policy evaluators. When not specified the
default evaluators are constructed using eval_policy_network_factory. Set
to an empty list to disable evaluators.
eval_policy_factory: Policy factory used by evaluators. Should be specified
to use the default evaluators (when evaluator_factories is not provided).
environment_factory: Returns an instance of an environment to be used for
evaluation. Should be specified to use the default evaluators (when
evaluator_factories is not provided).
observers: Observers used for extending logs with custom information.
logger_factory: Loggers factory used to construct loggers for learner,
actors and evaluators.
checkpointing: Configuration options for checkpointing. If None,
checkpointing and snapshotting is disabled.
"""
# Below fields must be explicitly specified for any Agent.
builder: builders.OfflineBuilder[builders.Networks, builders.Policy,
builders.Sample]
network_factory: Callable[[specs.EnvironmentSpec], builders.Networks]
demonstration_dataset_factory: Callable[[types.PRNGKey],
Iterator[builders.Sample]]
environment_factory: types.EnvironmentFactory
max_num_learner_steps: int
seed: int
# Fields below are optional. If you just started with Acme do not worry about
# them. You might need them later when you want to customize your RL agent.
# TODO(stanczyk): Introduce a marker for the default value (instead of None).
evaluator_factories: Optional[Sequence[EvaluatorFactory]] = None
environment_spec: Optional[specs.EnvironmentSpec] = None
observers: Sequence[observers_lib.EnvLoopObserver] = ()
logger_factory: loggers.LoggerFactory = dataclasses.field(
default_factory=experiment_utils.create_experiment_logger_factory)
checkpointing: Optional[CheckpointingConfig] = CheckpointingConfig()
# TODO(stanczyk): Make get_evaluator_factories a standalone function.
def get_evaluator_factories(self):
"""Constructs the evaluator factories."""
if self.evaluator_factories is not None:
return self.evaluator_factories
if self.environment_factory is None:
raise ValueError(
'You need to set `environment_factory` in `OfflineExperimentConfig` '
'when `evaluator_factories` are not specified. To disable evaluation '
'altogether just set `evaluator_factories = []`')
return [
default_evaluator_factory(
environment_factory=self.environment_factory,
network_factory=self.network_factory,
policy_factory=self.builder.make_policy,
logger_factory=self.logger_factory,
observers=self.observers)
]
def default_evaluator_factory(
environment_factory: types.EnvironmentFactory,
network_factory: NetworkFactory[builders.Networks],
policy_factory: PolicyFactory[builders.Networks, builders.Policy],
logger_factory: loggers.LoggerFactory,
observers: Sequence[observers_lib.EnvLoopObserver] = (),
) -> EvaluatorFactory[builders.Policy]:
"""Returns a default evaluator process."""
def evaluator(
random_key: types.PRNGKey,
variable_source: core.VariableSource,
counter: counting.Counter,
make_actor: MakeActorFn[builders.Policy],
):
"""The evaluation process."""
# Create environment and evaluator networks
environment_key, actor_key = jax.random.split(random_key)
# Environments normally require uint32 as a seed.
environment = environment_factory(utils.sample_uint32(environment_key))
environment_spec = specs.make_environment_spec(environment)
networks = network_factory(environment_spec)
policy = policy_factory(networks, environment_spec, True)
actor = make_actor(actor_key, policy, environment_spec, variable_source)
# Create logger and counter.
counter = counting.Counter(counter, 'evaluator')
logger = logger_factory('evaluator', 'actor_steps', 0)
# Create the run loop and return it.
return environment_loop.EnvironmentLoop(
environment, actor, counter, logger, observers=observers)
return evaluator
def make_policy(experiment: ExperimentConfig[builders.Networks, builders.Policy,
Any], networks: builders.Networks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool) -> builders.Policy:
"""Constructs a policy. It is only meant to be used internally."""
# TODO(sabela): remove and update callers once all agents use
# builder.make_policy
if not evaluation and experiment.policy_network_factory:
return experiment.policy_network_factory(networks)
if evaluation and experiment.eval_policy_network_factory:
return experiment.eval_policy_network_factory(networks)
return experiment.builder.make_policy(
networks=networks,
environment_spec=environment_spec,
evaluation=evaluation)
|
acme-master
|
acme/jax/experiments/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Program definition for a distributed layout for an offline RL experiment."""
from typing import Any, Optional
from acme import core
from acme import specs
from acme.agents.jax import builders
from acme.jax import networks as networks_lib
from acme.jax import savers
from acme.jax import utils
from acme.jax.experiments import config
from acme.jax import snapshotter
from acme.utils import counting
from acme.utils import lp_utils
import jax
import launchpad as lp
def make_distributed_offline_experiment(
experiment: config.OfflineExperimentConfig[builders.Networks, Any, Any],
*,
make_snapshot_models: Optional[config.SnapshotModelFactory[
builders.Networks]] = None,
name: str = 'agent',
program: Optional[lp.Program] = None) -> lp.Program:
"""Builds a Launchpad program for running the experiment.
Args:
experiment: configuration for the experiment.
make_snapshot_models: a factory that defines what is saved in snapshots.
name: name of the constructed program. Ignored if an existing program is
passed.
program: a program where agent nodes are added to. If None, a new program is
created.
Returns:
The Launchpad program with all the nodes needed for running the experiment.
"""
def build_model_saver(variable_source: core.VariableSource):
assert experiment.checkpointing
environment = experiment.environment_factory(0)
spec = specs.make_environment_spec(environment)
networks = experiment.network_factory(spec)
models = make_snapshot_models(networks, spec)
# TODO(raveman): Decouple checkpointing and snapshotting configs.
return snapshotter.JAXSnapshotter(
variable_source=variable_source,
models=models,
path=experiment.checkpointing.directory,
add_uid=experiment.checkpointing.add_uid)
def build_counter():
counter = counting.Counter()
if experiment.checkpointing:
counter = savers.CheckpointingRunner(
counter,
key='counter',
subdirectory='counter',
time_delta_minutes=experiment.checkpointing.time_delta_minutes,
directory=experiment.checkpointing.directory,
add_uid=experiment.checkpointing.add_uid,
max_to_keep=experiment.checkpointing.max_to_keep,
checkpoint_ttl_seconds=experiment.checkpointing.checkpoint_ttl_seconds,
)
return counter
def build_learner(
random_key: networks_lib.PRNGKey,
counter: Optional[counting.Counter] = None,
):
"""The Learning part of the agent."""
dummy_seed = 1
spec = (
experiment.environment_spec or
specs.make_environment_spec(experiment.environment_factory(dummy_seed)))
# Creates the networks to optimize (online) and target networks.
networks = experiment.network_factory(spec)
dataset_key, random_key = jax.random.split(random_key)
iterator = experiment.demonstration_dataset_factory(dataset_key)
# make_demonstrations is responsible for putting data onto appropriate
# training devices, so here we apply prefetch, so that data is copied over
# in the background.
iterator = utils.prefetch(iterable=iterator, buffer_size=1)
counter = counting.Counter(counter, 'learner')
learner = experiment.builder.make_learner(
random_key=random_key,
networks=networks,
dataset=iterator,
logger_fn=experiment.logger_factory,
environment_spec=spec,
counter=counter)
if experiment.checkpointing:
learner = savers.CheckpointingRunner(
learner,
key='learner',
subdirectory='learner',
time_delta_minutes=5,
directory=experiment.checkpointing.directory,
add_uid=experiment.checkpointing.add_uid,
max_to_keep=experiment.checkpointing.max_to_keep,
checkpoint_ttl_seconds=experiment.checkpointing.checkpoint_ttl_seconds,
)
return learner
if not program:
program = lp.Program(name=name)
key = jax.random.PRNGKey(experiment.seed)
counter = program.add_node(lp.CourierNode(build_counter), label='counter')
if experiment.max_num_learner_steps is not None:
program.add_node(
lp.CourierNode(
lp_utils.StepsLimiter,
counter,
experiment.max_num_learner_steps,
steps_key='learner_steps'),
label='counter')
learner_key, key = jax.random.split(key)
learner_node = lp.CourierNode(build_learner, learner_key, counter)
learner = learner_node.create_handle()
program.add_node(learner_node, label='learner')
for evaluator in experiment.get_evaluator_factories():
evaluator_key, key = jax.random.split(key)
program.add_node(
lp.CourierNode(evaluator, evaluator_key, learner, counter,
experiment.builder.make_actor),
label='evaluator')
if make_snapshot_models and experiment.checkpointing:
program.add_node(lp.CourierNode(build_model_saver, learner),
label='model_saver')
return program
|
acme-master
|
acme/jax/experiments/make_distributed_offline_experiment.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX experiment utils."""
from acme.jax.experiments.config import CheckpointingConfig
from acme.jax.experiments.config import default_evaluator_factory
from acme.jax.experiments.config import DeprecatedPolicyFactory
from acme.jax.experiments.config import EvaluatorFactory
from acme.jax.experiments.config import ExperimentConfig
from acme.jax.experiments.config import make_policy
from acme.jax.experiments.config import MakeActorFn
from acme.jax.experiments.config import NetworkFactory
from acme.jax.experiments.config import OfflineExperimentConfig
from acme.jax.experiments.config import PolicyFactory
from acme.jax.experiments.config import SnapshotModelFactory
from acme.jax.experiments.make_distributed_experiment import make_distributed_experiment
from acme.jax.experiments.make_distributed_offline_experiment import make_distributed_offline_experiment
from acme.jax.experiments.run_experiment import run_experiment
from acme.jax.experiments.run_offline_experiment import run_offline_experiment
|
acme-master
|
acme/jax/experiments/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the run_experiment function."""
from acme.agents.jax import sac
from acme.jax import experiments
from acme.jax.experiments import test_utils as experiment_test_utils
from acme.testing import fakes
from acme.testing import test_utils
import dm_env
from absl.testing import absltest
from absl.testing import parameterized
class RunExperimentTest(test_utils.TestCase):
@parameterized.named_parameters(
dict(testcase_name='noeval', num_eval_episodes=0),
dict(testcase_name='eval', num_eval_episodes=1))
def test_checkpointing(self, num_eval_episodes: int):
num_train_steps = 100
experiment_config = self._get_experiment_config(
num_train_steps=num_train_steps)
experiments.run_experiment(
experiment_config, eval_every=10, num_eval_episodes=num_eval_episodes)
checkpoint_counter = experiment_test_utils.restore_counter(
experiment_config.checkpointing)
self.assertIn('actor_steps', checkpoint_counter.get_counts())
self.assertGreater(checkpoint_counter.get_counts()['actor_steps'], 0)
# Run the second experiment with the same checkpointing config to verify
# that it restores from the latest saved checkpoint.
experiments.run_experiment(
experiment_config, eval_every=50, num_eval_episodes=num_eval_episodes)
checkpoint_counter = experiment_test_utils.restore_counter(
experiment_config.checkpointing)
self.assertIn('actor_steps', checkpoint_counter.get_counts())
# Verify that the steps done in the first run are taken into account.
self.assertLessEqual(checkpoint_counter.get_counts()['actor_steps'],
num_train_steps)
def test_eval_every(self):
num_train_steps = 100
experiment_config = self._get_experiment_config(
num_train_steps=num_train_steps)
experiments.run_experiment(
experiment_config, eval_every=70, num_eval_episodes=1)
checkpoint_counter = experiment_test_utils.restore_counter(
experiment_config.checkpointing)
self.assertIn('actor_steps', checkpoint_counter.get_counts())
self.assertGreater(checkpoint_counter.get_counts()['actor_steps'], 0)
self.assertLessEqual(checkpoint_counter.get_counts()['actor_steps'],
num_train_steps)
def _get_experiment_config(
self, *, num_train_steps: int) -> experiments.ExperimentConfig:
"""Returns a config for a test experiment with the given number of steps."""
def environment_factory(seed: int) -> dm_env.Environment:
del seed
return fakes.ContinuousEnvironment(
episode_length=10, action_dim=3, observation_dim=5)
num_train_steps = 100
sac_config = sac.SACConfig()
checkpointing_config = experiments.CheckpointingConfig(
directory=self.get_tempdir(), time_delta_minutes=0)
return experiments.ExperimentConfig(
builder=sac.SACBuilder(sac_config),
environment_factory=environment_factory,
network_factory=sac.make_networks,
seed=0,
max_num_actor_steps=num_train_steps,
checkpointing=checkpointing_config)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/jax/experiments/run_experiment_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Program definition for a distributed layout based on a builder."""
import itertools
import math
from typing import Any, List, Optional
from acme import core
from acme import environment_loop
from acme import specs
from acme.agents.jax import actor_core
from acme.agents.jax import builders
from acme.jax import inference_server as inference_server_lib
from acme.jax import networks as networks_lib
from acme.jax import savers
from acme.jax import utils
from acme.jax import variable_utils
from acme.jax.experiments import config
from acme.jax import snapshotter
from acme.utils import counting
from acme.utils import lp_utils
import jax
import launchpad as lp
import reverb
ActorId = int
InferenceServer = inference_server_lib.InferenceServer[
actor_core.SelectActionFn]
def make_distributed_experiment(
experiment: config.ExperimentConfig[builders.Networks, Any, Any],
num_actors: int,
*,
inference_server_config: Optional[
inference_server_lib.InferenceServerConfig
] = None,
num_learner_nodes: int = 1,
num_actors_per_node: int = 1,
num_inference_servers: int = 1,
multiprocessing_colocate_actors: bool = False,
multithreading_colocate_learner_and_reverb: bool = False,
make_snapshot_models: Optional[
config.SnapshotModelFactory[builders.Networks]
] = None,
name: str = 'agent',
program: Optional[lp.Program] = None,
) -> lp.Program:
"""Builds a Launchpad program for running the experiment.
Args:
experiment: configuration of the experiment.
num_actors: number of actors to run.
inference_server_config: If provided we will attempt to use
`num_inference_servers` inference servers for selecting actions.
There are two assumptions if this config is provided:
1) The experiment's policy is an `ActorCore` and a
`TypeError` will be raised if not.
2) The `ActorCore`'s `select_action` method runs on
unbatched observations.
num_learner_nodes: number of learner nodes to run. When using multiple
learner nodes, make sure the learner class does the appropriate pmap/pmean
operations on the loss/gradients, respectively.
num_actors_per_node: number of actors per one program node. Actors within
one node are colocated in one or multiple processes depending on the value
of multiprocessing_colocate_actors.
num_inference_servers: number of inference servers to serve actors. (Only
used if `inference_server_config` is provided.)
multiprocessing_colocate_actors: whether to colocate actor nodes as
subprocesses on a single machine. False by default, which means actors are
colocated within a single process.
multithreading_colocate_learner_and_reverb: whether to colocate the learner
and reverb nodes in one process. Not supported if the learner is spread
across multiple nodes (num_learner_nodes > 1). False by default, which
means no colocation.
make_snapshot_models: a factory that defines what is saved in snapshots.
name: name of the constructed program. Ignored if an existing program is
passed.
program: a program where agent nodes are added to. If None, a new program is
created.
Returns:
The Launchpad program with all the nodes needed for running the experiment.
"""
if multithreading_colocate_learner_and_reverb and num_learner_nodes > 1:
raise ValueError(
'Replay and learner colocation is not yet supported when the learner is'
' spread across multiple nodes (num_learner_nodes > 1). Please contact'
' Acme devs if this is a feature you want. Got:'
'\tmultithreading_colocate_learner_and_reverb='
f'{multithreading_colocate_learner_and_reverb}'
f'\tnum_learner_nodes={num_learner_nodes}.')
def build_replay():
"""The replay storage."""
dummy_seed = 1
spec = (
experiment.environment_spec or
specs.make_environment_spec(experiment.environment_factory(dummy_seed)))
network = experiment.network_factory(spec)
policy = config.make_policy(
experiment=experiment,
networks=network,
environment_spec=spec,
evaluation=False)
return experiment.builder.make_replay_tables(spec, policy)
def build_model_saver(variable_source: core.VariableSource):
assert experiment.checkpointing
environment = experiment.environment_factory(0)
spec = specs.make_environment_spec(environment)
networks = experiment.network_factory(spec)
models = make_snapshot_models(networks, spec)
# TODO(raveman): Decouple checkpointing and snapshotting configs.
return snapshotter.JAXSnapshotter(
variable_source=variable_source,
models=models,
path=experiment.checkpointing.directory,
subdirectory='snapshots',
add_uid=experiment.checkpointing.add_uid)
def build_counter():
counter = counting.Counter()
if experiment.checkpointing:
checkpointing = experiment.checkpointing
counter = savers.CheckpointingRunner(
counter,
key='counter',
subdirectory='counter',
time_delta_minutes=checkpointing.time_delta_minutes,
directory=checkpointing.directory,
add_uid=checkpointing.add_uid,
max_to_keep=checkpointing.max_to_keep,
keep_checkpoint_every_n_hours=checkpointing.keep_checkpoint_every_n_hours,
checkpoint_ttl_seconds=checkpointing.checkpoint_ttl_seconds,
)
return counter
def build_learner(
random_key: networks_lib.PRNGKey,
replay: reverb.Client,
counter: Optional[counting.Counter] = None,
primary_learner: Optional[core.Learner] = None,
):
"""The Learning part of the agent."""
dummy_seed = 1
spec = (
experiment.environment_spec or
specs.make_environment_spec(experiment.environment_factory(dummy_seed)))
# Creates the networks to optimize (online) and target networks.
networks = experiment.network_factory(spec)
iterator = experiment.builder.make_dataset_iterator(replay)
    # make_dataset_iterator is responsible for putting data onto appropriate
    # training devices, so here we apply prefetch so that data is copied over
    # in the background.
iterator = utils.prefetch(iterable=iterator, buffer_size=1)
counter = counting.Counter(counter, 'learner')
learner = experiment.builder.make_learner(random_key, networks, iterator,
experiment.logger_factory, spec,
replay, counter)
if experiment.checkpointing:
if primary_learner is None:
checkpointing = experiment.checkpointing
learner = savers.CheckpointingRunner(
learner,
key='learner',
subdirectory='learner',
time_delta_minutes=5,
directory=checkpointing.directory,
add_uid=checkpointing.add_uid,
max_to_keep=checkpointing.max_to_keep,
keep_checkpoint_every_n_hours=checkpointing.keep_checkpoint_every_n_hours,
checkpoint_ttl_seconds=checkpointing.checkpoint_ttl_seconds,
)
else:
learner.restore(primary_learner.save())
# NOTE: This initially synchronizes secondary learner states with the
# primary one. Further synchronization should be handled by the learner
# properly doing a pmap/pmean on the loss/gradients, respectively.
return learner
def build_inference_server(
inference_server_config: inference_server_lib.InferenceServerConfig,
variable_source: core.VariableSource,
) -> InferenceServer:
"""Builds an inference server for `ActorCore` policies."""
dummy_seed = 1
spec = (
experiment.environment_spec or
specs.make_environment_spec(experiment.environment_factory(dummy_seed)))
networks = experiment.network_factory(spec)
policy = config.make_policy(
experiment=experiment,
networks=networks,
environment_spec=spec,
evaluation=False,
)
if not isinstance(policy, actor_core.ActorCore):
raise TypeError(
          'Using InferenceServer with policy of unsupported type: '
f'{type(policy)}. InferenceServer only supports `ActorCore` policies.'
)
return InferenceServer(
handler=jax.jit(
jax.vmap(
policy.select_action,
in_axes=(None, 0, 0),
# Note on in_axes: Params will not be batched. Only the
# observations and actor state will be stacked along a new
# leading axis by the inference server.
),),
variable_source=variable_source,
devices=jax.local_devices(),
config=inference_server_config,
)
def build_actor(
random_key: networks_lib.PRNGKey,
replay: reverb.Client,
variable_source: core.VariableSource,
counter: counting.Counter,
actor_id: ActorId,
inference_server: Optional[InferenceServer],
) -> environment_loop.EnvironmentLoop:
"""The actor process."""
environment_key, actor_key = jax.random.split(random_key)
# Create environment and policy core.
# Environments normally require uint32 as a seed.
environment = experiment.environment_factory(
utils.sample_uint32(environment_key))
environment_spec = specs.make_environment_spec(environment)
networks = experiment.network_factory(environment_spec)
policy_network = config.make_policy(
experiment=experiment,
networks=networks,
environment_spec=environment_spec,
evaluation=False)
if inference_server is not None:
policy_network = actor_core.ActorCore(
init=policy_network.init,
select_action=inference_server.handler,
get_extras=policy_network.get_extras,
)
variable_source = variable_utils.ReferenceVariableSource()
adder = experiment.builder.make_adder(replay, environment_spec,
policy_network)
actor = experiment.builder.make_actor(actor_key, policy_network,
environment_spec, variable_source,
adder)
# Create logger and counter.
counter = counting.Counter(counter, 'actor')
logger = experiment.logger_factory('actor', counter.get_steps_key(),
actor_id)
# Create the loop to connect environment and agent.
return environment_loop.EnvironmentLoop(
environment, actor, counter, logger, observers=experiment.observers)
if not program:
program = lp.Program(name=name)
key = jax.random.PRNGKey(experiment.seed)
checkpoint_time_delta_minutes: Optional[int] = (
experiment.checkpointing.replay_checkpointing_time_delta_minutes
if experiment.checkpointing else None)
replay_node = lp.ReverbNode(
build_replay, checkpoint_time_delta_minutes=checkpoint_time_delta_minutes)
replay = replay_node.create_handle()
counter = program.add_node(lp.CourierNode(build_counter), label='counter')
if experiment.max_num_actor_steps is not None:
program.add_node(
lp.CourierNode(lp_utils.StepsLimiter, counter,
experiment.max_num_actor_steps),
label='counter')
learner_key, key = jax.random.split(key)
learner_node = lp.CourierNode(build_learner, learner_key, replay, counter)
learner = learner_node.create_handle()
variable_sources = [learner]
if multithreading_colocate_learner_and_reverb:
program.add_node(
lp.MultiThreadingColocation([learner_node, replay_node]),
label='learner')
else:
program.add_node(replay_node, label='replay')
with program.group('learner'):
program.add_node(learner_node)
# Maybe create secondary learners, necessary when using multi-host
# accelerators.
# Warning! If you set num_learner_nodes > 1, make sure the learner class
# does the appropriate pmap/pmean operations on the loss/gradients,
# respectively.
for _ in range(1, num_learner_nodes):
learner_key, key = jax.random.split(key)
variable_sources.append(
program.add_node(
lp.CourierNode(
build_learner, learner_key, replay,
primary_learner=learner)))
# NOTE: Secondary learners are used to load-balance get_variables calls,
# which is why they get added to the list of available variable sources.
# NOTE: Only the primary learner checkpoints.
# NOTE: Do not pass the counter to the secondary learners to avoid
# double counting of learner steps.
if inference_server_config is not None:
num_actors_per_server = math.ceil(num_actors / num_inference_servers)
with program.group('inference_server'):
inference_nodes = []
for _ in range(num_inference_servers):
inference_nodes.append(
program.add_node(
lp.CourierNode(
build_inference_server,
inference_server_config,
learner,
courier_kwargs={'thread_pool_size': num_actors_per_server
})))
else:
num_inference_servers = 1
inference_nodes = [None]
num_actor_nodes, remainder = divmod(num_actors, num_actors_per_node)
num_actor_nodes += int(remainder > 0)
with program.group('actor'):
# Create all actor threads.
*actor_keys, key = jax.random.split(key, num_actors + 1)
# Create (maybe colocated) actor nodes.
for node_id, variable_source, inference_node in zip(
range(num_actor_nodes),
itertools.cycle(variable_sources),
itertools.cycle(inference_nodes),
):
colocation_nodes = []
first_actor_id = node_id * num_actors_per_node
for actor_id in range(
first_actor_id, min(first_actor_id + num_actors_per_node, num_actors)
):
actor = lp.CourierNode(
build_actor,
actor_keys[actor_id],
replay,
variable_source,
counter,
actor_id,
inference_node,
)
colocation_nodes.append(actor)
if len(colocation_nodes) == 1:
program.add_node(colocation_nodes[0])
elif multiprocessing_colocate_actors:
program.add_node(lp.MultiProcessingColocation(colocation_nodes))
else:
program.add_node(lp.MultiThreadingColocation(colocation_nodes))
for evaluator in experiment.get_evaluator_factories():
evaluator_key, key = jax.random.split(key)
program.add_node(
lp.CourierNode(evaluator, evaluator_key, learner, counter,
experiment.builder.make_actor),
label='evaluator')
if make_snapshot_models and experiment.checkpointing:
program.add_node(
lp.CourierNode(build_model_saver, learner), label='model_saver')
return program
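# A minimal launch sketch (illustrative only): the ExperimentConfig below is
# assumed to be built elsewhere, and the Launchpad launch call is an example,
# not part of this module.
#
#   experiment = ...  # an experiments.ExperimentConfig
#   program = make_distributed_experiment(experiment, num_actors=4)
#   lp.launch(program)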
|
acme-master
|
acme/jax/experiments/make_distributed_experiment.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runners used for executing local agents."""
import sys
import time
from typing import Optional, Sequence, Tuple
import acme
from acme import core
from acme import specs
from acme import types
from acme.jax import utils
from acme.jax.experiments import config
from acme.tf import savers
from acme.utils import counting
import dm_env
import jax
import reverb
def run_experiment(experiment: config.ExperimentConfig,
eval_every: int = 100,
num_eval_episodes: int = 1):
"""Runs a simple, single-threaded training loop using the default evaluators.
  It prioritizes simplicity of the code, so only the basic features of the
  ExperimentConfig are supported.
Arguments:
experiment: Definition and configuration of the agent to run.
eval_every: After how many actor steps to perform evaluation.
num_eval_episodes: How many evaluation episodes to execute at each
evaluation step.
"""
key = jax.random.PRNGKey(experiment.seed)
# Create the environment and get its spec.
environment = experiment.environment_factory(experiment.seed)
environment_spec = experiment.environment_spec or specs.make_environment_spec(
environment)
# Create the networks and policy.
networks = experiment.network_factory(environment_spec)
policy = config.make_policy(
experiment=experiment,
networks=networks,
environment_spec=environment_spec,
evaluation=False)
# Create the replay server and grab its address.
replay_tables = experiment.builder.make_replay_tables(environment_spec,
policy)
# Disable blocking of inserts by tables' rate limiters, as this function
# executes learning (sampling from the table) and data generation
# (inserting into the table) sequentially from the same thread
  # which could result in a blocked insert making the algorithm hang.
replay_tables, rate_limiters_max_diff = _disable_insert_blocking(
replay_tables)
replay_server = reverb.Server(replay_tables, port=None)
replay_client = reverb.Client(f'localhost:{replay_server.port}')
  # The parent counter allows sharing step counts between the train and eval
  # loops and the learner, so that it is possible to plot, for example, the
  # evaluator's return as a function of the number of training episodes.
parent_counter = counting.Counter(time_delta=0.)
dataset = experiment.builder.make_dataset_iterator(replay_client)
# We always use prefetch as it provides an iterator with an additional
# 'ready' method.
dataset = utils.prefetch(dataset, buffer_size=1)
# Create actor, adder, and learner for generating, storing, and consuming
# data respectively.
# NOTE: These are created in reverse order as the actor needs to be given the
# adder and the learner (as a source of variables).
learner_key, key = jax.random.split(key)
learner = experiment.builder.make_learner(
random_key=learner_key,
networks=networks,
dataset=dataset,
logger_fn=experiment.logger_factory,
environment_spec=environment_spec,
replay_client=replay_client,
counter=counting.Counter(parent_counter, prefix='learner', time_delta=0.))
adder = experiment.builder.make_adder(replay_client, environment_spec, policy)
actor_key, key = jax.random.split(key)
actor = experiment.builder.make_actor(
actor_key, policy, environment_spec, variable_source=learner, adder=adder)
# Create the environment loop used for training.
train_counter = counting.Counter(
parent_counter, prefix='actor', time_delta=0.)
train_logger = experiment.logger_factory('actor',
train_counter.get_steps_key(), 0)
checkpointer = None
if experiment.checkpointing is not None:
checkpointing = experiment.checkpointing
checkpointer = savers.Checkpointer(
objects_to_save={'learner': learner, 'counter': parent_counter},
time_delta_minutes=checkpointing.time_delta_minutes,
directory=checkpointing.directory,
add_uid=checkpointing.add_uid,
max_to_keep=checkpointing.max_to_keep,
keep_checkpoint_every_n_hours=checkpointing.keep_checkpoint_every_n_hours,
checkpoint_ttl_seconds=checkpointing.checkpoint_ttl_seconds,
)
# Replace the actor with a LearningActor. This makes sure that every time
# that `update` is called on the actor it checks to see whether there is
# any new data to learn from and if so it runs a learner step. The rate
# at which new data is released is controlled by the replay table's
# rate_limiter which is created by the builder.make_replay_tables call above.
actor = _LearningActor(actor, learner, dataset, replay_tables,
rate_limiters_max_diff, checkpointer)
train_loop = acme.EnvironmentLoop(
environment,
actor,
counter=train_counter,
logger=train_logger,
observers=experiment.observers)
max_num_actor_steps = (
experiment.max_num_actor_steps -
parent_counter.get_counts().get(train_counter.get_steps_key(), 0))
if num_eval_episodes == 0:
# No evaluation. Just run the training loop.
train_loop.run(num_steps=max_num_actor_steps)
return
# Create the evaluation actor and loop.
eval_counter = counting.Counter(
parent_counter, prefix='evaluator', time_delta=0.)
eval_logger = experiment.logger_factory('evaluator',
eval_counter.get_steps_key(), 0)
eval_policy = config.make_policy(
experiment=experiment,
networks=networks,
environment_spec=environment_spec,
evaluation=True)
eval_actor = experiment.builder.make_actor(
random_key=jax.random.PRNGKey(experiment.seed),
policy=eval_policy,
environment_spec=environment_spec,
variable_source=learner)
eval_loop = acme.EnvironmentLoop(
environment,
eval_actor,
counter=eval_counter,
logger=eval_logger,
observers=experiment.observers)
steps = 0
while steps < max_num_actor_steps:
eval_loop.run(num_episodes=num_eval_episodes)
num_steps = min(eval_every, max_num_actor_steps - steps)
steps += train_loop.run(num_steps=num_steps)
eval_loop.run(num_episodes=num_eval_episodes)
environment.close()
class _LearningActor(core.Actor):
"""Actor which learns (updates its parameters) when `update` is called.
This combines a base actor and a learner. Whenever `update` is called
on the wrapping actor the learner will take a step (e.g. one step of gradient
  descent) as long as there is data available for training (the provided
  iterator and replay_tables are used to check for that).
Selecting actions and making observations are handled by the base actor.
Intended to be used by the `run_experiment` only.
"""
def __init__(self, actor: core.Actor, learner: core.Learner,
iterator: core.PrefetchingIterator,
replay_tables: Sequence[reverb.Table],
sample_sizes: Sequence[int],
checkpointer: Optional[savers.Checkpointer]):
"""Initializes _LearningActor.
Args:
actor: Actor to be wrapped.
learner: Learner on which step() is to be called when there is data.
iterator: Iterator used by the Learner to fetch training data.
replay_tables: Collection of tables from which Learner fetches data
through the iterator.
      sample_sizes: For each table in `replay_tables`, the number of elements
        that must be available for sampling before we wait for the `iterator`
        to prefetch a batch of data; if fewer elements are available, the actor
        first needs to collect more experience.
checkpointer: Checkpointer to save the state on update.
"""
self._actor = actor
self._learner = learner
self._iterator = iterator
self._replay_tables = replay_tables
self._sample_sizes = sample_sizes
self._learner_steps = 0
self._checkpointer = checkpointer
def select_action(self, observation: types.NestedArray) -> types.NestedArray:
return self._actor.select_action(observation)
def observe_first(self, timestep: dm_env.TimeStep):
self._actor.observe_first(timestep)
def observe(self, action: types.NestedArray, next_timestep: dm_env.TimeStep):
self._actor.observe(action, next_timestep)
def _maybe_train(self):
trained = False
while True:
if self._iterator.ready():
self._learner.step()
batches = self._iterator.retrieved_elements() - self._learner_steps
self._learner_steps += 1
        assert batches == 1, (
            'Learner step must retrieve exactly one element from the iterator'
            f' (retrieved {batches}). Otherwise agent can deadlock. Example '
            "cause is that your chosen agent's Builder has a `make_learner` "
            "factory that prefetches the data but it shouldn't.")
trained = True
else:
        # Wait for the iterator to fetch more data from the table(s) only
        # if there is plenty of data to sample from each table.
for table, sample_size in zip(self._replay_tables, self._sample_sizes):
if not table.can_sample(sample_size):
return trained
# Let iterator's prefetching thread get data from the table(s).
time.sleep(0.001)
def update(self): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
if self._maybe_train():
# Update the actor weights only when learner was updated.
self._actor.update()
if self._checkpointer:
self._checkpointer.save()
def _disable_insert_blocking(
tables: Sequence[reverb.Table]
) -> Tuple[Sequence[reverb.Table], Sequence[int]]:
"""Disables blocking of insert operations for a given collection of tables."""
modified_tables = []
sample_sizes = []
for table in tables:
rate_limiter_info = table.info.rate_limiter_info
rate_limiter = reverb.rate_limiters.RateLimiter(
samples_per_insert=rate_limiter_info.samples_per_insert,
min_size_to_sample=rate_limiter_info.min_size_to_sample,
min_diff=rate_limiter_info.min_diff,
max_diff=sys.float_info.max)
modified_tables.append(table.replace(rate_limiter=rate_limiter))
# Target the middle of the rate limiter's insert-sample balance window.
sample_sizes.append(
max(1, int(
(rate_limiter_info.max_diff - rate_limiter_info.min_diff) / 2)))
return modified_tables, sample_sizes
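# A minimal usage sketch (illustrative only): the builder and factories below
# are assumptions, not defined in this module.
#
#   experiment = config.ExperimentConfig(
#       builder=my_builder,
#       environment_factory=my_environment_factory,
#       network_factory=my_network_factory,
#       seed=0,
#       max_num_actor_steps=10_000)
#   run_experiment(experiment, eval_every=1_000, num_eval_episodes=5)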
|
acme-master
|
acme/jax/experiments/run_experiment.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the run_offline_experiment function."""
from typing import Iterator
from acme import specs
from acme import types
from acme.agents.jax import crr
from acme.jax import experiments
from acme.jax import types as jax_types
from acme.jax.experiments import test_utils as experiment_test_utils
from acme.testing import fakes
from acme.testing import test_utils
import dm_env
from absl.testing import absltest
from absl.testing import parameterized
class RunOfflineExperimentTest(test_utils.TestCase):
@parameterized.named_parameters(
dict(testcase_name='noeval', num_eval_episodes=0),
dict(testcase_name='eval', num_eval_episodes=1))
def test_checkpointing(self, num_eval_episodes: int):
num_learner_steps = 100
experiment_config = self._get_experiment_config(
num_learner_steps=num_learner_steps)
experiments.run_offline_experiment(
experiment_config, num_eval_episodes=num_eval_episodes)
checkpoint_counter = experiment_test_utils.restore_counter(
experiment_config.checkpointing)
self.assertIn('learner_steps', checkpoint_counter.get_counts())
self.assertGreater(checkpoint_counter.get_counts()['learner_steps'], 0)
# Run the second experiment with the same checkpointing config to verify
# that it restores from the latest saved checkpoint.
experiments.run_offline_experiment(
experiment_config, num_eval_episodes=num_eval_episodes)
checkpoint_counter = experiment_test_utils.restore_counter(
experiment_config.checkpointing)
self.assertIn('learner_steps', checkpoint_counter.get_counts())
# Verify that the steps done in the first run are taken into account.
self.assertLessEqual(checkpoint_counter.get_counts()['learner_steps'],
num_learner_steps)
def test_eval_every(self):
num_learner_steps = 100
experiment_config = self._get_experiment_config(
num_learner_steps=num_learner_steps)
experiments.run_offline_experiment(
experiment_config, eval_every=70, num_eval_episodes=1)
checkpoint_counter = experiment_test_utils.restore_counter(
experiment_config.checkpointing)
self.assertIn('learner_steps', checkpoint_counter.get_counts())
self.assertGreater(checkpoint_counter.get_counts()['learner_steps'], 0)
self.assertLessEqual(checkpoint_counter.get_counts()['learner_steps'],
num_learner_steps)
def _get_experiment_config(
self, *, num_learner_steps: int) -> experiments.OfflineExperimentConfig:
def environment_factory(seed: int) -> dm_env.Environment:
del seed
return fakes.ContinuousEnvironment(
episode_length=10, action_dim=3, observation_dim=5)
environment = environment_factory(seed=1)
environment_spec = specs.make_environment_spec(environment)
def demonstration_dataset_factory(
random_key: jax_types.PRNGKey) -> Iterator[types.Transition]:
del random_key
batch_size = 64
return fakes.transition_iterator_from_spec(environment_spec)(batch_size)
crr_config = crr.CRRConfig()
crr_builder = crr.CRRBuilder(
crr_config, policy_loss_coeff_fn=crr.policy_loss_coeff_advantage_exp)
checkpointing_config = experiments.CheckpointingConfig(
directory=self.get_tempdir(), time_delta_minutes=0)
return experiments.OfflineExperimentConfig(
builder=crr_builder,
network_factory=crr.make_networks,
demonstration_dataset_factory=demonstration_dataset_factory,
environment_factory=environment_factory,
max_num_learner_steps=num_learner_steps,
seed=0,
environment_spec=environment_spec,
checkpointing=checkpointing_config,
)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/jax/experiments/run_offline_experiment_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common networks for Atari.
Glossary of shapes:
- T: Sequence length.
- B: Batch size.
- A: Number of actions.
- D: Embedding size.
- X?: X is optional (e.g. optional batch/sequence dimension).
"""
from typing import Any, Optional, Sequence, Tuple
from acme.jax.networks import base
from acme.jax.networks import duelling
from acme.jax.networks import embedding
from acme.jax.networks import policy_value
from acme.jax.networks import resnet
from acme.wrappers import observation_action_reward
import haiku as hk
import jax
import jax.numpy as jnp
# Useful type aliases.
Images = jnp.ndarray
class AtariTorso(hk.Module):
"""Simple convolutional stack commonly used for Atari."""
def __init__(self):
super().__init__(name='atari_torso')
self._network = hk.Sequential([
hk.Conv2D(32, [8, 8], 4), jax.nn.relu,
hk.Conv2D(64, [4, 4], 2), jax.nn.relu,
hk.Conv2D(64, [3, 3], 1), jax.nn.relu
])
def __call__(self, inputs: Images) -> jnp.ndarray:
inputs_rank = jnp.ndim(inputs)
batched_inputs = inputs_rank == 4
if inputs_rank < 3 or inputs_rank > 4:
raise ValueError('Expected input BHWC or HWC. Got rank %d' % inputs_rank)
outputs = self._network(inputs)
if batched_inputs:
return jnp.reshape(outputs, [outputs.shape[0], -1]) # [B, D]
return jnp.reshape(outputs, [-1]) # [D]
def dqn_atari_network(num_actions: int) -> base.QNetwork:
"""A feed-forward network for use with Ape-X DQN."""
def network(inputs: Images) -> base.QValues:
model = hk.Sequential([
AtariTorso(),
duelling.DuellingMLP(num_actions, hidden_sizes=[512]),
])
return model(inputs)
return network
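# A minimal usage sketch for dqn_atari_network (toy shapes; illustrative only):
#
#   network_fn = dqn_atari_network(num_actions=6)
#   net = hk.without_apply_rng(hk.transform(network_fn))
#   params = net.init(jax.random.PRNGKey(0), jnp.zeros((1, 84, 84, 4)))
#   q_values = net.apply(params, jnp.zeros((1, 84, 84, 4)))  # [1, 6]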
class DeepAtariTorso(hk.Module):
"""Deep torso for Atari, from the IMPALA paper."""
def __init__(
self,
channels_per_group: Sequence[int] = (16, 32, 32),
blocks_per_group: Sequence[int] = (2, 2, 2),
downsampling_strategies: Sequence[resnet.DownsamplingStrategy] = (
resnet.DownsamplingStrategy.CONV_MAX,) * 3,
hidden_sizes: Sequence[int] = (256,),
use_layer_norm: bool = False,
name: str = 'deep_atari_torso'):
super().__init__(name=name)
self._use_layer_norm = use_layer_norm
self.resnet = resnet.ResNetTorso(
channels_per_group=channels_per_group,
blocks_per_group=blocks_per_group,
downsampling_strategies=downsampling_strategies,
use_layer_norm=use_layer_norm)
# Make sure to activate the last layer as this torso is expected to feed
# into the rest of a bigger network.
self.mlp_head = hk.nets.MLP(output_sizes=hidden_sizes, activate_final=True)
def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
output = self.resnet(x)
output = jax.nn.relu(output)
output = hk.Flatten(preserve_dims=-3)(output)
output = self.mlp_head(output)
return output
class DeepIMPALAAtariNetwork(hk.RNNCore):
"""A recurrent network for use with IMPALA.
See https://arxiv.org/pdf/1802.01561.pdf for more information.
"""
def __init__(self, num_actions: int):
super().__init__(name='impala_atari_network')
self._embed = embedding.OAREmbedding(
DeepAtariTorso(use_layer_norm=True), num_actions)
self._core = hk.GRU(256)
self._head = policy_value.PolicyValueHead(num_actions)
self._num_actions = num_actions
def __call__(
self, inputs: observation_action_reward.OAR, state: hk.LSTMState
) -> Any:
embeddings = self._embed(inputs) # [B?, D+A+1]
embeddings, new_state = self._core(embeddings, state)
logits, value = self._head(embeddings) # logits: [B?, A], value: [B?, 1]
return (logits, value), new_state
def initial_state(self, batch_size: Optional[int],
**unused_kwargs) -> hk.LSTMState:
return self._core.initial_state(batch_size)
def unroll(
self, inputs: observation_action_reward.OAR, state: hk.LSTMState
) -> Any:
"""Efficient unroll that applies embeddings, MLP, & convnet in one pass."""
embeddings = self._embed(inputs)
embeddings, new_states = hk.static_unroll(self._core, embeddings, state)
logits, values = self._head(embeddings)
return (logits, values), new_states
class R2D2AtariNetwork(hk.RNNCore):
"""A duelling recurrent network for use with Atari observations as seen in R2D2.
See https://openreview.net/forum?id=r1lyTjAqYX for more information.
"""
def __init__(self, num_actions: int):
super().__init__(name='r2d2_atari_network')
self._embed = embedding.OAREmbedding(
DeepAtariTorso(hidden_sizes=[512], use_layer_norm=True), num_actions)
self._core = hk.LSTM(512)
self._duelling_head = duelling.DuellingMLP(num_actions, hidden_sizes=[512])
self._num_actions = num_actions
def __call__(
self,
inputs: observation_action_reward.OAR, # [B, ...]
state: hk.LSTMState # [B, ...]
) -> Tuple[base.QValues, hk.LSTMState]:
embeddings = self._embed(inputs) # [B, D+A+1]
core_outputs, new_state = self._core(embeddings, state)
q_values = self._duelling_head(core_outputs)
return q_values, new_state
def initial_state(self, batch_size: Optional[int],
**unused_kwargs) -> hk.LSTMState:
return self._core.initial_state(batch_size)
def unroll(
self,
inputs: observation_action_reward.OAR, # [T, B, ...]
state: hk.LSTMState # [T, ...]
) -> Tuple[base.QValues, hk.LSTMState]:
"""Efficient unroll that applies torso, core, and duelling mlp in one pass."""
embeddings = hk.BatchApply(self._embed)(inputs) # [T, B, D+A+1]
core_outputs, new_states = hk.static_unroll(self._core, embeddings, state)
q_values = hk.BatchApply(self._duelling_head)(core_outputs) # [T, B, A]
return q_values, new_states
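# A minimal usage sketch for R2D2AtariNetwork (toy sizes; illustrative only).
# `__call__` processes a single timestep of [B, ...] inputs, while `unroll`
# consumes a [T, B, ...] sequence in one pass:
#
#   def unroll_fn(inputs, state):
#     return R2D2AtariNetwork(num_actions=6).unroll(inputs, state)
#   unroll = hk.without_apply_rng(hk.transform(unroll_fn))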
|
acme-master
|
acme/jax/networks/atari.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rescaling layers (e.g. to match action specs)."""
import dataclasses
from acme import specs
from jax import lax
import jax.numpy as jnp
@dataclasses.dataclass
class ClipToSpec:
"""Clips inputs to within a BoundedArraySpec."""
spec: specs.BoundedArray
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
return jnp.clip(inputs, self.spec.minimum, self.spec.maximum)
@dataclasses.dataclass
class RescaleToSpec:
"""Rescales inputs in [-1, 1] to match a BoundedArraySpec."""
spec: specs.BoundedArray
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
scale = self.spec.maximum - self.spec.minimum
offset = self.spec.minimum
inputs = 0.5 * (inputs + 1.0) # [0, 1]
output = inputs * scale + offset # [minimum, maximum]
return output
@dataclasses.dataclass
class TanhToSpec:
"""Squashes real-valued inputs to match a BoundedArraySpec."""
spec: specs.BoundedArray
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
scale = self.spec.maximum - self.spec.minimum
offset = self.spec.minimum
inputs = lax.tanh(inputs) # [-1, 1]
inputs = 0.5 * (inputs + 1.0) # [0, 1]
output = inputs * scale + offset # [minimum, maximum]
return output
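# A minimal usage sketch (toy spec; illustrative only):
#
#   spec = specs.BoundedArray(shape=(2,), dtype=float, minimum=-2., maximum=2.)
#   squash = TanhToSpec(spec)
#   actions = squash(jnp.array([0.1, -3.0]))  # squashed into [-2, 2]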
|
acme-master
|
acme/jax/networks/rescaling.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules for computing custom embeddings."""
import dataclasses
from acme.wrappers import observation_action_reward
import haiku as hk
import jax
import jax.numpy as jnp
@dataclasses.dataclass
class OAREmbedding(hk.Module):
"""Module for embedding (observation, action, reward) inputs together."""
torso: hk.SupportsCall
num_actions: int
def __call__(self, inputs: observation_action_reward.OAR) -> jnp.ndarray:
"""Embed each of the (observation, action, reward) inputs & concatenate."""
# Add dummy batch dimension to observation if necessary.
# This is needed because Conv2D assumes a leading batch dimension, i.e.
# that inputs are in [B, H, W, C] format.
expand_obs = len(inputs.observation.shape) == 3
if expand_obs:
inputs = inputs._replace(
observation=jnp.expand_dims(inputs.observation, axis=0))
features = self.torso(inputs.observation) # [T?, B, D]
if expand_obs:
features = jnp.squeeze(features, axis=0)
# Do a one-hot embedding of the actions.
action = jax.nn.one_hot(
inputs.action, num_classes=self.num_actions) # [T?, B, A]
# Map rewards -> [-1, 1].
reward = jnp.tanh(inputs.reward)
# Add dummy trailing dimensions to rewards if necessary.
while reward.ndim < action.ndim:
reward = jnp.expand_dims(reward, axis=-1)
# Concatenate on final dimension.
embedding = jnp.concatenate(
[features, action, reward], axis=-1) # [T?, B, D+A+1]
return embedding
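# A minimal input sketch (illustrative only): OAR groups the current
# observation with the previous action and reward, e.g.
#
#   oar = observation_action_reward.OAR(
#       observation=jnp.zeros((84, 84, 4)),  # [H, W, C]
#       action=jnp.asarray(2),
#       reward=jnp.asarray(1.0))
#   # Inside an hk.transform-ed function, with `some_torso` a placeholder:
#   embedding = OAREmbedding(torso=some_torso, num_actions=6)(oar)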
|
acme-master
|
acme/jax/networks/embedding.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A duelling network architecture, as described in [0].
[0] https://arxiv.org/abs/1511.06581
"""
from typing import Sequence, Optional
import haiku as hk
import jax.numpy as jnp
class DuellingMLP(hk.Module):
"""A Duelling MLP Q-network."""
def __init__(
self,
num_actions: int,
hidden_sizes: Sequence[int],
w_init: Optional[hk.initializers.Initializer] = None,
):
super().__init__(name='duelling_q_network')
self._value_mlp = hk.nets.MLP([*hidden_sizes, 1], w_init=w_init)
self._advantage_mlp = hk.nets.MLP([*hidden_sizes, num_actions],
w_init=w_init)
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
"""Forward pass of the duelling network.
Args:
inputs: 2-D tensor of shape [batch_size, embedding_size].
Returns:
q_values: 2-D tensor of action values of shape [batch_size, num_actions]
"""
# Compute value & advantage for duelling.
value = self._value_mlp(inputs) # [B, 1]
advantages = self._advantage_mlp(inputs) # [B, A]
# Advantages have zero mean.
advantages -= jnp.mean(advantages, axis=-1, keepdims=True) # [B, A]
q_values = value + advantages # [B, A]
return q_values
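# A minimal usage sketch (toy sizes; illustrative only):
#
#   def forward(x):
#     return DuellingMLP(num_actions=4, hidden_sizes=[64])(x)
#   net = hk.without_apply_rng(hk.transform(forward))
#   params = net.init(jax.random.PRNGKey(0), jnp.zeros((1, 8)))
#   q_values = net.apply(params, jnp.zeros((1, 8)))  # [1, 4]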
|
acme-master
|
acme/jax/networks/duelling.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX networks implemented with Haiku."""
from acme.jax.networks.atari import AtariTorso
from acme.jax.networks.atari import DeepIMPALAAtariNetwork
from acme.jax.networks.atari import dqn_atari_network
from acme.jax.networks.atari import R2D2AtariNetwork
from acme.jax.networks.base import Action
from acme.jax.networks.base import Entropy
from acme.jax.networks.base import FeedForwardNetwork
from acme.jax.networks.base import Logits
from acme.jax.networks.base import LogProb
from acme.jax.networks.base import LogProbFn
from acme.jax.networks.base import LSTMOutputs
from acme.jax.networks.base import make_unrollable_network
from acme.jax.networks.base import NetworkOutput
from acme.jax.networks.base import non_stochastic_network_to_typed
from acme.jax.networks.base import Observation
from acme.jax.networks.base import Params
from acme.jax.networks.base import PolicyValueRNN
from acme.jax.networks.base import PRNGKey
from acme.jax.networks.base import QNetwork
from acme.jax.networks.base import QValues
from acme.jax.networks.base import RecurrentQNetwork
from acme.jax.networks.base import RecurrentState
from acme.jax.networks.base import SampleFn
from acme.jax.networks.base import TypedFeedForwardNetwork
from acme.jax.networks.base import UnrollableNetwork
from acme.jax.networks.base import Value
from acme.jax.networks.continuous import LayerNormMLP
from acme.jax.networks.continuous import NearZeroInitializedLinear
from acme.jax.networks.distributional import CategoricalCriticHead
from acme.jax.networks.distributional import CategoricalHead
from acme.jax.networks.distributional import CategoricalValueHead
from acme.jax.networks.distributional import DiscreteValued
from acme.jax.networks.distributional import GaussianMixture
from acme.jax.networks.distributional import MultivariateNormalDiagHead
from acme.jax.networks.distributional import NormalTanhDistribution
from acme.jax.networks.distributional import TanhTransformedDistribution
from acme.jax.networks.duelling import DuellingMLP
from acme.jax.networks.multiplexers import CriticMultiplexer
from acme.jax.networks.policy_value import PolicyValueHead
from acme.jax.networks.rescaling import ClipToSpec
from acme.jax.networks.rescaling import TanhToSpec
from acme.jax.networks.resnet import DownsamplingStrategy
from acme.jax.networks.resnet import ResidualBlock
from acme.jax.networks.resnet import ResNetTorso
|
acme-master
|
acme/jax/networks/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Haiku modules that output tfd.Distributions."""
from typing import Any, List, Optional, Sequence, Union, Callable
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow_probability as tf_tfp
import tensorflow_probability.substrates.jax as tfp
hk_init = hk.initializers
tfd = tfp.distributions
_MIN_SCALE = 1e-4
Initializer = hk.initializers.Initializer
class CategoricalHead(hk.Module):
"""Module that produces a categorical distribution with the given number of values."""
def __init__(
self,
num_values: Union[int, List[int]],
dtype: Optional[Any] = jnp.int32,
w_init: Optional[Initializer] = None,
name: Optional[str] = None,
):
super().__init__(name=name)
self._dtype = dtype
self._logit_shape = num_values
self._linear = hk.Linear(np.prod(num_values), w_init=w_init)
def __call__(self, inputs: jnp.ndarray) -> tfd.Distribution:
logits = self._linear(inputs)
if not isinstance(self._logit_shape, int):
logits = hk.Reshape(self._logit_shape)(logits)
return tfd.Categorical(logits=logits, dtype=self._dtype)
class GaussianMixture(hk.Module):
"""Module that outputs a Gaussian Mixture Distribution."""
def __init__(self,
num_dimensions: int,
num_components: int,
multivariate: bool,
init_scale: Optional[float] = None,
append_singleton_event_dim: bool = False,
reinterpreted_batch_ndims: Optional[int] = None,
transformation_fn: Optional[Callable[[tfd.Distribution],
tfd.Distribution]] = None,
name: str = 'GaussianMixture'):
"""Initialization.
Args:
num_dimensions: dimensionality of the output distribution
num_components: number of mixture components.
multivariate: whether the resulting distribution is multivariate or not.
init_scale: the initial scale for the Gaussian mixture components.
append_singleton_event_dim: (univariate only) Whether to add an extra
singleton dimension to the event shape.
reinterpreted_batch_ndims: (univariate only) Number of batch dimensions to
reinterpret as event dimensions.
transformation_fn: Distribution transform such as TanhTransformation
applied to individual components.
      name: name of the module passed to the hk.Module parent class.
"""
super().__init__(name=name)
self._num_dimensions = num_dimensions
self._num_components = num_components
self._multivariate = multivariate
self._append_singleton_event_dim = append_singleton_event_dim
self._reinterpreted_batch_ndims = reinterpreted_batch_ndims
if init_scale is not None:
self._scale_factor = init_scale / jax.nn.softplus(0.)
else:
self._scale_factor = 1.0 # Corresponds to init_scale = softplus(0).
self._transformation_fn = transformation_fn
def __call__(self,
inputs: jnp.ndarray,
low_noise_policy: bool = False) -> tfd.Distribution:
"""Run the networks through inputs.
Args:
inputs: hidden activations of the policy network body.
low_noise_policy: whether to set vanishingly small scales for each
component. If this flag is set to True, the policy is effectively run
without Gaussian noise.
Returns:
Mixture Gaussian distribution.
"""
# Define the weight initializer.
w_init = hk.initializers.VarianceScaling(scale=1e-5)
# Create a layer that outputs the unnormalized log-weights.
if self._multivariate:
logits_size = self._num_components
else:
logits_size = self._num_dimensions * self._num_components
logit_layer = hk.Linear(logits_size, w_init=w_init)
# Create two layers that outputs a location and a scale, respectively, for
# each dimension and each component.
loc_layer = hk.Linear(
self._num_dimensions * self._num_components, w_init=w_init)
scale_layer = hk.Linear(
self._num_dimensions * self._num_components, w_init=w_init)
# Compute logits, locs, and scales if necessary.
logits = logit_layer(inputs)
locs = loc_layer(inputs)
    # When a low_noise_policy is requested, set the scales to their minimum value.
if low_noise_policy:
scales = jnp.full(locs.shape, _MIN_SCALE)
else:
scales = scale_layer(inputs)
scales = self._scale_factor * jax.nn.softplus(scales) + _MIN_SCALE
if self._multivariate:
components_class = tfd.MultivariateNormalDiag
shape = [-1, self._num_components, self._num_dimensions] # [B, C, D]
# In this case, no need to reshape logits as they are in the correct shape
# already, namely [batch_size, num_components].
else:
components_class = tfd.Normal
shape = [-1, self._num_dimensions, self._num_components] # [B, D, C]
if self._append_singleton_event_dim:
shape.insert(2, 1) # [B, D, 1, C]
logits = logits.reshape(shape)
# Reshape the mixture's location and scale parameters appropriately.
locs = locs.reshape(shape)
scales = scales.reshape(shape)
if self._multivariate:
components_distribution = components_class(loc=locs, scale_diag=scales)
else:
components_distribution = components_class(loc=locs, scale=scales)
    # Transform the component distributions in the mixture.
if self._transformation_fn:
components_distribution = self._transformation_fn(components_distribution)
# Create the mixture distribution.
distribution = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(logits=logits),
components_distribution=components_distribution)
if not self._multivariate:
distribution = tfd.Independent(
distribution,
reinterpreted_batch_ndims=self._reinterpreted_batch_ndims)
return distribution
class TanhTransformedDistribution(tfd.TransformedDistribution):
"""Distribution followed by tanh."""
def __init__(self, distribution, threshold=.999, validate_args=False):
"""Initialize the distribution.
Args:
distribution: The distribution to transform.
threshold: Clipping value of the action when computing the logprob.
validate_args: Passed to super class.
"""
super().__init__(
distribution=distribution,
bijector=tfp.bijectors.Tanh(),
validate_args=validate_args)
# Computes the log of the average probability distribution outside the
# clipping range, i.e. on the interval [-inf, -atanh(threshold)] for
# log_prob_left and [atanh(threshold), inf] for log_prob_right.
self._threshold = threshold
inverse_threshold = self.bijector.inverse(threshold)
# average(pdf) = p/epsilon
# So log(average(pdf)) = log(p) - log(epsilon)
log_epsilon = jnp.log(1. - threshold)
# Those 2 values are differentiable w.r.t. model parameters, such that the
# gradient is defined everywhere.
self._log_prob_left = self.distribution.log_cdf(
-inverse_threshold) - log_epsilon
self._log_prob_right = self.distribution.log_survival_function(
inverse_threshold) - log_epsilon
def log_prob(self, event):
    # Without this clip there would be NaNs in the inner jnp.where and that
    # causes issues for some reason.
event = jnp.clip(event, -self._threshold, self._threshold)
# The inverse image of {threshold} is the interval [atanh(threshold), inf]
# which has a probability of "log_prob_right" under the given distribution.
return jnp.where(
event <= -self._threshold, self._log_prob_left,
jnp.where(event >= self._threshold, self._log_prob_right,
super().log_prob(event)))
def mode(self):
return self.bijector.forward(self.distribution.mode())
def entropy(self, seed=None):
# We return an estimation using a single sample of the log_det_jacobian.
# We can still do some backpropagation with this estimate.
return self.distribution.entropy() + self.bijector.forward_log_det_jacobian(
self.distribution.sample(seed=seed), event_ndims=0)
@classmethod
def _parameter_properties(cls, dtype: Optional[Any], num_classes=None):
td_properties = super()._parameter_properties(dtype,
num_classes=num_classes)
del td_properties['bijector']
return td_properties
class NormalTanhDistribution(hk.Module):
"""Module that produces a TanhTransformedDistribution distribution."""
def __init__(self,
num_dimensions: int,
min_scale: float = 1e-3,
w_init: hk_init.Initializer = hk_init.VarianceScaling(
1.0, 'fan_in', 'uniform'),
b_init: hk_init.Initializer = hk_init.Constant(0.)):
"""Initialization.
Args:
num_dimensions: Number of dimensions of a distribution.
min_scale: Minimum standard deviation.
w_init: Initialization for linear layer weights.
b_init: Initialization for linear layer biases.
"""
super().__init__(name='Normal')
self._min_scale = min_scale
self._loc_layer = hk.Linear(num_dimensions, w_init=w_init, b_init=b_init)
self._scale_layer = hk.Linear(num_dimensions, w_init=w_init, b_init=b_init)
def __call__(self, inputs: jnp.ndarray) -> tfd.Distribution:
loc = self._loc_layer(inputs)
scale = self._scale_layer(inputs)
scale = jax.nn.softplus(scale) + self._min_scale
distribution = tfd.Normal(loc=loc, scale=scale)
return tfd.Independent(
TanhTransformedDistribution(distribution), reinterpreted_batch_ndims=1)
class MultivariateNormalDiagHead(hk.Module):
"""Module that produces a tfd.MultivariateNormalDiag distribution."""
def __init__(self,
num_dimensions: int,
init_scale: float = 0.3,
min_scale: float = 1e-6,
w_init: hk_init.Initializer = hk_init.VarianceScaling(1e-4),
b_init: hk_init.Initializer = hk_init.Constant(0.)):
"""Initialization.
Args:
num_dimensions: Number of dimensions of MVN distribution.
init_scale: Initial standard deviation.
min_scale: Minimum standard deviation.
w_init: Initialization for linear layer weights.
b_init: Initialization for linear layer biases.
"""
super().__init__(name='MultivariateNormalDiagHead')
self._min_scale = min_scale
self._init_scale = init_scale
self._loc_layer = hk.Linear(num_dimensions, w_init=w_init, b_init=b_init)
self._scale_layer = hk.Linear(num_dimensions, w_init=w_init, b_init=b_init)
def __call__(self, inputs: jnp.ndarray) -> tfd.Distribution:
loc = self._loc_layer(inputs)
scale = jax.nn.softplus(self._scale_layer(inputs))
scale *= self._init_scale / jax.nn.softplus(0.)
scale += self._min_scale
return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale)
class CategoricalValueHead(hk.Module):
"""Network head that produces a categorical distribution and value."""
def __init__(
self,
num_values: int,
name: Optional[str] = None,
):
super().__init__(name=name)
self._logit_layer = hk.Linear(num_values)
self._value_layer = hk.Linear(1)
def __call__(self, inputs: jnp.ndarray):
logits = self._logit_layer(inputs)
value = jnp.squeeze(self._value_layer(inputs), axis=-1)
return (tfd.Categorical(logits=logits), value)
class DiscreteValued(hk.Module):
"""C51-style head.
For each action, it produces the logits for a discrete distribution over
  atoms. Therefore, the returned logits represent several distributions, one
for each action.
"""
def __init__(
self,
num_actions: int,
head_units: int = 512,
num_atoms: int = 51,
v_min: float = -1.0,
v_max: float = 1.0,
):
super().__init__('DiscreteValued')
self._num_actions = num_actions
self._num_atoms = num_atoms
self._atoms = jnp.linspace(v_min, v_max, self._num_atoms)
self._network = hk.nets.MLP([head_units, num_actions * num_atoms])
def __call__(self, inputs: jnp.ndarray):
q_logits = self._network(inputs)
q_logits = jnp.reshape(q_logits, (-1, self._num_actions, self._num_atoms))
q_dist = jax.nn.softmax(q_logits)
q_values = jnp.sum(q_dist * self._atoms, axis=2)
q_values = jax.lax.stop_gradient(q_values)
return q_values, q_logits, self._atoms
class CategoricalCriticHead(hk.Module):
"""Critic head that uses a categorical to represent action values."""
def __init__(self,
num_bins: int = 601,
vmax: Optional[float] = None,
vmin: Optional[float] = None,
w_init: hk_init.Initializer = hk_init.VarianceScaling(1e-5)):
super().__init__(name='categorical_critic_head')
vmax = vmax if vmax is not None else 0.5 * (num_bins - 1)
vmin = vmin if vmin is not None else -1.0 * vmax
self._head = DiscreteValuedTfpHead(
vmin=vmin,
vmax=vmax,
logits_shape=(1,),
num_atoms=num_bins,
w_init=w_init)
def __call__(self, embedding: chex.Array) -> tfd.Distribution:
output = self._head(embedding)
return output
class DiscreteValuedTfpHead(hk.Module):
"""Represents a parameterized discrete valued distribution.
The returned distribution is essentially a `tfd.Categorical` that knows its
support and thus can compute the mean value.
"""
def __init__(self,
vmin: float,
vmax: float,
num_atoms: int,
logits_shape: Optional[Sequence[int]] = None,
w_init: Optional[Initializer] = None,
b_init: Optional[Initializer] = None):
"""Initialization.
If vmin and vmax have shape S, this will store the category values as a
Tensor of shape (S*, num_atoms).
Args:
vmin: Minimum of the value range
vmax: Maximum of the value range
      num_atoms: Number of atoms (bins) of the discrete value distribution.
logits_shape: The shape of the logits, excluding batch and num_atoms
dimensions.
w_init: Initialization for linear layer weights.
b_init: Initialization for linear layer biases.
"""
super().__init__(name='DiscreteValuedHead')
self._values = np.linspace(vmin, vmax, num=num_atoms, axis=-1)
if not logits_shape:
logits_shape = ()
self._logits_shape = logits_shape + (num_atoms,)
self._w_init = w_init
self._b_init = b_init
def __call__(self, inputs: chex.Array) -> tfd.Distribution:
net = hk.Linear(
np.prod(self._logits_shape), w_init=self._w_init, b_init=self._b_init)
logits = net(inputs)
logits = hk.Reshape(self._logits_shape, preserve_dims=1)(logits)
return DiscreteValuedTfpDistribution(values=self._values, logits=logits)
@tf_tfp.experimental.auto_composite_tensor
class DiscreteValuedTfpDistribution(tfd.Categorical):
"""This is a generalization of a categorical distribution.
The support for the DiscreteValued distribution can be any real valued range,
whereas the categorical distribution has support [0, n_categories - 1] or
[1, n_categories]. This generalization allows us to take the mean of the
distribution over its support.
"""
def __init__(self,
values: chex.Array,
logits: Optional[chex.Array] = None,
probs: Optional[chex.Array] = None,
name: str = 'DiscreteValuedDistribution'):
"""Initialization.
Args:
values: Values making up support of the distribution. Should have a shape
compatible with logits.
logits: An N-D Tensor, N >= 1, representing the log probabilities of a set
of Categorical distributions. The first N - 1 dimensions index into a
batch of independent distributions and the last dimension indexes into
the classes.
probs: An N-D Tensor, N >= 1, representing the probabilities of a set of
Categorical distributions. The first N - 1 dimensions index into a batch
of independent distributions and the last dimension represents a vector
of probabilities for each class. Only one of logits or probs should be
passed in.
name: Name of the distribution object.
"""
parameters = dict(locals())
self._values = np.asarray(values)
if logits is not None:
logits = jnp.asarray(logits)
chex.assert_shape(logits, (..., *self._values.shape))
if probs is not None:
probs = jnp.asarray(probs)
chex.assert_shape(probs, (..., *self._values.shape))
super().__init__(logits=logits, probs=probs, name=name)
self._parameters = parameters
@property
def values(self):
return self._values
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
values=tfp.util.ParameterProperties(
event_ndims=None,
shape_fn=lambda shape: (num_classes,),
specifies_shape=True),
logits=tfp.util.ParameterProperties(event_ndims=1),
probs=tfp.util.ParameterProperties(event_ndims=1, is_preferred=False))
def _sample_n(self, key: chex.PRNGKey, n: int) -> chex.Array:
indices = super()._sample_n(key=key, n=n)
return jnp.take_along_axis(self._values, indices, axis=-1)
def mean(self) -> chex.Array:
"""Overrides the Categorical mean by incorporating category values."""
return jnp.sum(self.probs_parameter() * self._values, axis=-1)
def variance(self) -> chex.Array:
"""Overrides the Categorical variance by incorporating category values."""
dist_squared = jnp.square(jnp.expand_dims(self.mean(), -1) - self._values)
return jnp.sum(self.probs_parameter() * dist_squared, axis=-1)
def _event_shape(self):
return jnp.zeros((), dtype=jnp.int32)
def _event_shape_tensor(self):
return []
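# A minimal usage sketch for NormalTanhDistribution (toy sizes; illustrative
# only):
#
#   def head(x):
#     return NormalTanhDistribution(num_dimensions=3)(x)
#   net = hk.without_apply_rng(hk.transform(head))
#   params = net.init(jax.random.PRNGKey(0), jnp.zeros((1, 16)))
#   dist = net.apply(params, jnp.zeros((1, 16)))
#   action = dist.sample(seed=jax.random.PRNGKey(1))  # values in (-1, 1)
#   log_prob = dist.log_prob(action)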
|
acme-master
|
acme/jax/networks/distributional.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiplexers are networks that take multiple inputs."""
from typing import Callable, Optional, Union
from acme.jax import utils
import haiku as hk
import jax.numpy as jnp
import tensorflow_probability
tfd = tensorflow_probability.substrates.jax.distributions
ModuleOrArrayTransform = Union[hk.Module, Callable[[jnp.ndarray], jnp.ndarray]]
class CriticMultiplexer(hk.Module):
"""Module connecting a critic torso to (transformed) observations/actions.
This takes as input a `critic_network`, an `observation_network`, and an
`action_network` and returns another network whose outputs are given by
`critic_network(observation_network(o), action_network(a))`.
  The observations and actions passed to this module are assumed to have
  matching batch dimensions.
Notes:
- Either the `observation_` or `action_network` can be `None`, in which case
the observation or action, resp., are passed to the critic network as is.
  - If all `critic_`, `observation_` and `action_network` are `None`, this
    module reduces to a simple `utils.batch_concat()`.
"""
def __init__(self,
critic_network: Optional[ModuleOrArrayTransform] = None,
observation_network: Optional[ModuleOrArrayTransform] = None,
action_network: Optional[ModuleOrArrayTransform] = None):
self._critic_network = critic_network
self._observation_network = observation_network
self._action_network = action_network
super().__init__(name='critic_multiplexer')
def __call__(self,
observation: jnp.ndarray,
action: jnp.ndarray) -> jnp.ndarray:
# Maybe transform observations and actions before feeding them on.
if self._observation_network:
observation = self._observation_network(observation)
if self._action_network:
action = self._action_network(action)
# Concat observations and actions, with one batch dimension.
outputs = utils.batch_concat([observation, action])
# Maybe transform output before returning.
if self._critic_network:
outputs = self._critic_network(outputs)
return outputs
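# A minimal usage sketch (toy shapes; illustrative only):
#
#   def critic(obs, act):
#     return CriticMultiplexer(critic_network=hk.nets.MLP([64, 1]))(obs, act)
#   net = hk.without_apply_rng(hk.transform(critic))
#   params = net.init(
#       jax.random.PRNGKey(0), jnp.zeros((1, 8)), jnp.zeros((1, 2)))
#   q = net.apply(params, jnp.zeros((1, 8)), jnp.zeros((1, 2)))  # [1, 1]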
|
acme-master
|
acme/jax/networks/multiplexers.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet Modules."""
import enum
import functools
from typing import Callable, Sequence, Union
import haiku as hk
import jax
import jax.numpy as jnp
InnerOp = Union[hk.Module, Callable[..., jnp.ndarray]]
MakeInnerOp = Callable[..., InnerOp]
NonLinearity = Callable[[jnp.ndarray], jnp.ndarray]
class ResidualBlock(hk.Module):
"""Residual block of operations, e.g. convolutional or MLP."""
def __init__(self,
make_inner_op: MakeInnerOp,
non_linearity: NonLinearity = jax.nn.relu,
use_layer_norm: bool = False,
name: str = 'residual_block'):
super().__init__(name=name)
self.inner_op1 = make_inner_op()
self.inner_op2 = make_inner_op()
self.non_linearity = non_linearity
self.use_layer_norm = use_layer_norm
if use_layer_norm:
self.layernorm1 = hk.LayerNorm(
axis=(-3, -2, -1), create_scale=True, create_offset=True, eps=1e-6)
self.layernorm2 = hk.LayerNorm(
axis=(-3, -2, -1), create_scale=True, create_offset=True, eps=1e-6)
def __call__(self, x: jnp.ndarray):
output = x
# First layer in residual block.
if self.use_layer_norm:
output = self.layernorm1(output)
output = self.non_linearity(output)
output = self.inner_op1(output)
# Second layer in residual block.
if self.use_layer_norm:
output = self.layernorm2(output)
output = self.non_linearity(output)
output = self.inner_op2(output)
return x + output
# TODO(nikola): Remove this enum and configure downsampling with a layer factory
# instead.
class DownsamplingStrategy(enum.Enum):
AVG_POOL = 'avg_pool'
CONV_MAX = 'conv+max' # Used in IMPALA
LAYERNORM_RELU_CONV = 'layernorm+relu+conv' # Used in MuZero
CONV = 'conv'
def make_downsampling_layer(
strategy: Union[str, DownsamplingStrategy],
output_channels: int,
) -> hk.SupportsCall:
"""Returns a sequence of modules corresponding to the desired downsampling."""
strategy = DownsamplingStrategy(strategy)
if strategy is DownsamplingStrategy.AVG_POOL:
return hk.AvgPool(window_shape=(3, 3, 1), strides=(2, 2, 1), padding='SAME')
elif strategy is DownsamplingStrategy.CONV:
return hk.Sequential([
hk.Conv2D(
output_channels,
kernel_shape=3,
stride=2,
w_init=hk.initializers.TruncatedNormal(1e-2)),
])
elif strategy is DownsamplingStrategy.LAYERNORM_RELU_CONV:
return hk.Sequential([
hk.LayerNorm(
axis=(-3, -2, -1), create_scale=True, create_offset=True, eps=1e-6),
jax.nn.relu,
hk.Conv2D(
output_channels,
kernel_shape=3,
stride=2,
w_init=hk.initializers.TruncatedNormal(1e-2)),
])
elif strategy is DownsamplingStrategy.CONV_MAX:
return hk.Sequential([
hk.Conv2D(output_channels, kernel_shape=3, stride=1),
hk.MaxPool(window_shape=(3, 3, 1), strides=(2, 2, 1), padding='SAME')
])
else:
raise ValueError('Unrecognized downsampling strategy. Expected one of'
f' {[strategy.value for strategy in DownsamplingStrategy]}'
f' but received {strategy}.')
class ResNetTorso(hk.Module):
"""ResNetTorso for visual inputs, inspired by the IMPALA paper."""
def __init__(self,
channels_per_group: Sequence[int] = (16, 32, 32),
blocks_per_group: Sequence[int] = (2, 2, 2),
downsampling_strategies: Sequence[DownsamplingStrategy] = (
DownsamplingStrategy.CONV_MAX,) * 3,
use_layer_norm: bool = False,
name: str = 'resnet_torso'):
super().__init__(name=name)
self._channels_per_group = channels_per_group
self._blocks_per_group = blocks_per_group
self._downsampling_strategies = downsampling_strategies
self._use_layer_norm = use_layer_norm
if (len(channels_per_group) != len(blocks_per_group) or
len(channels_per_group) != len(downsampling_strategies)):
raise ValueError('Length of channels_per_group, blocks_per_group, and '
'downsampling_strategies must be equal. '
f'Got channels_per_group={channels_per_group}, '
                       f'blocks_per_group={blocks_per_group}, and '
f'downsampling_strategies={downsampling_strategies}.')
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
output = inputs
channels_blocks_strategies = zip(self._channels_per_group,
self._blocks_per_group,
self._downsampling_strategies)
for i, (num_channels, num_blocks,
strategy) in enumerate(channels_blocks_strategies):
output = make_downsampling_layer(strategy, num_channels)(output)
for j in range(num_blocks):
output = ResidualBlock(
make_inner_op=functools.partial(
hk.Conv2D, output_channels=num_channels, kernel_shape=3),
use_layer_norm=self._use_layer_norm,
name=f'residual_{i}_{j}')(
output)
return output
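# A minimal usage sketch: applies the torso to a dummy image batch inside an
# hk.transform. The input resolution and batch size below are illustrative
# assumptions, not part of the module above.
def _example_resnet_usage() -> jnp.ndarray:
  def torso_fn(images: jnp.ndarray) -> jnp.ndarray:
    return ResNetTorso(use_layer_norm=True)(images)

  forward = hk.without_apply_rng(hk.transform(torso_fn))
  images = jnp.zeros((4, 72, 96, 3))  # [batch, height, width, channels].
  params = forward.init(jax.random.PRNGKey(0), images)
  # Three CONV_MAX stages each downsample spatially by 2, so the output is
  # [4, 9, 12, 32] for the default channels_per_group of (16, 32, 32).
  return forward.apply(params, images)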
|
acme-master
|
acme/jax/networks/resnet.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy-value network head for actor-critic algorithms."""
from typing import Tuple
import haiku as hk
import jax.numpy as jnp
class PolicyValueHead(hk.Module):
"""A network with two linear layers, for policy and value respectively."""
def __init__(self, num_actions: int):
super().__init__(name='policy_value_network')
self._policy_layer = hk.Linear(num_actions)
self._value_layer = hk.Linear(1)
def __call__(self, inputs: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Returns a (Logits, Value) tuple."""
logits = self._policy_layer(inputs) # [B, A]
value = jnp.squeeze(self._value_layer(inputs), axis=-1) # [B]
return logits, value
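# A minimal usage sketch: runs the head on a dummy embedding inside an
# hk.transform. The embedding size and number of actions below are
# illustrative assumptions, not part of the module above.
def _example_policy_value_usage() -> Tuple[jnp.ndarray, jnp.ndarray]:
  import jax  # Only jax.numpy is imported at module level.

  def head_fn(embedding: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
    return PolicyValueHead(num_actions=5)(embedding)

  head = hk.without_apply_rng(hk.transform(head_fn))
  embedding = jnp.zeros((8, 64))  # [B, embedding_dim], illustrative.
  params = head.init(jax.random.PRNGKey(0), embedding)
  logits, value = head.apply(params, embedding)  # Shapes [8, 5] and [8].
  return logits, value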
|
acme-master
|
acme/jax/networks/policy_value.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks used in continuous control."""
from typing import Callable, Sequence
import haiku as hk
import jax
import jax.numpy as jnp
uniform_initializer = hk.initializers.UniformScaling(scale=0.333)
class NearZeroInitializedLinear(hk.Linear):
"""Simple linear layer, initialized at near zero weights and zero biases."""
def __init__(self, output_size: int, scale: float = 1e-4):
super().__init__(output_size, w_init=hk.initializers.VarianceScaling(scale))
class LayerNormMLP(hk.Module):
"""Simple feedforward MLP torso with initial layer-norm.
This MLP's first linear layer is followed by a LayerNorm layer and a tanh
non-linearity; subsequent layers use `activation`, which defaults to elu.
Note! The default activation differs from the usual MLP default of ReLU for
legacy reasons.
"""
def __init__(self,
layer_sizes: Sequence[int],
w_init: hk.initializers.Initializer = uniform_initializer,
activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.elu,
activate_final: bool = False,
name: str = 'feedforward_mlp_torso'):
"""Construct the MLP.
Args:
layer_sizes: a sequence of ints specifying the size of each layer.
w_init: initializer for Linear layers.
activation: nonlinearity to use in the MLP, defaults to elu.
Note! The default activation differs from the usual MLP default of ReLU
for legacy reasons.
activate_final: whether or not to use the activation function on the final
layer of the neural network.
name: a name for the module.
"""
super().__init__(name=name)
self._network = hk.Sequential([
hk.Linear(layer_sizes[0], w_init=w_init),
hk.LayerNorm(axis=-1, create_scale=True, create_offset=True),
jax.lax.tanh,
hk.nets.MLP(
layer_sizes[1:],
w_init=w_init,
activation=activation,
activate_final=activate_final),
])
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
"""Forwards the policy network."""
return self._network(inputs)
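# A minimal usage sketch: a simple deterministic policy built from the modules
# above. The layer sizes, observation shape, and action dimensionality are
# illustrative assumptions, not part of the modules themselves.
def _example_policy_usage() -> jnp.ndarray:
  def policy_fn(observation: jnp.ndarray) -> jnp.ndarray:
    torso = LayerNormMLP(layer_sizes=[256, 256], activate_final=True)
    head = NearZeroInitializedLinear(output_size=4)
    return head(torso(observation))

  policy = hk.without_apply_rng(hk.transform(policy_fn))
  observation = jnp.zeros((2, 10))
  params = policy.init(jax.random.PRNGKey(0), observation)
  return policy.apply(params, observation)  # Near-zero actions, shape [2, 4].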
|
acme-master
|
acme/jax/networks/continuous.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base interfaces for networks."""
import dataclasses
from typing import Callable, Optional, Tuple
from acme import specs
from acme import types
from acme.jax import types as jax_types
from acme.jax import utils as jax_utils
import haiku as hk
import jax.numpy as jnp
from typing_extensions import Protocol
# This definition is deprecated. Use jax_types.PRNGKey directly instead.
# TODO(sinopalnikov): migrate all users and remove this definition.
PRNGKey = jax_types.PRNGKey
# Commonly-used types.
BatchSize = int
Observation = types.NestedArray
Action = types.NestedArray
Params = types.NestedArray
NetworkOutput = types.NestedArray
QValues = jnp.ndarray
Logits = jnp.ndarray
LogProb = jnp.ndarray
Value = jnp.ndarray
RecurrentState = types.NestedArray
Entropy = jnp.ndarray
# Commonly-used function/network signatures.
QNetwork = Callable[[Observation], QValues]
LSTMOutputs = Tuple[Tuple[Logits, Value], hk.LSTMState]
PolicyValueRNN = Callable[[Observation, hk.LSTMState], LSTMOutputs]
RecurrentQNetwork = Callable[[Observation, hk.LSTMState],
Tuple[QValues, hk.LSTMState]]
SampleFn = Callable[[NetworkOutput, PRNGKey], Action]
LogProbFn = Callable[[NetworkOutput, Action], LogProb]
@dataclasses.dataclass
class FeedForwardNetwork:
"""Holds a pair of pure functions defining a feed-forward network.
Attributes:
init: A pure function: ``params = init(rng, *a, **k)``
apply: A pure function: ``out = apply(params, rng, *a, **k)``
"""
  # Initializes and returns the network's parameters.
init: Callable[..., Params]
# Computes and returns the outputs of a forward pass.
apply: Callable[..., NetworkOutput]
class ApplyFn(Protocol):
def __call__(self,
params: Params,
observation: Observation,
*args,
is_training: bool,
key: Optional[PRNGKey] = None,
**kwargs) -> NetworkOutput:
...
@dataclasses.dataclass
class TypedFeedForwardNetwork:
"""FeedForwardNetwork with more specific types of the member functions.
Attributes:
    init: A pure function. Initializes and returns the network's parameters.
apply: A pure function. Computes and returns the outputs of a forward pass.
"""
init: Callable[[PRNGKey], Params]
apply: ApplyFn
def non_stochastic_network_to_typed(
network: FeedForwardNetwork) -> TypedFeedForwardNetwork:
"""Converts non-stochastic FeedForwardNetwork to TypedFeedForwardNetwork.
  A non-stochastic network is one that does not take a random key as an input
  to its `apply` method.
Arguments:
network: non-stochastic feed-forward network.
Returns:
corresponding TypedFeedForwardNetwork
"""
def apply(params: Params,
observation: Observation,
*args,
is_training: bool,
key: Optional[PRNGKey] = None,
**kwargs) -> NetworkOutput:
del is_training, key
return network.apply(params, observation, *args, **kwargs)
return TypedFeedForwardNetwork(init=network.init, apply=apply)
@dataclasses.dataclass
class UnrollableNetwork:
"""Network that can unroll over an input sequence."""
init: Callable[[PRNGKey], Params]
apply: Callable[[Params, PRNGKey, Observation, RecurrentState],
Tuple[NetworkOutput, RecurrentState]]
unroll: Callable[[Params, PRNGKey, Observation, RecurrentState],
Tuple[NetworkOutput, RecurrentState]]
init_recurrent_state: Callable[[PRNGKey, Optional[BatchSize]], RecurrentState]
# TODO(b/244311990): Consider supporting parameterized and learnable initial
# state functions.
def make_unrollable_network(
environment_spec: specs.EnvironmentSpec,
make_core_module: Callable[[], hk.RNNCore]) -> UnrollableNetwork:
"""Builds an UnrollableNetwork from a hk.Module factory."""
dummy_observation = jax_utils.zeros_like(environment_spec.observations)
def make_unrollable_network_functions():
model = make_core_module()
apply = model.__call__
def init() -> Tuple[NetworkOutput, RecurrentState]:
return model(dummy_observation, model.initial_state(None))
return init, (apply, model.unroll, model.initial_state) # pytype: disable=attribute-error
# Transform and unpack pure functions
f = hk.multi_transform(make_unrollable_network_functions)
apply, unroll, initial_state_fn = f.apply
def init_recurrent_state(key: jax_types.PRNGKey,
batch_size: Optional[int]) -> RecurrentState:
# TODO(b/244311990): Consider supporting parameterized and learnable initial
# state functions.
no_params = None
return initial_state_fn(no_params, key, batch_size)
return UnrollableNetwork(f.init, apply, unroll, init_recurrent_state)
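# A minimal usage sketch: packages a Haiku MLP as a FeedForwardNetwork and
# converts it to the typed interface defined above. The layer sizes and the
# dummy observation shape are illustrative assumptions.
def _example_typed_network() -> TypedFeedForwardNetwork:
  def forward(observation: Observation) -> NetworkOutput:
    return hk.nets.MLP([64, 64, 1])(observation)

  transformed = hk.without_apply_rng(hk.transform(forward))
  dummy_observation = jnp.zeros((1, 8))
  network = FeedForwardNetwork(
      init=lambda key: transformed.init(key, dummy_observation),
      apply=transformed.apply)
  # The typed wrapper accepts (and ignores) the `is_training` and `key` kwargs.
  return non_stochastic_network_to_typed(network)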
|
acme-master
|
acme/jax/networks/base.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility classes for saving model checkpoints and snapshots."""
import abc
import datetime
import os
import pickle
import time
from typing import Mapping, Optional, Union
from absl import logging
from acme import core
from acme.utils import signals
from acme.utils import paths
import sonnet as snt
import tensorflow as tf
import tree
from tensorflow.python.saved_model import revived_types
PythonState = tf.train.experimental.PythonState
Checkpointable = Union[tf.Module, tf.Variable, PythonState]
_DEFAULT_CHECKPOINT_TTL = int(datetime.timedelta(days=5).total_seconds())
_DEFAULT_SNAPSHOT_TTL = int(datetime.timedelta(days=90).total_seconds())
class TFSaveable(abc.ABC):
"""An interface for objects that expose their checkpointable TF state."""
@property
@abc.abstractmethod
def state(self) -> Mapping[str, Checkpointable]:
"""Returns TensorFlow checkpointable state."""
class Checkpointer:
"""Convenience class for periodically checkpointing.
This can be used to checkpoint any object with trackable state (e.g.
tensorflow variables or modules); see tf.train.Checkpoint for
details. Objects inheriting from tf.train.experimental.PythonState can also
be checkpointed.
Typically people use Checkpointer to make sure that they can correctly recover
from a machine going down during learning. For more permanent storage of self-
contained "networks" see the Snapshotter object.
Usage example:
```python
model = snt.Linear(10)
checkpointer = Checkpointer(objects_to_save={'model': model})
for _ in range(100):
# ...
checkpointer.save()
```
"""
def __init__(
self,
objects_to_save: Mapping[str, Union[Checkpointable, core.Saveable]],
*,
directory: str = '~/acme/',
subdirectory: str = 'default',
time_delta_minutes: float = 10.0,
enable_checkpointing: bool = True,
add_uid: bool = True,
max_to_keep: int = 1,
checkpoint_ttl_seconds: Optional[int] = _DEFAULT_CHECKPOINT_TTL,
keep_checkpoint_every_n_hours: Optional[int] = None,
):
"""Builds the saver object.
Args:
objects_to_save: Mapping specifying what to checkpoint.
directory: Which directory to put the checkpoint in.
subdirectory: Sub-directory to use (e.g. if multiple checkpoints are being
saved).
time_delta_minutes: How often to save the checkpoint, in minutes.
enable_checkpointing: whether to checkpoint or not.
add_uid: If True adds a UID to the checkpoint path, see
`paths.get_unique_id()` for how this UID is generated.
max_to_keep: The maximum number of checkpoints to keep.
      checkpoint_ttl_seconds: TTL (time to live) in seconds for checkpoints.
keep_checkpoint_every_n_hours: keep_checkpoint_every_n_hours passed to
tf.train.CheckpointManager.
"""
# Convert `Saveable` objects to TF `Checkpointable` first, if necessary.
def to_ckptable(x: Union[Checkpointable, core.Saveable]) -> Checkpointable:
if isinstance(x, core.Saveable):
return SaveableAdapter(x)
return x
objects_to_save = {k: to_ckptable(v) for k, v in objects_to_save.items()}
self._time_delta_minutes = time_delta_minutes
self._last_saved = 0.
self._enable_checkpointing = enable_checkpointing
self._checkpoint_manager = None
if enable_checkpointing:
# Checkpoint object that handles saving/restoring.
self._checkpoint = tf.train.Checkpoint(**objects_to_save)
self._checkpoint_dir = paths.process_path(
directory,
'checkpoints',
subdirectory,
ttl_seconds=checkpoint_ttl_seconds,
backups=False,
add_uid=add_uid)
# Create a manager to maintain different checkpoints.
self._checkpoint_manager = tf.train.CheckpointManager(
self._checkpoint,
directory=self._checkpoint_dir,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
self.restore()
def save(self, force: bool = False) -> bool:
"""Save the checkpoint if it's the appropriate time, otherwise no-ops.
Args:
force: Whether to force a save regardless of time elapsed since last save.
Returns:
A boolean indicating if a save event happened.
"""
if not self._enable_checkpointing:
return False
if (not force and
time.time() - self._last_saved < 60 * self._time_delta_minutes):
return False
# Save any checkpoints.
logging.info('Saving checkpoint: %s', self._checkpoint_manager.directory)
self._checkpoint_manager.save()
self._last_saved = time.time()
return True
def restore(self):
# Restore from the most recent checkpoint (if it exists).
checkpoint_to_restore = self._checkpoint_manager.latest_checkpoint
logging.info('Attempting to restore checkpoint: %s',
checkpoint_to_restore)
self._checkpoint.restore(checkpoint_to_restore)
@property
def directory(self):
return self._checkpoint_manager.directory
class CheckpointingRunner(core.Worker):
"""Wrap an object and expose a run method which checkpoints periodically.
This internally creates a Checkpointer around `wrapped` object and exposes
all of the methods of `wrapped`. Additionally, any `**kwargs` passed to the
runner are forwarded to the internal Checkpointer.
"""
def __init__(
self,
wrapped: Union[Checkpointable, core.Saveable, TFSaveable],
key: str = 'wrapped',
*,
time_delta_minutes: int = 30,
**kwargs,
):
if isinstance(wrapped, TFSaveable):
# If the object to be wrapped exposes its TF State, checkpoint that.
objects_to_save = wrapped.state
else:
# Otherwise checkpoint the wrapped object itself.
objects_to_save = wrapped
self._wrapped = wrapped
self._time_delta_minutes = time_delta_minutes
self._checkpointer = Checkpointer(
objects_to_save={key: objects_to_save},
time_delta_minutes=time_delta_minutes,
**kwargs)
# Handle preemption signal. Note that this must happen in the main thread.
def _signal_handler(self):
logging.info('Caught SIGTERM: forcing a checkpoint save.')
self._checkpointer.save(force=True)
def step(self):
if isinstance(self._wrapped, core.Learner):
# Learners have a step() method, so alternate between that and ckpt call.
self._wrapped.step()
self._checkpointer.save()
else:
      # Wrapped object is not a Learner with a step() method; just checkpoint.
self.checkpoint()
def run(self):
"""Runs the checkpointer."""
with signals.runtime_terminator(self._signal_handler):
while True:
self.step()
def __dir__(self):
return dir(self._wrapped) + ['get_directory']
# TODO(b/195915583) : Throw when wrapped object has get_directory() method.
def __getattr__(self, name):
if name == 'get_directory':
return self.get_directory
return getattr(self._wrapped, name)
def checkpoint(self):
self._checkpointer.save()
# Do not sleep for a long period of time to avoid LaunchPad program
# termination hangs (time.sleep is not interruptible).
for _ in range(self._time_delta_minutes * 60):
time.sleep(1)
def get_directory(self):
return self._checkpointer.directory
class Snapshotter:
"""Convenience class for periodically snapshotting.
Objects which can be snapshotted are limited to Sonnet or tensorflow Modules
which implement a __call__ method. This will save the module's graph and
variables such that they can be loaded later using `tf.saved_model.load`. See
https://www.tensorflow.org/guide/saved_model for more details.
The Snapshotter is typically used to save infrequent permanent self-contained
snapshots which can be loaded later for inspection. For frequent saving of
model parameters in order to guard against pre-emption of the learning process
see the Checkpointer class.
Usage example:
```python
model = snt.Linear(10)
snapshotter = Snapshotter(objects_to_save={'model': model})
for _ in range(100):
# ...
snapshotter.save()
```
"""
def __init__(
self,
objects_to_save: Mapping[str, snt.Module],
*,
directory: str = '~/acme/',
time_delta_minutes: float = 30.0,
snapshot_ttl_seconds: int = _DEFAULT_SNAPSHOT_TTL,
):
"""Builds the saver object.
Args:
objects_to_save: Mapping specifying what to snapshot.
directory: Which directory to put the snapshot in.
time_delta_minutes: How often to save the snapshot, in minutes.
      snapshot_ttl_seconds: TTL (time to live) in seconds for snapshots.
"""
objects_to_save = objects_to_save or {}
self._time_delta_minutes = time_delta_minutes
self._last_saved = 0.
self._snapshots = {}
# Save the base directory path so we can refer to it if needed.
self.directory = paths.process_path(
directory, 'snapshots', ttl_seconds=snapshot_ttl_seconds)
# Save a dictionary mapping paths to snapshot capable models.
for name, module in objects_to_save.items():
path = os.path.join(self.directory, name)
self._snapshots[path] = make_snapshot(module)
def save(self, force: bool = False) -> bool:
"""Snapshots if it's the appropriate time, otherwise no-ops.
Args:
force: If True, save new snapshot no matter how long it's been since the
last one.
Returns:
A boolean indicating if a save event happened.
"""
seconds_since_last = time.time() - self._last_saved
if (self._snapshots and
(force or seconds_since_last >= 60 * self._time_delta_minutes)):
# Save any snapshots.
for path, snapshot in self._snapshots.items():
tf.saved_model.save(snapshot, path)
# Record the time we finished saving.
self._last_saved = time.time()
return True
return False
class Snapshot(tf.Module):
"""Thin wrapper which allows the module to be saved."""
def __init__(self):
super().__init__()
self._module = None
self._variables = None
self._trainable_variables = None
@tf.function
def __call__(self, *args, **kwargs):
return self._module(*args, **kwargs)
@property
def submodules(self):
return [self._module]
@property
def variables(self):
return self._variables
@property
def trainable_variables(self):
return self._trainable_variables
# Registers the Snapshot object above such that when it is restored by
# tf.saved_model.load it will be restored as a Snapshot. This is important
# because it allows us to expose the __call__ method and the *_variables
# properties.
revived_types.register_revived_type(
'acme_snapshot',
lambda obj: isinstance(obj, Snapshot),
versions=[
revived_types.VersionedTypeRegistration(
object_factory=lambda proto: Snapshot(),
version=1,
min_producer_version=1,
min_consumer_version=1,
setter=setattr,
)
])
def make_snapshot(module: snt.Module):
"""Create a thin wrapper around a module to make it snapshottable."""
# Get the input signature as long as it has been created.
input_signature = _get_input_signature(module)
if input_signature is None:
raise ValueError(
('module instance "{}" has no input_signature attribute, '
'which is required for snapshotting; run '
'create_variables to add this annotation.').format(module.name))
# Wrap the module up in tf.function so we can process it properly.
@tf.function
def wrapped_module(*args, **kwargs):
return module(*args, **kwargs)
# pylint: disable=protected-access
snapshot = Snapshot()
snapshot._module = wrapped_module
snapshot._variables = module.variables
snapshot._trainable_variables = module.trainable_variables
  # pylint: enable=protected-access
# Make sure the snapshot has the proper input signature.
snapshot.__call__.get_concrete_function(*input_signature)
# If we are an RNN also save the initial-state generating function.
if isinstance(module, snt.RNNCore):
snapshot.initial_state = tf.function(module.initial_state)
snapshot.initial_state.get_concrete_function(
tf.TensorSpec(shape=(), dtype=tf.int32))
return snapshot
def _get_input_signature(module: snt.Module) -> Optional[tf.TensorSpec]:
"""Get module input signature.
  Works even if the module with the signature is wrapped in an snt.Sequential
  or snt.DeepRNN.
Args:
module: the module which input signature we need to get. The module has to
either have input_signature itself (i.e. you have to run create_variables
on the module), or it has to be a module (with input_signature) wrapped in
(one or multiple) snt.Sequential or snt.DeepRNNs.
Returns:
Input signature of the module or None if it's not available.
"""
if hasattr(module, '_input_signature'):
return module._input_signature # pylint: disable=protected-access
if isinstance(module, snt.Sequential):
first_layer = module._layers[0] # pylint: disable=protected-access
return _get_input_signature(first_layer)
if isinstance(module, snt.DeepRNN):
first_layer = module._layers[0] # pylint: disable=protected-access
input_signature = _get_input_signature(first_layer)
# Wrapping a module in DeepRNN changes its state shape, so we need to bring
# it up to date.
state = module.initial_state(1)
input_signature[-1] = tree.map_structure(
lambda t: tf.TensorSpec((None,) + t.shape[1:], t.dtype), state)
return input_signature
return None
class SaveableAdapter(tf.train.experimental.PythonState):
"""Adapter which allows `Saveable` object to be checkpointed by TensorFlow."""
def __init__(self, object_to_save: core.Saveable):
self._object_to_save = object_to_save
def serialize(self):
state = self._object_to_save.save()
return pickle.dumps(state)
def deserialize(self, pickled: bytes):
state = pickle.loads(pickled)
self._object_to_save.restore(state)
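# A minimal usage sketch: checkpoints a single counter variable with a zero
# time delta so every call to save() writes a checkpoint. The directory and
# variable are illustrative assumptions, not part of the classes above.
def _example_checkpointer_usage(directory: str = '/tmp/acme_example') -> bool:
  step = tf.Variable(0, dtype=tf.int64)
  checkpointer = Checkpointer(
      objects_to_save={'step': step},
      directory=directory,
      time_delta_minutes=0.,
      add_uid=False)
  step.assign_add(1)
  saved = checkpointer.save()  # True: the zero time delta has elapsed.
  checkpointer.restore()       # Restores step to the value last saved (1).
  return saved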
|
acme-master
|
acme/tf/savers.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme.tf.utils."""
from typing import Sequence, Tuple
from acme import specs
from acme.tf import utils as tf2_utils
import numpy as np
import sonnet as snt
import tensorflow as tf
from absl.testing import absltest
from absl.testing import parameterized
class PolicyValueHead(snt.Module):
"""A network with two linear layers, for policy and value respectively."""
def __init__(self, num_actions: int):
super().__init__(name='policy_value_network')
self._policy_layer = snt.Linear(num_actions)
self._value_layer = snt.Linear(1)
def __call__(self, inputs: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Returns a (Logits, Value) tuple."""
logits = self._policy_layer(inputs) # [B, A]
value = tf.squeeze(self._value_layer(inputs), axis=-1) # [B]
return logits, value
class CreateVariableTest(parameterized.TestCase):
"""Tests for tf2_utils.create_variables method."""
@parameterized.parameters([True, False])
def test_feedforward(self, recurrent: bool):
model = snt.Linear(42)
if recurrent:
model = snt.DeepRNN([model])
input_spec = specs.Array(shape=(10,), dtype=np.float32)
tf2_utils.create_variables(model, [input_spec])
variables: Sequence[tf.Variable] = model.variables
shapes = [v.shape.as_list() for v in variables]
self.assertSequenceEqual(shapes, [[42], [10, 42]])
@parameterized.parameters([True, False])
def test_output_spec_feedforward(self, recurrent: bool):
input_spec = specs.Array(shape=(10,), dtype=np.float32)
model = snt.Linear(42)
expected_spec = tf.TensorSpec(shape=(42,), dtype=tf.float32)
if recurrent:
model = snt.DeepRNN([model])
expected_spec = (expected_spec, ())
output_spec = tf2_utils.create_variables(model, [input_spec])
self.assertEqual(output_spec, expected_spec)
def test_multiple_outputs(self):
model = PolicyValueHead(42)
input_spec = specs.Array(shape=(10,), dtype=np.float32)
expected_spec = (tf.TensorSpec(shape=(42,), dtype=tf.float32),
tf.TensorSpec(shape=(), dtype=tf.float32))
output_spec = tf2_utils.create_variables(model, [input_spec])
variables: Sequence[tf.Variable] = model.variables
shapes = [v.shape.as_list() for v in variables]
self.assertSequenceEqual(shapes, [[42], [10, 42], [1], [10, 1]])
self.assertSequenceEqual(output_spec, expected_spec)
def test_scalar_output(self):
model = tf2_utils.to_sonnet_module(tf.reduce_sum)
input_spec = specs.Array(shape=(10,), dtype=np.float32)
expected_spec = tf.TensorSpec(shape=(), dtype=tf.float32)
output_spec = tf2_utils.create_variables(model, [input_spec])
self.assertEqual(model.variables, ())
self.assertEqual(output_spec, expected_spec)
def test_none_output(self):
model = tf2_utils.to_sonnet_module(lambda x: None)
input_spec = specs.Array(shape=(10,), dtype=np.float32)
expected_spec = None
output_spec = tf2_utils.create_variables(model, [input_spec])
self.assertEqual(model.variables, ())
self.assertEqual(output_spec, expected_spec)
def test_multiple_inputs_and_outputs(self):
def transformation(aa, bb, cc):
return (tf.concat([aa, bb, cc], axis=-1),
tf.concat([bb, cc], axis=-1))
model = tf2_utils.to_sonnet_module(transformation)
dtype = np.float32
input_spec = [specs.Array(shape=(2,), dtype=dtype),
specs.Array(shape=(3,), dtype=dtype),
specs.Array(shape=(4,), dtype=dtype)]
expected_output_spec = (tf.TensorSpec(shape=(9,), dtype=dtype),
tf.TensorSpec(shape=(7,), dtype=dtype))
output_spec = tf2_utils.create_variables(model, input_spec)
self.assertEqual(model.variables, ())
self.assertEqual(output_spec, expected_output_spec)
class Tf2UtilsTest(parameterized.TestCase):
"""Tests for tf2_utils methods."""
def test_batch_concat(self):
batch_size = 32
inputs = [
tf.zeros(shape=(batch_size, 2)),
{
'foo': tf.zeros(shape=(batch_size, 5, 3))
},
[tf.zeros(shape=(batch_size, 1))],
]
output_shape = tf2_utils.batch_concat(inputs).shape.as_list()
expected_shape = [batch_size, 2 + 5 * 3 + 1]
self.assertSequenceEqual(output_shape, expected_shape)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/tf/utils_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TF2 savers."""
import os
import re
import time
from unittest import mock
from acme import specs
from acme.testing import test_utils
from acme.tf import networks
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import paths
import launchpad
import numpy as np
import sonnet as snt
import tensorflow as tf
import tree
from absl.testing import absltest
class DummySaveable(tf2_savers.TFSaveable):
_state: tf.Variable
def __init__(self):
self._state = tf.Variable(0, dtype=tf.int32)
@property
def state(self):
return {'state': self._state}
class CheckpointerTest(test_utils.TestCase):
def test_save_and_restore(self):
"""Test that checkpointer correctly calls save and restore."""
x = tf.Variable(0, dtype=tf.int32)
directory = self.get_tempdir()
checkpointer = tf2_savers.Checkpointer(
objects_to_save={'x': x}, time_delta_minutes=0., directory=directory)
for _ in range(10):
saved = checkpointer.save()
self.assertTrue(saved)
x.assign_add(1)
checkpointer.restore()
np.testing.assert_array_equal(x.numpy(), np.int32(0))
def test_save_and_new_restore(self):
"""Tests that a fresh checkpointer correctly restores an existing ckpt."""
with mock.patch.object(paths, 'get_unique_id') as mock_unique_id:
mock_unique_id.return_value = ('test',)
x = tf.Variable(0, dtype=tf.int32)
directory = self.get_tempdir()
checkpointer1 = tf2_savers.Checkpointer(
objects_to_save={'x': x}, time_delta_minutes=0., directory=directory)
checkpointer1.save()
x.assign_add(1)
# Simulate a preemption: x is changed, and we make a new Checkpointer.
checkpointer2 = tf2_savers.Checkpointer(
objects_to_save={'x': x}, time_delta_minutes=0., directory=directory)
checkpointer2.restore()
np.testing.assert_array_equal(x.numpy(), np.int32(0))
def test_save_and_restore_time_based(self):
"""Test that checkpointer correctly calls save and restore based on time."""
x = tf.Variable(0, dtype=tf.int32)
directory = self.get_tempdir()
checkpointer = tf2_savers.Checkpointer(
objects_to_save={'x': x}, time_delta_minutes=1., directory=directory)
with mock.patch.object(time, 'time') as mock_time:
mock_time.return_value = 0.
self.assertFalse(checkpointer.save())
mock_time.return_value = 40.
self.assertFalse(checkpointer.save())
mock_time.return_value = 70.
self.assertTrue(checkpointer.save())
x.assign_add(1)
checkpointer.restore()
np.testing.assert_array_equal(x.numpy(), np.int32(0))
def test_no_checkpoint(self):
"""Test that checkpointer does nothing when checkpoint=False."""
num_steps = tf.Variable(0)
checkpointer = tf2_savers.Checkpointer(
objects_to_save={'num_steps': num_steps}, enable_checkpointing=False)
for _ in range(10):
self.assertFalse(checkpointer.save())
self.assertIsNone(checkpointer._checkpoint_manager)
def test_tf_saveable(self):
x = DummySaveable()
directory = self.get_tempdir()
checkpoint_runner = tf2_savers.CheckpointingRunner(
x, time_delta_minutes=0, directory=directory)
checkpoint_runner._checkpointer.save()
x._state.assign_add(1)
checkpoint_runner._checkpointer.restore()
np.testing.assert_array_equal(x._state.numpy(), np.int32(0))
class CheckpointingRunnerTest(test_utils.TestCase):
def test_signal_handling(self):
x = DummySaveable()
# Increment the value of DummySavable.
x.state['state'].assign_add(1)
directory = self.get_tempdir()
# Patch signals.add_handler so the registered signal handler sets the event.
with mock.patch.object(
launchpad, 'register_stop_handler') as mock_register_stop_handler:
def add_handler(fn):
fn()
mock_register_stop_handler.side_effect = add_handler
runner = tf2_savers.CheckpointingRunner(
wrapped=x,
time_delta_minutes=0,
directory=directory)
with self.assertRaises(SystemExit):
runner.run()
# Recreate DummySavable(), its tf.Variable is initialized to 0.
x = DummySaveable()
# Recreate the CheckpointingRunner, which will restore DummySavable() to 1.
tf2_savers.CheckpointingRunner(
wrapped=x,
time_delta_minutes=0,
directory=directory)
# Check DummyVariable() was restored properly.
np.testing.assert_array_equal(x.state['state'].numpy(), np.int32(1))
def test_checkpoint_dir(self):
directory = self.get_tempdir()
ckpt_runner = tf2_savers.CheckpointingRunner(
wrapped=DummySaveable(),
time_delta_minutes=0,
directory=directory)
expected_dir_re = f'{directory}/[a-z0-9-]*/checkpoints/default'
regexp = re.compile(expected_dir_re)
self.assertIsNotNone(regexp.fullmatch(ckpt_runner.get_directory()))
class SnapshotterTest(test_utils.TestCase):
def test_snapshot(self):
"""Test that snapshotter correctly calls saves/restores snapshots."""
# Create a test network.
net1 = networks.LayerNormMLP([10, 10])
spec = specs.Array([10], dtype=np.float32)
tf2_utils.create_variables(net1, [spec])
# Save the test network.
directory = self.get_tempdir()
objects_to_save = {'net': net1}
snapshotter = tf2_savers.Snapshotter(objects_to_save, directory=directory)
snapshotter.save()
# Reload the test network.
net2 = tf.saved_model.load(os.path.join(snapshotter.directory, 'net'))
inputs = tf2_utils.add_batch_dim(tf2_utils.zeros_like(spec))
with tf.GradientTape() as tape:
outputs1 = net1(inputs)
loss1 = tf.math.reduce_sum(outputs1)
grads1 = tape.gradient(loss1, net1.trainable_variables)
with tf.GradientTape() as tape:
outputs2 = net2(inputs)
loss2 = tf.math.reduce_sum(outputs2)
grads2 = tape.gradient(loss2, net2.trainable_variables)
assert np.allclose(outputs1, outputs2)
assert all(tree.map_structure(np.allclose, list(grads1), list(grads2)))
def test_snapshot_distribution(self):
"""Test that snapshotter correctly calls saves/restores snapshots."""
# Create a test network.
net1 = snt.Sequential([
networks.LayerNormMLP([10, 10]),
networks.MultivariateNormalDiagHead(1)
])
spec = specs.Array([10], dtype=np.float32)
tf2_utils.create_variables(net1, [spec])
# Save the test network.
directory = self.get_tempdir()
objects_to_save = {'net': net1}
snapshotter = tf2_savers.Snapshotter(objects_to_save, directory=directory)
snapshotter.save()
# Reload the test network.
net2 = tf.saved_model.load(os.path.join(snapshotter.directory, 'net'))
inputs = tf2_utils.add_batch_dim(tf2_utils.zeros_like(spec))
with tf.GradientTape() as tape:
dist1 = net1(inputs)
loss1 = tf.math.reduce_sum(dist1.mean() + dist1.variance())
grads1 = tape.gradient(loss1, net1.trainable_variables)
with tf.GradientTape() as tape:
dist2 = net2(inputs)
loss2 = tf.math.reduce_sum(dist2.mean() + dist2.variance())
grads2 = tape.gradient(loss2, net2.trainable_variables)
assert all(tree.map_structure(np.allclose, list(grads1), list(grads2)))
def test_force_snapshot(self):
"""Test that the force feature in Snapshotter.save() works correctly."""
# Create a test network.
net = snt.Linear(10)
spec = specs.Array([10], dtype=np.float32)
tf2_utils.create_variables(net, [spec])
# Save the test network.
directory = self.get_tempdir()
objects_to_save = {'net': net}
# Very long time_delta_minutes.
snapshotter = tf2_savers.Snapshotter(objects_to_save, directory=directory,
time_delta_minutes=1000)
self.assertTrue(snapshotter.save(force=False))
# Due to the long time_delta_minutes, only force=True will create a new
# snapshot. This also checks the default is force=False.
self.assertFalse(snapshotter.save())
self.assertTrue(snapshotter.save(force=True))
def test_rnn_snapshot(self):
"""Test that snapshotter correctly calls saves/restores snapshots on RNNs."""
# Create a test network.
net = snt.LSTM(10)
spec = specs.Array([10], dtype=np.float32)
tf2_utils.create_variables(net, [spec])
# Test that if you add some postprocessing without rerunning
# create_variables, it still works.
wrapped_net = snt.DeepRNN([net, lambda x: x])
for net1 in [net, wrapped_net]:
# Save the test network.
directory = self.get_tempdir()
objects_to_save = {'net': net1}
snapshotter = tf2_savers.Snapshotter(objects_to_save, directory=directory)
snapshotter.save()
# Reload the test network.
net2 = tf.saved_model.load(os.path.join(snapshotter.directory, 'net'))
inputs = tf2_utils.add_batch_dim(tf2_utils.zeros_like(spec))
with tf.GradientTape() as tape:
outputs1, next_state1 = net1(inputs, net1.initial_state(1))
loss1 = tf.math.reduce_sum(outputs1)
grads1 = tape.gradient(loss1, net1.trainable_variables)
with tf.GradientTape() as tape:
outputs2, next_state2 = net2(inputs, net2.initial_state(1))
loss2 = tf.math.reduce_sum(outputs2)
grads2 = tape.gradient(loss2, net2.trainable_variables)
assert np.allclose(outputs1, outputs2)
assert np.allclose(tree.flatten(next_state1), tree.flatten(next_state2))
assert all(tree.map_structure(np.allclose, list(grads1), list(grads2)))
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/tf/savers_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
acme-master
|
acme/tf/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for nested data structures involving NumPy and TensorFlow 2.x."""
import functools
from typing import List, Optional, Union
from acme import types
from acme.utils import tree_utils
import sonnet as snt
import tensorflow as tf
import tree
def add_batch_dim(nest: types.NestedTensor) -> types.NestedTensor:
"""Adds a batch dimension to each leaf of a nested structure of Tensors."""
return tree.map_structure(lambda x: tf.expand_dims(x, axis=0), nest)
def squeeze_batch_dim(nest: types.NestedTensor) -> types.NestedTensor:
"""Squeezes out a batch dimension from each leaf of a nested structure."""
return tree.map_structure(lambda x: tf.squeeze(x, axis=0), nest)
def batch_concat(inputs: types.NestedTensor) -> tf.Tensor:
"""Concatenate a collection of Tensors while preserving the batch dimension.
This takes a potentially nested collection of tensors, flattens everything
but the batch (first) dimension, and concatenates along the resulting data
(second) dimension.
Args:
inputs: a tensor or nested collection of tensors.
Returns:
A concatenated tensor which maintains the batch dimension but concatenates
all other data along the flattened second dimension.
"""
flat_leaves = tree.map_structure(snt.Flatten(), inputs)
return tf.concat(tree.flatten(flat_leaves), axis=-1)
def batch_to_sequence(data: types.NestedTensor) -> types.NestedTensor:
"""Converts data between sequence-major and batch-major format."""
return tree.map_structure(
lambda t: tf.transpose(t, [1, 0] + list(range(2, t.shape.rank))), data)
def tile_tensor(tensor: tf.Tensor, multiple: int) -> tf.Tensor:
"""Tiles `multiple` copies of `tensor` along a new leading axis."""
rank = len(tensor.shape)
multiples = tf.constant([multiple] + [1] * rank, dtype=tf.int32)
expanded_tensor = tf.expand_dims(tensor, axis=0)
return tf.tile(expanded_tensor, multiples)
def tile_nested(inputs: types.NestedTensor,
multiple: int) -> types.NestedTensor:
"""Tiles tensors in a nested structure along a new leading axis."""
tile = functools.partial(tile_tensor, multiple=multiple)
return tree.map_structure(tile, inputs)
def create_variables(
network: snt.Module,
input_spec: List[Union[types.NestedSpec, tf.TensorSpec]],
) -> Optional[tf.TensorSpec]:
"""Builds the network with dummy inputs to create the necessary variables.
Args:
network: Sonnet Module whose variables are to be created.
input_spec: list of input specs to the network. The length of this list
should match the number of arguments expected by `network`.
Returns:
output_spec: only returns an output spec if the output is a tf.Tensor, else
it doesn't return anything (None); e.g. if the output is a
tfp.distributions.Distribution.
"""
# Create a dummy observation with no batch dimension.
dummy_input = zeros_like(input_spec)
# If we have an RNNCore the hidden state will be an additional input.
if isinstance(network, snt.RNNCore):
initial_state = squeeze_batch_dim(network.initial_state(1))
dummy_input += [initial_state]
# Forward pass of the network which will create variables as a side effect.
dummy_output = network(*add_batch_dim(dummy_input))
# Evaluate the input signature by converting the dummy input into a
# TensorSpec. We then save the signature as a property of the network. This is
# done so that we can later use it when creating snapshots. We do this here
# because the snapshot code may not have access to the precise form of the
# inputs.
input_signature = tree.map_structure(
lambda t: tf.TensorSpec((None,) + t.shape, t.dtype), dummy_input)
network._input_signature = input_signature # pylint: disable=protected-access
def spec(output):
# If the output is not a Tensor, return None as spec is ill-defined.
if not isinstance(output, tf.Tensor):
return None
# If this is not a scalar Tensor, make sure to squeeze out the batch dim.
if tf.rank(output) > 0:
output = squeeze_batch_dim(output)
return tf.TensorSpec(output.shape, output.dtype)
return tree.map_structure(spec, dummy_output)
class TransformationWrapper(snt.Module):
"""Helper class for to_sonnet_module.
This wraps arbitrary Tensor-valued callables as a Sonnet module.
A use case for this is in agent code that could take either a trainable
sonnet module or a hard-coded function as its policy. By wrapping a hard-coded
policy with this class, the agent can then treat it as if it were a Sonnet
module. This removes the need for "if is_hard_coded:..." branches, which you'd
otherwise need if e.g. calling get_variables() on the policy.
"""
def __init__(self,
transformation: types.TensorValuedCallable,
name: Optional[str] = None):
super().__init__(name=name)
self._transformation = transformation
def __call__(self, *args, **kwargs):
return self._transformation(*args, **kwargs)
def to_sonnet_module(
transformation: types.TensorValuedCallable
) -> snt.Module:
"""Convert a tensor transformation to a Sonnet Module.
Args:
transformation: A Callable that takes one or more (nested) Tensors, and
returns one or more (nested) Tensors.
Returns:
A Sonnet Module that wraps the transformation.
"""
if isinstance(transformation, snt.Module):
return transformation
module = TransformationWrapper(transformation)
# Wrap the module to allow it to return an empty variable tuple.
return snt.allow_empty_variables(module)
def to_numpy(nest: types.NestedTensor) -> types.NestedArray:
"""Converts a nest of Tensors to a nest of numpy arrays."""
return tree.map_structure(lambda x: x.numpy(), nest)
def to_numpy_squeeze(nest: types.NestedTensor, axis=0) -> types.NestedArray:
"""Converts a nest of Tensors to a nest of numpy arrays and squeeze axis."""
return tree.map_structure(lambda x: tf.squeeze(x, axis=axis).numpy(), nest)
def zeros_like(nest: types.Nest) -> types.NestedTensor:
"""Given a nest of array-like objects, returns similarly nested tf.zeros."""
return tree.map_structure(lambda x: tf.zeros(x.shape, x.dtype), nest)
# TODO(b/160311329): Migrate call-sites and remove.
stack_sequence_fields = tree_utils.stack_sequence_fields
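# A minimal usage sketch: builds variables for a Sonnet MLP from a spec and
# flattens a nested observation with batch_concat. The numpy import, layer
# sizes, and shapes below are illustrative assumptions.
def _example_utils_usage() -> tf.Tensor:
  import numpy as np
  from acme import specs

  # A nested observation whose flattened per-example size is 3 + 4 * 4 = 19.
  observation = {
      'position': tf.zeros([8, 3]),
      'camera': tf.zeros([8, 4, 4]),
  }
  network = snt.nets.MLP([32, 32, 4])
  # create_variables runs a dummy forward pass to build the variables and
  # returns the output spec (here tf.TensorSpec(shape=(4,), dtype=tf.float32)).
  create_variables(network, [specs.Array(shape=(19,), dtype=np.float32)])
  return network(batch_concat(observation))  # Shape [8, 4].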
|
acme-master
|
acme/tf/utils.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme.tf.variable_utils."""
import threading
from acme.testing import fakes
from acme.tf import utils as tf2_utils
from acme.tf import variable_utils as tf2_variable_utils
import sonnet as snt
import tensorflow as tf
_MLP_LAYERS = [50, 30]
_INPUT_SIZE = 28
_BATCH_SIZE = 8
_UPDATE_PERIOD = 2
class VariableClientTest(tf.test.TestCase):
def setUp(self):
super().setUp()
# Create two instances of the same model.
self._actor_model = snt.nets.MLP(_MLP_LAYERS)
self._learner_model = snt.nets.MLP(_MLP_LAYERS)
# Create variables first.
input_spec = tf.TensorSpec(shape=(_INPUT_SIZE,), dtype=tf.float32)
tf2_utils.create_variables(self._actor_model, [input_spec])
tf2_utils.create_variables(self._learner_model, [input_spec])
def test_update_and_wait(self):
# Create a variable source (emulating the learner).
np_learner_variables = tf2_utils.to_numpy(self._learner_model.variables)
variable_source = fakes.VariableSource(np_learner_variables)
# Create a variable client (emulating the actor).
variable_client = tf2_variable_utils.VariableClient(
variable_source, {'policy': self._actor_model.variables})
# Create some random batch of test input:
x = tf.random.normal(shape=(_BATCH_SIZE, _INPUT_SIZE))
# Before copying variables, the models have different outputs.
self.assertNotAllClose(self._actor_model(x), self._learner_model(x))
# Update the variable client.
variable_client.update_and_wait()
# After copying variables (by updating the client), the models are the same.
self.assertAllClose(self._actor_model(x), self._learner_model(x))
def test_update(self):
# Create a barrier to be shared between the test body and the variable
# source. The barrier will block until, in this case, two threads call
# wait(). Note that the (fake) variable source will call it within its
# get_variables() call.
barrier = threading.Barrier(2)
# Create a variable source (emulating the learner).
np_learner_variables = tf2_utils.to_numpy(self._learner_model.variables)
variable_source = fakes.VariableSource(np_learner_variables, barrier)
# Create a variable client (emulating the actor).
variable_client = tf2_variable_utils.VariableClient(
variable_source, {'policy': self._actor_model.variables},
update_period=_UPDATE_PERIOD)
# Create some random batch of test input:
x = tf.random.normal(shape=(_BATCH_SIZE, _INPUT_SIZE))
# Create variables by doing the computation once.
learner_output = self._learner_model(x)
actor_output = self._actor_model(x)
del learner_output, actor_output
for _ in range(_UPDATE_PERIOD):
# Before the update period is reached, the models have different outputs.
self.assertNotAllClose(self._actor_model.variables,
self._learner_model.variables)
# Before the update period is reached, the variable client should not make
# any requests for variables.
self.assertIsNone(variable_client._future)
variable_client.update()
# Make sure the last call created a request for variables and reset the
# internal call counter.
self.assertIsNotNone(variable_client._future)
self.assertEqual(variable_client._call_counter, 0)
future = variable_client._future
for _ in range(_UPDATE_PERIOD):
# Before the barrier allows the variables to be released, the models have
# different outputs.
self.assertNotAllClose(self._actor_model.variables,
self._learner_model.variables)
variable_client.update()
# Make sure no new requests are made.
self.assertEqual(variable_client._future, future)
# Calling wait() on the barrier will now allow the variables to be copied
# over from source to client.
barrier.wait()
# Update once more to ensure the variables are copied over.
while variable_client._future is not None:
variable_client.update()
# After a number of update calls, the variables should be the same.
self.assertAllClose(self._actor_model.variables,
self._learner_model.variables)
if __name__ == '__main__':
tf.test.main()
|
acme-master
|
acme/tf/variable_utils_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Variable handling utilities for TensorFlow 2."""
from concurrent import futures
from typing import Mapping, Optional, Sequence
from acme import core
import tensorflow as tf
import tree
class VariableClient:
"""A variable client for updating variables from a remote source."""
def __init__(self,
client: core.VariableSource,
variables: Mapping[str, Sequence[tf.Variable]],
update_period: int = 1):
self._keys = list(variables.keys())
self._variables = tree.flatten(list(variables.values()))
self._call_counter = 0
self._update_period = update_period
self._client = client
self._request = lambda: client.get_variables(self._keys)
# Create a single background thread to fetch variables without necessarily
# blocking the actor.
self._executor = futures.ThreadPoolExecutor(max_workers=1)
self._async_request = lambda: self._executor.submit(self._request)
# Initialize this client's future to None to indicate to the `update()`
# method that there is no pending/running request.
self._future: Optional[futures.Future] = None
def update(self, wait: bool = False):
"""Periodically updates the variables with the latest copy from the source.
This stateful update method keeps track of the number of calls to it and,
every `update_period` call, sends a request to its server to retrieve the
latest variables.
If wait is True, a blocking request is executed. Any active request will be
cancelled.
If wait is False, this method makes an asynchronous request for variables
and returns. Unless the request is immediately fulfilled, the variables are
only copied _within a subsequent call to_ `update()`, whenever the request
is fulfilled by the `VariableSource`. If there is an existing fulfilled
request when this method is called, the resulting variables are immediately
copied.
Args:
wait: if True, executes blocking update.
"""
# Track the number of calls (we only update periodically).
if self._call_counter < self._update_period:
self._call_counter += 1
period_reached: bool = self._call_counter >= self._update_period
if period_reached and wait:
# Cancel any active request.
self._future: Optional[futures.Future] = None
self.update_and_wait()
self._call_counter = 0
return
if period_reached and self._future is None:
# The update period has been reached and no request has been sent yet, so
      # make an asynchronous request now.
self._future = self._async_request()
self._call_counter = 0
if self._future is not None and self._future.done():
# The active request is done so copy the result and remove the future.
self._copy(self._future.result())
self._future: Optional[futures.Future] = None
else:
# There is either a pending/running request or we're between update
# periods, so just carry on.
return
def update_and_wait(self):
"""Immediately update and block until we get the result."""
self._copy(self._request())
def _copy(self, new_variables: Sequence[Sequence[tf.Variable]]):
"""Copies the new variables to the old ones."""
new_variables = tree.flatten(new_variables)
if len(self._variables) != len(new_variables):
raise ValueError('Length mismatch between old variables and new.')
for new, old in zip(new_variables, self._variables):
old.assign(new)
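# A minimal usage sketch: keeps a local copy of a 'policy' variable collection
# in sync with a remote source. The variable shapes and update period are
# illustrative assumptions; any core.VariableSource implementation would do.
def _example_variable_client(source: core.VariableSource) -> VariableClient:
  local_variables = [tf.Variable(tf.zeros([4, 2])), tf.Variable(tf.zeros([2]))]
  client = VariableClient(
      source, {'policy': local_variables}, update_period=10)
  client.update_and_wait()  # Blocking fetch and copy of the latest variables.
  # In an actor loop one would instead call client.update() every step and let
  # the periodic, asynchronous request machinery above do the work.
  return client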
|
acme-master
|
acme/tf/variable_utils.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses for quantile regression."""
from typing import NamedTuple
from .huber import huber
import sonnet as snt
import tensorflow as tf
class QuantileDistribution(NamedTuple):
values: tf.Tensor
logits: tf.Tensor
class NonUniformQuantileRegression(snt.Module):
"""Compute the quantile regression loss for the distributional TD error."""
def __init__(
self,
huber_param: float = 0.,
name: str = 'NUQuantileRegression'):
"""Initializes the module.
Args:
huber_param: The point where the huber loss function changes from a
quadratic to linear.
name: name to use for grouping operations.
"""
super().__init__(name=name)
self._huber_param = huber_param
def __call__(
self,
q_tm1: QuantileDistribution,
r_t: tf.Tensor,
pcont_t: tf.Tensor,
q_t: QuantileDistribution,
tau: tf.Tensor,
) -> tf.Tensor:
"""Calculates the loss.
Note that this is only defined for discrete quantile-valued distributions.
In particular we assume that the distributions define q.logits and
q.values.
Args:
q_tm1: the distribution at time t-1.
r_t: the reward at time t.
pcont_t: the discount factor at time t.
q_t: the target distribution.
tau: the quantile regression targets.
Returns:
Value of the loss.
"""
# Distributional Bellman update
values_t = (tf.reshape(r_t, (-1, 1)) +
tf.reshape(pcont_t, (-1, 1)) * q_t.values)
values_t = tf.stop_gradient(values_t)
probs_t = tf.nn.softmax(q_t.logits)
# Quantile regression loss
    # Tau gives the quantile regression targets: the points in the sample
    # space [0, 1] that each output should train towards.
    # Tau applies along the second dimension of delta (below).
tau = tf.expand_dims(tau, -1)
    # Quantile TD-error and asymmetric weighting.
delta = values_t[:, None, :] - q_tm1.values[:, :, None]
delta_neg = tf.cast(delta < 0., dtype=tf.float32)
# This stop_gradient is very important, do not remove
weight = tf.stop_gradient(tf.abs(tau - delta_neg))
# loss
loss = huber(delta, self._huber_param) * weight
loss = tf.reduce_sum(loss * probs_t[:, None, :], 2)
    # Summing over quantiles (rather than taking the mean) has empirically
    # given better performance.
loss = tf.reduce_sum(loss, -1)
return loss
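# Hedged usage sketch (added here for exposition; not part of the original
# module): exercising the loss on random data. The shapes and the choice of
# tau as quantile midpoints are illustrative assumptions.
if __name__ == '__main__':
  batch, num_quantiles = 4, 11
  tau = tf.broadcast_to(
      (tf.range(num_quantiles, dtype=tf.float32) + 0.5) / num_quantiles,
      (batch, num_quantiles))
  q_tm1 = QuantileDistribution(
      values=tf.random.normal((batch, num_quantiles)),
      logits=tf.random.normal((batch, num_quantiles)))
  q_t = QuantileDistribution(
      values=tf.random.normal((batch, num_quantiles)),
      logits=tf.random.normal((batch, num_quantiles)))
  loss_fn = NonUniformQuantileRegression(huber_param=1.0)
  loss = loss_fn(
      q_tm1,
      r_t=tf.random.normal((batch,)),
      pcont_t=0.99 * tf.ones((batch,)),
      q_t=q_t,
      tau=tau)
  print(loss.shape)  # Expected: (batch,), one loss value per batch element.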
|
acme-master
|
acme/tf/losses/quantile.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the MPO losses.
The MPO loss is implemented as a Sonnet module rather than a function so that it
can hold its own dual variables, as instances of `tf.Variable`, which it creates
the first time the module is called.
Tensor shapes are annotated, where helpful, as follows:
B: batch size,
N: number of sampled actions, see MPO paper for more details,
D: dimensionality of the action space.
"""
from typing import Dict, Tuple, Union
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
_MPO_FLOAT_EPSILON = 1e-8
class MPO(snt.Module):
"""MPO loss with decoupled KL constraints as in (Abdolmaleki et al., 2018).
This implementation of the MPO loss includes the following features, as
options:
- Satisfying the KL-constraint on a per-dimension basis (on by default);
- Penalizing actions that fall outside of [-1, 1] (on by default) as a
special case of multi-objective MPO (MO-MPO; Abdolmaleki et al., 2020).
For best results on the control suite, keep both of these on.
(Abdolmaleki et al., 2018): https://arxiv.org/pdf/1812.02256.pdf
(Abdolmaleki et al., 2020): https://arxiv.org/pdf/2005.07513.pdf
"""
def __init__(self,
epsilon: float,
epsilon_mean: float,
epsilon_stddev: float,
init_log_temperature: float,
init_log_alpha_mean: float,
init_log_alpha_stddev: float,
per_dim_constraining: bool = True,
action_penalization: bool = True,
epsilon_penalty: float = 0.001,
name: str = "MPO"):
"""Initialize and configure the MPO loss.
Args:
epsilon: KL constraint on the non-parametric auxiliary policy, the one
associated with the dual variable called temperature.
epsilon_mean: KL constraint on the mean of the Gaussian policy, the one
associated with the dual variable called alpha_mean.
      epsilon_stddev: KL constraint on the stddev of the Gaussian policy, the
        one associated with the dual variable called alpha_stddev.
init_log_temperature: initial value for the temperature in log-space, note
a softplus (rather than an exp) will be used to transform this.
init_log_alpha_mean: initial value for the alpha_mean in log-space, note
a softplus (rather than an exp) will be used to transform this.
init_log_alpha_stddev: initial value for the alpha_stddev in log-space,
note a softplus (rather than an exp) will be used to transform this.
per_dim_constraining: whether to enforce the KL constraint on each
dimension independently; this is the default. Otherwise the overall KL
is constrained, which allows some dimensions to change more at the
expense of others staying put.
action_penalization: whether to use a KL constraint to penalize actions
via the MO-MPO algorithm.
epsilon_penalty: KL constraint on the probability of violating the action
constraint.
name: a name for the module, passed directly to snt.Module.
"""
super().__init__(name=name)
    # MPO constraint thresholds.
self._epsilon = tf.constant(epsilon)
self._epsilon_mean = tf.constant(epsilon_mean)
self._epsilon_stddev = tf.constant(epsilon_stddev)
# Initial values for the constraints' dual variables.
self._init_log_temperature = init_log_temperature
self._init_log_alpha_mean = init_log_alpha_mean
self._init_log_alpha_stddev = init_log_alpha_stddev
# Whether to penalize out-of-bound actions via MO-MPO and its corresponding
# constraint threshold.
self._action_penalization = action_penalization
self._epsilon_penalty = tf.constant(epsilon_penalty)
    # Whether to ensure per-dimension KL constraint satisfaction.
self._per_dim_constraining = per_dim_constraining
@snt.once
def create_dual_variables_once(self, shape: tf.TensorShape, dtype: tf.DType):
"""Creates the dual variables the first time the loss module is called."""
# Create the dual variables.
self._log_temperature = tf.Variable(
initial_value=[self._init_log_temperature],
dtype=dtype,
name="log_temperature",
shape=(1,))
self._log_alpha_mean = tf.Variable(
initial_value=tf.fill(shape, self._init_log_alpha_mean),
dtype=dtype,
name="log_alpha_mean",
shape=shape)
self._log_alpha_stddev = tf.Variable(
initial_value=tf.fill(shape, self._init_log_alpha_stddev),
dtype=dtype,
name="log_alpha_stddev",
shape=shape)
# Cast constraint thresholds to the expected dtype.
self._epsilon = tf.cast(self._epsilon, dtype)
self._epsilon_mean = tf.cast(self._epsilon_mean, dtype)
self._epsilon_stddev = tf.cast(self._epsilon_stddev, dtype)
# Maybe create the action penalization dual variable.
if self._action_penalization:
self._epsilon_penalty = tf.cast(self._epsilon_penalty, dtype)
self._log_penalty_temperature = tf.Variable(
initial_value=[self._init_log_temperature],
dtype=dtype,
name="log_penalty_temperature",
shape=(1,))
def __call__(
self,
online_action_distribution: Union[tfd.MultivariateNormalDiag,
tfd.Independent],
target_action_distribution: Union[tfd.MultivariateNormalDiag,
tfd.Independent],
actions: tf.Tensor, # Shape [N, B, D].
q_values: tf.Tensor, # Shape [N, B].
) -> Tuple[tf.Tensor, Dict[str, tf.Tensor]]:
"""Computes the decoupled MPO loss.
Args:
online_action_distribution: online distribution returned by the online
policy network; expects batch_dims of [B] and event_dims of [D].
target_action_distribution: target distribution returned by the target
policy network; expects same shapes as online distribution.
actions: actions sampled from the target policy; expects shape [N, B, D].
q_values: Q-values associated with each action; expects shape [N, B].
Returns:
Loss, combining the policy loss, KL penalty, and dual losses required to
adapt the dual variables.
Stats, for diagnostics and tracking performance.
"""
# Cast `MultivariateNormalDiag`s to Independent Normals.
# The latter allows us to satisfy KL constraints per-dimension.
if isinstance(target_action_distribution, tfd.MultivariateNormalDiag):
target_action_distribution = tfd.Independent(
tfd.Normal(target_action_distribution.mean(),
target_action_distribution.stddev()))
online_action_distribution = tfd.Independent(
tfd.Normal(online_action_distribution.mean(),
online_action_distribution.stddev()))
# Infer the shape and dtype of dual variables.
scalar_dtype = q_values.dtype
if self._per_dim_constraining:
dual_variable_shape = target_action_distribution.distribution.kl_divergence(
online_action_distribution.distribution).shape[1:] # Should be [D].
else:
dual_variable_shape = target_action_distribution.kl_divergence(
online_action_distribution).shape[1:] # Should be [1].
# Create dual variables for the KL constraints; only happens the first call.
self.create_dual_variables_once(dual_variable_shape, scalar_dtype)
# Project dual variables to ensure they stay positive.
min_log_temperature = tf.constant(-18.0, scalar_dtype)
min_log_alpha = tf.constant(-18.0, scalar_dtype)
self._log_temperature.assign(
tf.maximum(min_log_temperature, self._log_temperature))
self._log_alpha_mean.assign(tf.maximum(min_log_alpha, self._log_alpha_mean))
self._log_alpha_stddev.assign(
tf.maximum(min_log_alpha, self._log_alpha_stddev))
# Transform dual variables from log-space.
# Note: using softplus instead of exponential for numerical stability.
temperature = tf.math.softplus(self._log_temperature) + _MPO_FLOAT_EPSILON
alpha_mean = tf.math.softplus(self._log_alpha_mean) + _MPO_FLOAT_EPSILON
alpha_stddev = tf.math.softplus(self._log_alpha_stddev) + _MPO_FLOAT_EPSILON
# Get online and target means and stddevs in preparation for decomposition.
online_mean = online_action_distribution.distribution.mean()
online_scale = online_action_distribution.distribution.stddev()
target_mean = target_action_distribution.distribution.mean()
target_scale = target_action_distribution.distribution.stddev()
# Compute normalized importance weights, used to compute expectations with
# respect to the non-parametric policy; and the temperature loss, used to
# adapt the tempering of Q-values.
normalized_weights, loss_temperature = compute_weights_and_temperature_loss(
q_values, self._epsilon, temperature)
# Only needed for diagnostics: Compute estimated actualized KL between the
# non-parametric and current target policies.
kl_nonparametric = compute_nonparametric_kl_from_normalized_weights(
normalized_weights)
if self._action_penalization:
# Project and transform action penalization temperature.
self._log_penalty_temperature.assign(
tf.maximum(min_log_temperature, self._log_penalty_temperature))
penalty_temperature = tf.math.softplus(
self._log_penalty_temperature) + _MPO_FLOAT_EPSILON
# Compute action penalization cost.
      # Note: the cost is zero in [-1, 1] and grows with the norm of the
      # violation beyond that range.
diff_out_of_bound = actions - tf.clip_by_value(actions, -1.0, 1.0)
cost_out_of_bound = -tf.norm(diff_out_of_bound, axis=-1)
penalty_normalized_weights, loss_penalty_temperature = compute_weights_and_temperature_loss(
cost_out_of_bound, self._epsilon_penalty, penalty_temperature)
# Only needed for diagnostics: Compute estimated actualized KL between the
# non-parametric and current target policies.
penalty_kl_nonparametric = compute_nonparametric_kl_from_normalized_weights(
penalty_normalized_weights)
# Combine normalized weights.
normalized_weights += penalty_normalized_weights
loss_temperature += loss_penalty_temperature
# Decompose the online policy into fixed-mean & fixed-stddev distributions.
# This has been documented as having better performance in bandit settings,
# see e.g. https://arxiv.org/pdf/1812.02256.pdf.
fixed_stddev_distribution = tfd.Independent(
tfd.Normal(loc=online_mean, scale=target_scale))
fixed_mean_distribution = tfd.Independent(
tfd.Normal(loc=target_mean, scale=online_scale))
# Compute the decomposed policy losses.
loss_policy_mean = compute_cross_entropy_loss(
actions, normalized_weights, fixed_stddev_distribution)
loss_policy_stddev = compute_cross_entropy_loss(
actions, normalized_weights, fixed_mean_distribution)
# Compute the decomposed KL between the target and online policies.
if self._per_dim_constraining:
kl_mean = target_action_distribution.distribution.kl_divergence(
fixed_stddev_distribution.distribution) # Shape [B, D].
kl_stddev = target_action_distribution.distribution.kl_divergence(
fixed_mean_distribution.distribution) # Shape [B, D].
else:
kl_mean = target_action_distribution.kl_divergence(
fixed_stddev_distribution) # Shape [B].
kl_stddev = target_action_distribution.kl_divergence(
fixed_mean_distribution) # Shape [B].
# Compute the alpha-weighted KL-penalty and dual losses to adapt the alphas.
loss_kl_mean, loss_alpha_mean = compute_parametric_kl_penalty_and_dual_loss(
kl_mean, alpha_mean, self._epsilon_mean)
loss_kl_stddev, loss_alpha_stddev = compute_parametric_kl_penalty_and_dual_loss(
kl_stddev, alpha_stddev, self._epsilon_stddev)
# Combine losses.
loss_policy = loss_policy_mean + loss_policy_stddev
loss_kl_penalty = loss_kl_mean + loss_kl_stddev
loss_dual = loss_alpha_mean + loss_alpha_stddev + loss_temperature
loss = loss_policy + loss_kl_penalty + loss_dual
stats = {}
# Dual Variables.
stats["dual_alpha_mean"] = tf.reduce_mean(alpha_mean)
stats["dual_alpha_stddev"] = tf.reduce_mean(alpha_stddev)
stats["dual_temperature"] = tf.reduce_mean(temperature)
# Losses.
stats["loss_policy"] = tf.reduce_mean(loss)
stats["loss_alpha"] = tf.reduce_mean(loss_alpha_mean + loss_alpha_stddev)
stats["loss_temperature"] = tf.reduce_mean(loss_temperature)
# KL measurements.
stats["kl_q_rel"] = tf.reduce_mean(kl_nonparametric) / self._epsilon
if self._action_penalization:
stats["penalty_kl_q_rel"] = tf.reduce_mean(
penalty_kl_nonparametric) / self._epsilon_penalty
stats["kl_mean_rel"] = tf.reduce_mean(kl_mean) / self._epsilon_mean
stats["kl_stddev_rel"] = tf.reduce_mean(kl_stddev) / self._epsilon_stddev
if self._per_dim_constraining:
# When KL is constrained per-dimension, we also log per-dimension min and
# max of mean/std of the realized KL costs.
stats["kl_mean_rel_min"] = tf.reduce_min(tf.reduce_mean(
kl_mean, axis=0)) / self._epsilon_mean
stats["kl_mean_rel_max"] = tf.reduce_max(tf.reduce_mean(
kl_mean, axis=0)) / self._epsilon_mean
stats["kl_stddev_rel_min"] = tf.reduce_min(
tf.reduce_mean(kl_stddev, axis=0)) / self._epsilon_stddev
stats["kl_stddev_rel_max"] = tf.reduce_max(
tf.reduce_mean(kl_stddev, axis=0)) / self._epsilon_stddev
# Q measurements.
stats["q_min"] = tf.reduce_mean(tf.reduce_min(q_values, axis=0))
stats["q_max"] = tf.reduce_mean(tf.reduce_max(q_values, axis=0))
# If the policy has standard deviation, log summary stats for this as well.
pi_stddev = online_action_distribution.distribution.stddev()
stats["pi_stddev_min"] = tf.reduce_mean(tf.reduce_min(pi_stddev, axis=-1))
stats["pi_stddev_max"] = tf.reduce_mean(tf.reduce_max(pi_stddev, axis=-1))
# Condition number of the diagonal covariance (actually, stddev) matrix.
stats["pi_stddev_cond"] = tf.reduce_mean(
tf.reduce_max(pi_stddev, axis=-1) / tf.reduce_min(pi_stddev, axis=-1))
return loss, stats
def compute_weights_and_temperature_loss(
q_values: tf.Tensor,
epsilon: float,
temperature: tf.Variable,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Computes normalized importance weights for the policy optimization.
Args:
q_values: Q-values associated with the actions sampled from the target
policy; expected shape [N, B].
epsilon: Desired constraint on the KL between the target and non-parametric
policies.
temperature: Scalar used to temper the Q-values before computing normalized
importance weights from them. This is really the Lagrange dual variable
in the constrained optimization problem, the solution of which is the
non-parametric policy targeted by the policy loss.
Returns:
Normalized importance weights, used for policy optimization.
Temperature loss, used to adapt the temperature.
"""
# Temper the given Q-values using the current temperature.
tempered_q_values = tf.stop_gradient(q_values) / temperature
# Compute the normalized importance weights used to compute expectations with
# respect to the non-parametric policy.
normalized_weights = tf.nn.softmax(tempered_q_values, axis=0)
normalized_weights = tf.stop_gradient(normalized_weights)
# Compute the temperature loss (dual of the E-step optimization problem).
q_logsumexp = tf.reduce_logsumexp(tempered_q_values, axis=0)
log_num_actions = tf.math.log(tf.cast(q_values.shape[0], tf.float32))
loss_temperature = epsilon + tf.reduce_mean(q_logsumexp) - log_num_actions
loss_temperature = temperature * loss_temperature
return normalized_weights, loss_temperature
def compute_nonparametric_kl_from_normalized_weights(
normalized_weights: tf.Tensor) -> tf.Tensor:
"""Estimate the actualized KL between the non-parametric and target policies."""
# Compute integrand.
num_action_samples = tf.cast(normalized_weights.shape[0], tf.float32)
integrand = tf.math.log(num_action_samples * normalized_weights + 1e-8)
# Return the expectation with respect to the non-parametric policy.
return tf.reduce_sum(normalized_weights * integrand, axis=0)
def compute_cross_entropy_loss(
sampled_actions: tf.Tensor,
normalized_weights: tf.Tensor,
online_action_distribution: tfp.distributions.Distribution,
) -> tf.Tensor:
"""Compute cross-entropy online and the reweighted target policy.
Args:
sampled_actions: samples used in the Monte Carlo integration in the policy
loss. Expected shape is [N, B, ...], where N is the number of sampled
actions and B is the number of sampled states.
normalized_weights: target policy multiplied by the exponentiated Q values
and normalized; expected shape is [N, B].
online_action_distribution: policy to be optimized.
Returns:
loss_policy_gradient: the cross-entropy loss that, when differentiated,
produces the policy gradient.
"""
# Compute the M-step loss.
log_prob = online_action_distribution.log_prob(sampled_actions)
# Compute the weighted average log-prob using the normalized weights.
loss_policy_gradient = -tf.reduce_sum(log_prob * normalized_weights, axis=0)
# Return the mean loss over the batch of states.
return tf.reduce_mean(loss_policy_gradient, axis=0)
def compute_parametric_kl_penalty_and_dual_loss(
kl: tf.Tensor,
alpha: tf.Variable,
epsilon: float,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Computes the KL cost to be added to the Lagragian and its dual loss.
The KL cost is simply the alpha-weighted KL divergence and it is added as a
regularizer to the policy loss. The dual variable alpha itself has a loss that
can be minimized to adapt the strength of the regularizer to keep the KL
between consecutive updates at the desired target value of epsilon.
Args:
kl: KL divergence between the target and online policies.
alpha: Lagrange multipliers (dual variables) for the KL constraints.
epsilon: Desired value for the KL.
Returns:
loss_kl: alpha-weighted KL regularization to be added to the policy loss.
loss_alpha: The Lagrange dual loss minimized to adapt alpha.
"""
# Compute the mean KL over the batch.
mean_kl = tf.reduce_mean(kl, axis=0)
# Compute the regularization.
loss_kl = tf.reduce_sum(tf.stop_gradient(alpha) * mean_kl)
# Compute the dual loss.
loss_alpha = tf.reduce_sum(alpha * (epsilon - tf.stop_gradient(mean_kl)))
return loss_kl, loss_alpha
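# Hedged usage sketch (added here for exposition; not part of the original
# module): a single forward pass of the loss on random data. The batch sizes,
# epsilons, and initial dual values below are arbitrary illustrative choices.
if __name__ == '__main__':
  num_samples, batch_size, action_dim = 20, 8, 3
  online = tfd.MultivariateNormalDiag(
      loc=tf.zeros((batch_size, action_dim)),
      scale_diag=tf.ones((batch_size, action_dim)))
  target = tfd.MultivariateNormalDiag(
      loc=tf.zeros((batch_size, action_dim)),
      scale_diag=tf.ones((batch_size, action_dim)))
  actions = tf.random.normal((num_samples, batch_size, action_dim))
  q_values = tf.random.normal((num_samples, batch_size))
  loss_module = MPO(
      epsilon=0.1,
      epsilon_mean=0.01,
      epsilon_stddev=1e-4,
      init_log_temperature=10.,
      init_log_alpha_mean=10.,
      init_log_alpha_stddev=1000.)
  loss, stats = loss_module(online, target, actions, q_values)
  print(loss.shape, sorted(stats.keys()))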
|
acme-master
|
acme/tf/losses/mpo.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various losses for training agent components (policies, critics, etc)."""
from acme.tf.losses.distributional import categorical
from acme.tf.losses.distributional import multiaxis_categorical
from acme.tf.losses.dpg import dpg
from acme.tf.losses.huber import huber
from acme.tf.losses.mompo import KLConstraint
from acme.tf.losses.mompo import MultiObjectiveMPO
from acme.tf.losses.mpo import MPO
from acme.tf.losses.r2d2 import transformed_n_step_loss
# Internal imports.
# pylint: disable=g-bad-import-order,g-import-not-at-top
from acme.tf.losses.quantile import NonUniformQuantileRegression
from acme.tf.losses.quantile import QuantileDistribution
|
acme-master
|
acme/tf/losses/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses for Deterministic Policy Gradients."""
from typing import Optional
import tensorflow as tf
def dpg(
q_max: tf.Tensor,
a_max: tf.Tensor,
tape: tf.GradientTape,
dqda_clipping: Optional[float] = None,
clip_norm: bool = False,
) -> tf.Tensor:
"""Deterministic policy gradient loss, similar to trfl.dpg."""
# Calculate the gradient dq/da.
dqda = tape.gradient([q_max], [a_max])[0]
if dqda is None:
raise ValueError('q_max needs to be a function of a_max.')
# Clipping the gradient dq/da.
if dqda_clipping is not None:
if dqda_clipping <= 0:
      raise ValueError('dqda_clipping should be greater than 0, got {}.'.format(
          dqda_clipping))
if clip_norm:
dqda = tf.clip_by_norm(dqda, dqda_clipping, axes=-1)
else:
dqda = tf.clip_by_value(dqda, -1. * dqda_clipping, dqda_clipping)
  # target_a ensures the correct gradient is calculated during backprop.
target_a = dqda + a_max
  # Stop the gradient from flowing through the Q-network during backprop.
target_a = tf.stop_gradient(target_a)
  # Gradients only flow through the actor network.
loss = 0.5 * tf.reduce_sum(tf.square(target_a - a_max), axis=-1)
# This recovers the DPG because (letting w be the actor network weights):
# d(loss)/dw = 0.5 * (2 * (target_a - a_max) * d(target_a - a_max)/dw)
# = (target_a - a_max) * [d(target_a)/dw - d(a_max)/dw]
# = dq/da * [d(target_a)/dw - d(a_max)/dw] # by defn of target_a
# = dq/da * [0 - d(a_max)/dw] # by stop_gradient
# = - dq/da * da/dw
return loss
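# Hedged usage sketch (added here for exposition; not part of the original
# module): a toy actor/critic pair so that q_max is a differentiable function
# of a_max inside the tape. All shapes and weights below are arbitrary.
if __name__ == '__main__':
  observations = tf.random.normal((3, 5))
  actor_weights = tf.Variable(tf.random.normal((5, 2)))
  critic_weights = tf.constant([[1.0], [-1.0]])
  with tf.GradientTape(persistent=True) as tape:
    a_max = tf.tanh(tf.matmul(observations, actor_weights))   # Toy policy.
    q_max = tf.squeeze(tf.matmul(a_max, critic_weights), -1)  # Toy critic.
    policy_loss = tf.reduce_mean(
        dpg(q_max, a_max, tape=tape, dqda_clipping=1.0))
  policy_gradients = tape.gradient(policy_loss, [actor_weights])
  del tape  # Release the persistent tape.
  print(policy_gradients[0].shape)  # Expected: (5, 2).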
|
acme-master
|
acme/tf/losses/dpg.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses and projection operators relevant to distributional RL."""
from acme.tf import networks
import tensorflow as tf
def categorical(q_tm1: networks.DiscreteValuedDistribution, r_t: tf.Tensor,
d_t: tf.Tensor,
q_t: networks.DiscreteValuedDistribution) -> tf.Tensor:
"""Implements the Categorical Distributional TD(0)-learning loss."""
z_t = tf.reshape(r_t, (-1, 1)) + tf.reshape(d_t, (-1, 1)) * q_t.values
p_t = tf.nn.softmax(q_t.logits)
# Performs L2 projection.
target = tf.stop_gradient(l2_project(z_t, p_t, q_t.values))
# Calculates loss.
loss = tf.nn.softmax_cross_entropy_with_logits(
logits=q_tm1.logits, labels=target)
return loss
# Use an old version of the l2 projection which is probably slower on CPU
# but will run on GPUs.
def l2_project( # pylint: disable=invalid-name
Zp: tf.Tensor,
P: tf.Tensor,
Zq: tf.Tensor,
) -> tf.Tensor:
"""Project distribution (Zp, P) onto support Zq under the L2-metric over CDFs.
This projection works for any support Zq.
Let Kq be len(Zq) and Kp be len(Zp).
Args:
Zp: (batch_size, Kp) Support of distribution P
P: (batch_size, Kp) Probability values for P(Zp[i])
    Zq: (Kq,) Support to project onto
Returns:
L2 projection of (Zp, P) onto Zq.
"""
  # Removes any leading dimension of size 1 from Zq.
if Zq.get_shape().ndims > 1:
Zq = tf.squeeze(Zq, axis=0)
# Extracts vmin and vmax and construct helper tensors from Zq.
vmin, vmax = Zq[0], Zq[-1]
d_pos = tf.concat([Zq, vmin[None]], 0)[1:]
d_neg = tf.concat([vmax[None], Zq], 0)[:-1]
# Clips Zp to be in new support range (vmin, vmax).
clipped_zp = tf.clip_by_value(Zp, vmin, vmax)[:, None, :]
clipped_zq = Zq[None, :, None]
# Gets the distance between atom values in support.
d_pos = (d_pos - Zq)[None, :, None] # Zq[i+1] - Zq[i]
d_neg = (Zq - d_neg)[None, :, None] # Zq[i] - Zq[i-1]
delta_qp = clipped_zp - clipped_zq # Zp[j] - Zq[i]
d_sign = tf.cast(delta_qp >= 0., dtype=P.dtype)
delta_hat = (d_sign * delta_qp / d_pos) - ((1. - d_sign) * delta_qp / d_neg)
P = P[:, None, :]
return tf.reduce_sum(tf.clip_by_value(1. - delta_hat, 0., 1.) * P, 2)
def multiaxis_categorical( # pylint: disable=invalid-name
q_tm1: networks.DiscreteValuedDistribution,
r_t: tf.Tensor,
d_t: tf.Tensor,
q_t: networks.DiscreteValuedDistribution) -> tf.Tensor:
"""Implements a multi-axis categorical distributional TD(0)-learning loss.
  All arguments may have a leading batch axis, but q_tm1.logits and at least
  one of r_t or d_t *must* have a leading batch axis.
Args:
q_tm1: Previous timestep's value distribution.
r_t: Reward.
d_t: Discount.
q_t: Current timestep's value distribution.
Returns:
Cross-entropy Bellman loss between q_tm1 and q_t + r_t * d_t.
Shape: (B, *E), where
B is the batch size.
E is the broadcasted shape of r_t, d_t, and q_t.values[:-1].
"""
tf.assert_equal(tf.rank(r_t), tf.rank(d_t))
# Append a singleton axis corresponding to the axis that indexes the atoms in
# q_t.values.
r_t = r_t[..., None] # shape: (B, *R, 1)
d_t = d_t[..., None] # shape: (B, *D, 1)
z_t = r_t + d_t * q_t.values # shape: (B, *E, N)
p_t = tf.nn.softmax(q_t.logits)
# Performs L2 projection.
target = tf.stop_gradient(multiaxis_l2_project(z_t, p_t, q_t.values))
# Calculates loss.
loss = tf.nn.softmax_cross_entropy_with_logits(
logits=q_tm1.logits, labels=target)
return loss
# A modification of l2_project that allows multi-axis support arguments.
def multiaxis_l2_project( # pylint: disable=invalid-name
Zp: tf.Tensor,
P: tf.Tensor,
Zq: tf.Tensor,
) -> tf.Tensor:
"""Project distribution (Zp, P) onto support Zq under the L2-metric over CDFs.
Let source support Zp's shape be described as (B, *C, M), where:
B is the batch size.
C contains the sizes of any axes in between the first and last axes.
M is the number of atoms in the support.
Let destination support Zq's shape be described as (*D, N), where:
D contains the sizes of any axes before the last axis.
N is the number of atoms in the support.
Shapes C and D must have the same number of dimensions, and must be
broadcastable with each other.
Args:
Zp: Support of source distribution. Shape: (B, *C, M).
P: Probability values of source distribution p(Zp[i]). Shape: (B, *C, M).
Zq: Support to project P onto. Shape: (*D, N).
Returns:
The L2 projection of P from support Zp to support Zq.
Shape: (B, *E, N), where E is the broadcast-merged shape of C and D.
"""
tf.assert_equal(tf.shape(Zp), tf.shape(P))
# Shapes C, D, and E as defined in the docstring above.
shape_c = tf.shape(Zp)[1:-1] # drop the batch and atom axes
shape_d = tf.shape(Zq)[:-1] # drop the atom axis
shape_e = tf.broadcast_dynamic_shape(shape_c, shape_d)
# If Zq has fewer inner axes than the broadcasted output shape, insert some
# size-1 axes to broadcast.
ndim_c = tf.size(shape_c)
ndim_e = tf.size(shape_e)
Zp = tf.reshape(
Zp,
tf.concat([tf.shape(Zp)[:1], # B
tf.ones(tf.math.maximum(ndim_e - ndim_c, 0), dtype=tf.int32),
shape_c, # C
tf.shape(Zp)[-1:]], # M
axis=0))
P = tf.reshape(P, tf.shape(Zp))
# Broadcast Zp, P, and Zq's common axes to the same shape: E.
#
# Normally it'd be sufficient to ensure that these args have the same number
# of axes, then let the arithmetic operators broadcast as necessary. Instead,
# we need to explicitly broadcast them here, because there's a call to
# tf.clip_by_value(t, vmin, vmax) below, which doesn't allow t's dimensions
# to be expanded to match vmin and vmax.
# Shape: (B, *E, M)
Zp = tf.broadcast_to(
Zp,
tf.concat([tf.shape(Zp)[:1], # B
shape_e, # E
tf.shape(Zp)[-1:]], # M
axis=0))
# Shape: (B, *E, M)
P = tf.broadcast_to(P, tf.shape(Zp))
# Shape: (*E, N)
Zq = tf.broadcast_to(Zq, tf.concat([shape_e, tf.shape(Zq)[-1:]], axis=0))
# Extracts vmin and vmax and construct helper tensors from Zq.
# These have shape shape_q, except the last axis has size 1.
# Shape: (*E, 1)
vmin, vmax = Zq[..., :1], Zq[..., -1:]
# The distances between neighboring atom values in the target support.
# Shape: (*E, N)
d_pos = tf.roll(Zq, shift=-1, axis=-1) - Zq # d_pos[i] := Zq[i+1] - Zq[i]
d_neg = Zq - tf.roll(Zq, shift=1, axis=-1) # d_neg[i] := Zq[i] - Zq[i-1]
# Clips Zp to be in new support range (vmin, vmax).
# Shape: (B, *E, 1, M)
clipped_zp = tf.clip_by_value(Zp, vmin, vmax)[..., None, :]
# Shape: (1, *E, N, 1)
clipped_zq = Zq[None, ..., :, None]
# Shape: (B, *E, N, M)
delta_qp = clipped_zp - clipped_zq # Zp[j] - Zq[i]
# Shape: (B, *E, N, M)
d_sign = tf.cast(delta_qp >= 0., dtype=P.dtype)
# Insert singleton axes to d_pos and d_neg to maintain the same shape as
# clipped_zq.
# Shape: (1, *E, N, 1)
d_pos = d_pos[None, ..., :, None]
d_neg = d_neg[None, ..., :, None]
# Shape: (B, *E, N, M)
delta_hat = (d_sign * delta_qp / d_pos) - ((1. - d_sign) * delta_qp / d_neg)
# Shape: (B, *E, 1, M)
P = P[..., None, :]
# Shape: (B, *E, N)
return tf.reduce_sum(tf.clip_by_value(1. - delta_hat, 0., 1.) * P, axis=-1)
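# Hedged usage sketch (added here for exposition; not part of the original
# module): projecting a one-hot categorical distribution onto a shifted
# support. The supports and probabilities below are illustrative only.
if __name__ == '__main__':
  src_support = tf.constant([[0., 1., 2., 3.]])
  src_probs = tf.constant([[0., 1., 0., 0.]])  # All mass on the atom at 1.
  dst_support = tf.constant([.25, 1.25, 2.25, 3.25])
  projected = l2_project(src_support, src_probs, dst_support)
  # The mass at 1.0 is split between the neighbouring atoms 0.25 and 1.25 in
  # proportion to their distances: expected [[0.25, 0.75, 0., 0.]].
  print(projected.numpy())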
|
acme-master
|
acme/tf/losses/distributional.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme.tf.losses.distributional."""
from acme.tf.losses import distributional
import numpy as np
from numpy import testing as npt
import tensorflow as tf
from absl.testing import absltest
from absl.testing import parameterized
def _reference_l2_project(src_support, src_probs, dst_support):
"""Multi-axis l2_project, implemented using single-axis l2_project.
This is for testing multiaxis_l2_project's consistency with l2_project,
when used with multi-axis support vs single-axis support.
Args:
src_support: Zp in l2_project.
src_probs: P in l2_project.
dst_support: Zq in l2_project.
Returns:
src_probs, projected onto dst_support.
"""
assert src_support.shape == src_probs.shape
# Remove the batch and value axes, and broadcast the rest to a common shape.
common_shape = np.broadcast(src_support[0, ..., 0],
dst_support[..., 0]).shape
# If src_* have fewer internal axes than len(common_shape), insert size-1
# axes.
while src_support.ndim-2 < len(common_shape):
src_support = src_support[:, None, ...]
src_probs = np.reshape(src_probs, src_support.shape)
# Broadcast args' non-batch, non-value axes to common_shape.
src_support = np.broadcast_to(
src_support,
src_support.shape[:1] + common_shape + src_support.shape[-1:])
src_probs = np.broadcast_to(src_probs, src_support.shape)
dst_support = np.broadcast_to(
dst_support,
common_shape + dst_support.shape[-1:])
output_shape = (src_support.shape[0],) + dst_support.shape
# Collapse all but the first (batch) and last (atom) axes.
src_support = src_support.reshape(
[src_support.shape[0], -1, src_support.shape[-1]])
src_probs = src_probs.reshape(
[src_probs.shape[0], -1, src_probs.shape[-1]])
# Collapse all but the last (atom) axes.
dst_support = dst_support.reshape([-1, dst_support.shape[-1]])
dst_probs = np.zeros(src_support.shape[:1] + dst_support.shape,
dtype=src_probs.dtype)
# iterate over all supports
for i in range(src_support.shape[1]):
s_support = tf.convert_to_tensor(src_support[:, i, :])
s_probs = tf.convert_to_tensor(src_probs[:, i, :])
d_support = tf.convert_to_tensor(dst_support[i, :])
d_probs = distributional.l2_project(s_support, s_probs, d_support)
dst_probs[:, i, :] = d_probs.numpy()
return dst_probs.reshape(output_shape)
class L2ProjectTest(parameterized.TestCase):
@parameterized.parameters(
[(2, 11), (11,)], # C = (), D = (), matching num_atoms (11 and 11)
[(2, 11), (5,)], # C = (), D = (), differing num_atoms (11 and 5).
[(2, 3, 11), (3, 5)], # C = (3,), D = (3,)
[(2, 1, 11), (3, 5)], # C = (1,), D = (3,)
      [(2, 3, 11), (1, 5)],  # C = (3,), D = (1,)
[(2, 3, 4, 11), (3, 4, 5)], # C = (3, 4), D = (3, 4)
[(2, 3, 4, 11), (4, 5)], # C = (3, 4), D = (4,)
[(2, 4, 11), (3, 4, 5)], # C = (4,), D = (3, 4)
)
def test_multiaxis(self, src_shape, dst_shape):
"""Tests consistency between multi-axis and single-axis l2_project.
This calls l2_project on multi-axis supports, and checks that it gets
the same outcomes as many calls to single-axis supports.
Args:
src_shape: Shape of source support. Includes a leading batch axis.
dst_shape: Shape of destination support.
Does not include a leading batch axis.
"""
# src_shape includes a leading batch axis, whereas dst_shape does not.
# assert len(src_shape) >= (1 + len(dst_shape))
def make_support(shape, minimum):
"""Creates a ndarray of supports."""
values = np.linspace(start=minimum, stop=minimum+100, num=shape[-1])
offsets = np.arange(np.prod(shape[:-1]))
result = values[None, :] + offsets[:, None]
return result.reshape(shape)
src_support = make_support(src_shape, -1)
dst_support = make_support(dst_shape, -.75)
rng = np.random.RandomState(1)
src_probs = rng.uniform(low=1.0, high=2.0, size=src_shape)
src_probs /= src_probs.sum()
# Repeated calls to l2_project using single-axis supports.
expected_dst_probs = _reference_l2_project(src_support,
src_probs,
dst_support)
# A single call to l2_project, with multi-axis supports.
dst_probs = distributional.multiaxis_l2_project(
tf.convert_to_tensor(src_support),
tf.convert_to_tensor(src_probs),
tf.convert_to_tensor(dst_support)).numpy()
npt.assert_allclose(dst_probs, expected_dst_probs)
@parameterized.parameters(
# Same src and dst support shape, dst support is shifted by +.25
([[0., 1, 2, 3]],
[[0., 1, 0, 0]],
[.25, 1.25, 2.25, 3.25],
[[.25, .75, 0, 0]]),
# Similar to above, but with batched src.
([[0., 1, 2, 3],
[0., 1, 2, 3]],
[[0., 1, 0, 0],
[0., 0, 1, 0]],
[.25, 1.25, 2.25, 3.25],
[[.25, .75, 0, 0],
[0., .25, .75, 0]]),
# Similar to above, but src_probs has two 0.5's, instead of being one-hot.
([[0., 1, 2, 3]],
[[0., .5, .5, 0]],
[.25, 1.25, 2.25, 3.25],
0.5 * (np.array([[.25, .75, 0, 0]]) + np.array([[0., .25, .75, 0]]))),
# src and dst support have differing sizes
([[0., 1, 2, 3]],
[[0., 1, 0, 0]],
[0.00, 0.25, 0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 2.00, 2.25, 2.50],
[[0.00, 0.00, 0.00, 0.00, 1.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]]),
)
def test_l2_projection(
self, src_support, src_probs, dst_support, expected_dst_probs):
dst_probs = distributional.multiaxis_l2_project(
tf.convert_to_tensor(src_support),
tf.convert_to_tensor(src_probs),
tf.convert_to_tensor(dst_support)).numpy()
npt.assert_allclose(dst_probs, expected_dst_probs)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/tf/losses/distributional_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Huber loss."""
import tensorflow as tf
def huber(inputs: tf.Tensor, quadratic_linear_boundary: float) -> tf.Tensor:
"""Calculates huber loss of `inputs`.
For each value x in `inputs`, the following is calculated:
```
0.5 * x^2 if |x| <= d
0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `quadratic_linear_boundary`.
Args:
inputs: Input Tensor to calculate the huber loss on.
    quadratic_linear_boundary: The point at which the huber loss function
      changes from quadratic to linear.
Returns:
`Tensor` of the same shape as `inputs`, containing values calculated
in the manner described above.
Raises:
ValueError: if quadratic_linear_boundary < 0.
"""
if quadratic_linear_boundary < 0:
raise ValueError("quadratic_linear_boundary must be >= 0.")
abs_x = tf.abs(inputs)
delta = tf.constant(quadratic_linear_boundary)
quad = tf.minimum(abs_x, delta)
# The following expression is the same in value as
# tf.maximum(abs_x - delta, 0), but importantly the gradient for the
# expression when abs_x == delta is 0 (for tf.maximum it would be 1). This
# is necessary to avoid doubling the gradient, since there is already a
# nonzero contribution to the gradient from the quadratic term.
lin = (abs_x - quad)
return 0.5 * quad**2 + delta * lin
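# Hedged usage sketch (added here for exposition; not part of the original
# module): evaluating the loss around the quadratic/linear boundary d = 1.
if __name__ == '__main__':
  x = tf.constant([-2.0, -1.0, 0.0, 0.5, 1.0, 3.0])
  print(huber(x, quadratic_linear_boundary=1.0).numpy())
  # Expected: [1.5, 0.5, 0.0, 0.125, 0.5, 2.5]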
|
acme-master
|
acme/tf/losses/huber.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the multi-objective MPO (MO-MPO) loss.
This loss was proposed in (Abdolmaleki, Huang et al., 2020).
The loss is implemented as a Sonnet module rather than a function so that it
can hold its own dual variables, as instances of `tf.Variable`, which it creates
the first time the module is called.
Tensor shapes are annotated, where helpful, as follows:
B: batch size,
N: number of sampled actions, see MO-MPO paper for more details,
D: dimensionality of the action space,
K: number of objectives.
(Abdolmaleki, Huang et al., 2020): https://arxiv.org/pdf/2005.07513.pdf
"""
import dataclasses
from typing import Dict, Sequence, Tuple, Union
from acme.tf.losses import mpo
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
_MPO_FLOAT_EPSILON = 1e-8
@dataclasses.dataclass
class KLConstraint:
"""Defines a per-objective policy improvement step constraint for MO-MPO."""
name: str
value: float
def __post_init__(self):
if self.value < 0:
raise ValueError("KL constraint epsilon must be non-negative.")
class MultiObjectiveMPO(snt.Module):
"""Multi-objective MPO loss with decoupled KL constraints.
This implementation of the MO-MPO loss is based on the approach proposed in
(Abdolmaleki, Huang et al., 2020). The following features are included as
options:
- Satisfying the KL-constraint on a per-dimension basis (on by default)
(Abdolmaleki, Huang et al., 2020): https://arxiv.org/pdf/2005.07513.pdf
"""
def __init__(self,
epsilons: Sequence[KLConstraint],
epsilon_mean: float,
epsilon_stddev: float,
init_log_temperature: float,
init_log_alpha_mean: float,
init_log_alpha_stddev: float,
per_dim_constraining: bool = True,
name: str = "MOMPO"):
"""Initialize and configure the MPO loss.
Args:
epsilons: per-objective KL constraints on the non-parametric auxiliary
policy, the one associated with the dual variables called temperature;
expected length K.
epsilon_mean: KL constraint on the mean of the Gaussian policy, the one
associated with the dual variable called alpha_mean.
      epsilon_stddev: KL constraint on the stddev of the Gaussian policy, the
        one associated with the dual variable called alpha_stddev.
init_log_temperature: initial value for the temperature in log-space, note
a softplus (rather than an exp) will be used to transform this.
init_log_alpha_mean: initial value for the alpha_mean in log-space, note
a softplus (rather than an exp) will be used to transform this.
init_log_alpha_stddev: initial value for the alpha_stddev in log-space,
note a softplus (rather than an exp) will be used to transform this.
per_dim_constraining: whether to enforce the KL constraint on each
dimension independently; this is the default. Otherwise the overall KL
is constrained, which allows some dimensions to change more at the
expense of others staying put.
name: a name for the module, passed directly to snt.Module.
"""
super().__init__(name=name)
# MO-MPO constraint thresholds.
self._epsilons = tf.constant([x.value for x in epsilons])
self._epsilon_mean = tf.constant(epsilon_mean)
self._epsilon_stddev = tf.constant(epsilon_stddev)
# Initial values for the constraints' dual variables.
self._init_log_temperature = init_log_temperature
self._init_log_alpha_mean = init_log_alpha_mean
self._init_log_alpha_stddev = init_log_alpha_stddev
    # Whether to ensure per-dimension KL constraint satisfaction.
self._per_dim_constraining = per_dim_constraining
# Remember the number of objectives
self._num_objectives = len(epsilons) # K = number of objectives
self._objective_names = [x.name for x in epsilons]
# Make sure there are no duplicate objective names
if len(self._objective_names) != len(set(self._objective_names)):
raise ValueError("Duplicate objective names are not allowed.")
@property
def objective_names(self):
return self._objective_names
@snt.once
def create_dual_variables_once(self, shape: tf.TensorShape, dtype: tf.DType):
"""Creates the dual variables the first time the loss module is called."""
# Create the dual variables.
self._log_temperature = tf.Variable(
initial_value=[self._init_log_temperature] * self._num_objectives,
dtype=dtype,
name="log_temperature",
shape=(self._num_objectives,))
self._log_alpha_mean = tf.Variable(
initial_value=tf.fill(shape, self._init_log_alpha_mean),
dtype=dtype,
name="log_alpha_mean",
shape=shape)
self._log_alpha_stddev = tf.Variable(
initial_value=tf.fill(shape, self._init_log_alpha_stddev),
dtype=dtype,
name="log_alpha_stddev",
shape=shape)
# Cast constraint thresholds to the expected dtype.
self._epsilons = tf.cast(self._epsilons, dtype)
self._epsilon_mean = tf.cast(self._epsilon_mean, dtype)
self._epsilon_stddev = tf.cast(self._epsilon_stddev, dtype)
def __call__(
self,
online_action_distribution: Union[tfd.MultivariateNormalDiag,
tfd.Independent],
target_action_distribution: Union[tfd.MultivariateNormalDiag,
tfd.Independent],
actions: tf.Tensor, # Shape [N, B, D].
q_values: tf.Tensor, # Shape [N, B, K].
) -> Tuple[tf.Tensor, Dict[str, tf.Tensor]]:
"""Computes the decoupled MO-MPO loss.
Args:
online_action_distribution: online distribution returned by the online
policy network; expects batch_dims of [B] and event_dims of [D].
target_action_distribution: target distribution returned by the target
policy network; expects same shapes as online distribution.
actions: actions sampled from the target policy; expects shape [N, B, D].
q_values: Q-values associated with each action; expects shape [N, B, K].
Returns:
Loss, combining the policy loss, KL penalty, and dual losses required to
adapt the dual variables.
Stats, for diagnostics and tracking performance.
"""
# Make sure the Q-values are per-objective
q_values.get_shape().assert_has_rank(3)
if q_values.get_shape()[-1] != self._num_objectives:
raise ValueError("Q-values do not match expected number of objectives.")
# Cast `MultivariateNormalDiag`s to Independent Normals.
# The latter allows us to satisfy KL constraints per-dimension.
if isinstance(target_action_distribution, tfd.MultivariateNormalDiag):
target_action_distribution = tfd.Independent(
tfd.Normal(target_action_distribution.mean(),
target_action_distribution.stddev()))
online_action_distribution = tfd.Independent(
tfd.Normal(online_action_distribution.mean(),
online_action_distribution.stddev()))
# Infer the shape and dtype of dual variables.
scalar_dtype = q_values.dtype
if self._per_dim_constraining:
dual_variable_shape = target_action_distribution.distribution.kl_divergence(
online_action_distribution.distribution).shape[1:] # Should be [D].
else:
dual_variable_shape = target_action_distribution.kl_divergence(
online_action_distribution).shape[1:] # Should be [1].
# Create dual variables for the KL constraints; only happens the first call.
self.create_dual_variables_once(dual_variable_shape, scalar_dtype)
# Project dual variables to ensure they stay positive.
min_log_temperature = tf.constant(-18.0, scalar_dtype)
min_log_alpha = tf.constant(-18.0, scalar_dtype)
self._log_temperature.assign(
tf.maximum(min_log_temperature, self._log_temperature))
self._log_alpha_mean.assign(tf.maximum(min_log_alpha, self._log_alpha_mean))
self._log_alpha_stddev.assign(
tf.maximum(min_log_alpha, self._log_alpha_stddev))
# Transform dual variables from log-space.
# Note: using softplus instead of exponential for numerical stability.
temperature = tf.math.softplus(self._log_temperature) + _MPO_FLOAT_EPSILON
alpha_mean = tf.math.softplus(self._log_alpha_mean) + _MPO_FLOAT_EPSILON
alpha_stddev = tf.math.softplus(self._log_alpha_stddev) + _MPO_FLOAT_EPSILON
# Get online and target means and stddevs in preparation for decomposition.
online_mean = online_action_distribution.distribution.mean()
online_scale = online_action_distribution.distribution.stddev()
target_mean = target_action_distribution.distribution.mean()
target_scale = target_action_distribution.distribution.stddev()
# Compute normalized importance weights, used to compute expectations with
# respect to the non-parametric policy; and the temperature loss, used to
# adapt the tempering of Q-values.
normalized_weights, loss_temperature = compute_weights_and_temperature_loss(
        q_values, self._epsilons, temperature)  # Shapes [N, B, K] and [K].
normalized_weights_sum = tf.reduce_sum(normalized_weights, axis=-1)
loss_temperature_mean = tf.reduce_mean(loss_temperature)
# Only needed for diagnostics: Compute estimated actualized KL between the
# non-parametric and current target policies.
kl_nonparametric = mpo.compute_nonparametric_kl_from_normalized_weights(
normalized_weights)
# Decompose the online policy into fixed-mean & fixed-stddev distributions.
# This has been documented as having better performance in bandit settings,
# see e.g. https://arxiv.org/pdf/1812.02256.pdf.
fixed_stddev_distribution = tfd.Independent(
tfd.Normal(loc=online_mean, scale=target_scale))
fixed_mean_distribution = tfd.Independent(
tfd.Normal(loc=target_mean, scale=online_scale))
# Compute the decomposed policy losses.
loss_policy_mean = mpo.compute_cross_entropy_loss(
actions, normalized_weights_sum, fixed_stddev_distribution)
loss_policy_stddev = mpo.compute_cross_entropy_loss(
actions, normalized_weights_sum, fixed_mean_distribution)
# Compute the decomposed KL between the target and online policies.
if self._per_dim_constraining:
kl_mean = target_action_distribution.distribution.kl_divergence(
fixed_stddev_distribution.distribution) # Shape [B, D].
kl_stddev = target_action_distribution.distribution.kl_divergence(
fixed_mean_distribution.distribution) # Shape [B, D].
else:
kl_mean = target_action_distribution.kl_divergence(
fixed_stddev_distribution) # Shape [B].
kl_stddev = target_action_distribution.kl_divergence(
fixed_mean_distribution) # Shape [B].
# Compute the alpha-weighted KL-penalty and dual losses to adapt the alphas.
loss_kl_mean, loss_alpha_mean = mpo.compute_parametric_kl_penalty_and_dual_loss(
kl_mean, alpha_mean, self._epsilon_mean)
loss_kl_stddev, loss_alpha_stddev = mpo.compute_parametric_kl_penalty_and_dual_loss(
kl_stddev, alpha_stddev, self._epsilon_stddev)
# Combine losses.
loss_policy = loss_policy_mean + loss_policy_stddev
loss_kl_penalty = loss_kl_mean + loss_kl_stddev
loss_dual = loss_alpha_mean + loss_alpha_stddev + loss_temperature_mean
loss = loss_policy + loss_kl_penalty + loss_dual
stats = {}
# Dual Variables.
stats["dual_alpha_mean"] = tf.reduce_mean(alpha_mean)
stats["dual_alpha_stddev"] = tf.reduce_mean(alpha_stddev)
# Losses.
stats["loss_policy"] = tf.reduce_mean(loss)
stats["loss_alpha"] = tf.reduce_mean(loss_alpha_mean + loss_alpha_stddev)
# KL measurements.
stats["kl_mean_rel"] = tf.reduce_mean(kl_mean, axis=0) / self._epsilon_mean
stats["kl_stddev_rel"] = tf.reduce_mean(
kl_stddev, axis=0) / self._epsilon_stddev
# If the policy has standard deviation, log summary stats for this as well.
pi_stddev = online_action_distribution.distribution.stddev()
stats["pi_stddev_min"] = tf.reduce_mean(tf.reduce_min(pi_stddev, axis=-1))
stats["pi_stddev_max"] = tf.reduce_mean(tf.reduce_max(pi_stddev, axis=-1))
# Condition number of the diagonal covariance (actually, stddev) matrix.
stats["pi_stddev_cond"] = tf.reduce_mean(
tf.reduce_max(pi_stddev, axis=-1) / tf.reduce_min(pi_stddev, axis=-1))
# Log per-objective values.
for i, name in enumerate(self._objective_names):
stats["{}_dual_temperature".format(name)] = temperature[i]
stats["{}_loss_temperature".format(name)] = loss_temperature[i]
stats["{}_kl_q_rel".format(name)] = tf.reduce_mean(
kl_nonparametric[:, i]) / self._epsilons[i]
# Q measurements.
stats["{}_q_min".format(name)] = tf.reduce_mean(tf.reduce_min(
q_values, axis=0)[:, i])
stats["{}_q_mean".format(name)] = tf.reduce_mean(tf.reduce_mean(
q_values, axis=0)[:, i])
stats["{}_q_max".format(name)] = tf.reduce_mean(tf.reduce_max(
q_values, axis=0)[:, i])
return loss, stats
def compute_weights_and_temperature_loss(
q_values: tf.Tensor,
epsilons: tf.Tensor,
temperature: tf.Variable,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Computes normalized importance weights for the policy optimization.
Args:
q_values: Q-values associated with the actions sampled from the target
policy; expected shape [N, B, K].
epsilons: Desired per-objective constraints on the KL between the target
and non-parametric policies; expected shape [K].
temperature: Per-objective scalar used to temper the Q-values before
computing normalized importance weights from them; expected shape [K].
This is really the Lagrange dual variable in the constrained optimization
problem, the solution of which is the non-parametric policy targeted by
the policy loss.
Returns:
Normalized importance weights, used for policy optimization; shape [N,B,K].
    Temperature loss, used to adapt the temperature; shape [K].
"""
# Temper the given Q-values using the current temperature.
tempered_q_values = tf.stop_gradient(q_values) / temperature[None, None, :]
# Compute the normalized importance weights used to compute expectations with
# respect to the non-parametric policy.
normalized_weights = tf.nn.softmax(tempered_q_values, axis=0)
normalized_weights = tf.stop_gradient(normalized_weights)
# Compute the temperature loss (dual of the E-step optimization problem).
q_logsumexp = tf.reduce_logsumexp(tempered_q_values, axis=0)
log_num_actions = tf.math.log(tf.cast(q_values.shape[0], tf.float32))
loss_temperature = (
epsilons + tf.reduce_mean(q_logsumexp, axis=0) - log_num_actions)
loss_temperature = temperature * loss_temperature
return normalized_weights, loss_temperature
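# Hedged usage sketch (added here for exposition; not part of the original
# module): per-objective E-step weights on random Q-values. The shapes and
# epsilon values below are arbitrary illustrative choices.
if __name__ == '__main__':
  num_samples, batch_size, num_objectives = 20, 8, 2
  q_values = tf.random.normal((num_samples, batch_size, num_objectives))
  epsilons = tf.constant([0.1, 0.001])
  temperature = tf.Variable([1.0, 1.0])
  weights, loss_temperature = compute_weights_and_temperature_loss(
      q_values, epsilons, temperature)
  print(weights.shape)           # (num_samples, batch_size, num_objectives).
  print(loss_temperature.shape)  # One temperature loss per objective.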
|
acme-master
|
acme/tf/losses/mompo.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss functions for R2D2."""
from typing import Iterable, NamedTuple, Sequence
import tensorflow as tf
import trfl
class LossCoreExtra(NamedTuple):
targets: tf.Tensor
errors: tf.Tensor
def transformed_n_step_loss(
qs: tf.Tensor,
targnet_qs: tf.Tensor,
actions: tf.Tensor,
rewards: tf.Tensor,
pcontinues: tf.Tensor,
target_policy_probs: tf.Tensor,
bootstrap_n: int,
stop_targnet_gradients: bool = True,
name: str = 'transformed_n_step_loss',
) -> trfl.base_ops.LossOutput:
"""Helper function for computing transformed loss on sequences.
Args:
qs: 3-D tensor corresponding to the Q-values to be learned. Shape is [T+1,
B, A].
targnet_qs: Like `qs`, but in the target network setting, these values
should be computed by the target network. Shape is [T+1, B, A].
actions: 2-D tensor holding the indices of actions executed during the
transition that corresponds to each major index. Shape is [T+1, B].
rewards: 2-D tensor holding rewards received during the transition that
corresponds to each major index. Shape is [T, B].
pcontinues: 2-D tensor holding pcontinue values received during the
transition that corresponds to each major index. Shape is [T, B].
target_policy_probs: 3-D tensor holding per-action policy probabilities for
the states encountered just before taking the transitions that correspond
to each major index, according to the target policy (i.e. the policy we
wish to learn). For standard Q-learning the probabilities should form a
one-hot vector over actions where the nonzero index corresponds to the max
Q. Shape is [T+1, B, A].
bootstrap_n: Transition length for N-step bootstrapping.
    stop_targnet_gradients: `bool` indicating whether to apply tf.stop_gradient
      to the target values. This should usually be True.
name: name to prefix ops created by this function.
Returns:
a tuple of:
* `loss`: the transformed Q-learning loss summed over `T`.
* `LossCoreExtra`: namedtuple containing the fields `targets` and `errors`.
"""
with tf.name_scope(name):
    # Require correct tensor ranks---as long as we have shape information
    # available to check. If there isn't any, we raise an error.
def check_rank(tensors: Iterable[tf.Tensor], ranks: Sequence[int]):
for i, (tensor, rank) in enumerate(zip(tensors, ranks)):
if tensor.get_shape():
trfl.assert_rank_and_shape_compatibility([tensor], rank)
else:
raise ValueError(
            f'Tensor "{tensor.name}", which was offered as '
            f'transformed_n_step_loss parameter {i+1}, has no rank at '
            f'construction time, so cannot verify that it has the necessary '
            f'rank of {rank}')
check_rank(
[qs, targnet_qs, actions, rewards, pcontinues, target_policy_probs],
[3, 3, 2, 2, 2, 3])
# Construct arguments to compute bootstrap target.
a_tm1 = actions[:-1] # (0:T) x B
r_t, pcont_t = rewards, pcontinues # (1:T+1) x B
q_tm1 = qs[:-1] # (0:T) x B x A
target_policy_t = target_policy_probs[1:] # (1:T+1) x B x A
targnet_q_t = targnet_qs[1:] # (1:T+1) x B x A
bootstrap_value = tf.reduce_sum(
target_policy_t * _signed_parabolic_tx(targnet_q_t), -1)
target = _compute_n_step_sequence_targets(
r_t=r_t,
pcont_t=pcont_t,
bootstrap_value=bootstrap_value,
n=bootstrap_n)
if stop_targnet_gradients:
target = tf.stop_gradient(target)
# tx/inv_tx may result in numerical instabilities so mask any NaNs.
finite_mask = tf.math.is_finite(target)
target = tf.where(finite_mask, target, tf.zeros_like(target))
qa_tm1 = trfl.batched_index(q_tm1, a_tm1)
errors = qa_tm1 - _signed_hyperbolic_tx(target)
# Only compute n-step errors w.r.t. finite targets.
errors = tf.where(finite_mask, errors, tf.zeros_like(errors))
# Sum over time dimension.
loss = 0.5 * tf.reduce_sum(tf.square(errors), axis=0)
return trfl.base_ops.LossOutput(
loss, LossCoreExtra(targets=target, errors=errors))
def _compute_n_step_sequence_targets(
r_t: tf.Tensor,
pcont_t: tf.Tensor,
bootstrap_value: tf.Tensor,
n: int,
) -> tf.Tensor:
"""Computes n-step bootstrapped returns over a sequence.
Args:
r_t: 2-D tensor of shape [T, B] corresponding to rewards.
pcont_t: 2-D tensor of shape [T, B] corresponding to pcontinues.
bootstrap_value: 2-D tensor of shape [T, B] corresponding to bootstrap
values.
n: number of steps over which to accumulate reward before bootstrapping.
Returns:
2-D tensor of shape [T, B] corresponding to bootstrapped returns.
"""
time_size, batch_size = r_t.shape.as_list()
# Pad r_t and pcont_t so we can use static slice shapes in scan.
r_t = tf.concat([r_t, tf.zeros((n - 1, batch_size))], 0)
pcont_t = tf.concat([pcont_t, tf.ones((n - 1, batch_size))], 0)
# We need to use tf.slice with static shapes for TPU compatibility.
def _slice(tensor, index, size):
return tf.slice(tensor, [index, 0], [size, batch_size])
# Construct correct bootstrap targets for each time slice t, which are exactly
# the target values at timestep min(t+n-1, time_size-1).
last_bootstrap_value = _slice(bootstrap_value, time_size - 1, 1)
if time_size > n - 1:
full_bootstrap_steps = [_slice(bootstrap_value, n - 1, time_size - (n - 1))]
truncated_bootstrap_steps = [last_bootstrap_value] * (n - 1)
else:
# Only truncated steps, since n > time_size.
full_bootstrap_steps = []
truncated_bootstrap_steps = [last_bootstrap_value] * time_size
bootstrap_value = tf.concat(full_bootstrap_steps + truncated_bootstrap_steps,
0)
# Iterate backwards for n steps to construct n-step return targets.
targets = bootstrap_value
for i in range(n - 1, -1, -1):
this_pcont_t = _slice(pcont_t, i, time_size)
this_r_t = _slice(r_t, i, time_size)
targets = this_r_t + this_pcont_t * targets
return targets
def _signed_hyperbolic_tx(x: tf.Tensor, eps: float = 1e-3) -> tf.Tensor:
"""Signed hyperbolic transform, inverse of signed_parabolic."""
return tf.sign(x) * (tf.sqrt(abs(x) + 1) - 1) + eps * x
def _signed_parabolic_tx(x: tf.Tensor, eps: float = 1e-3) -> tf.Tensor:
"""Signed parabolic transform, inverse of signed_hyperbolic."""
z = tf.sqrt(1 + 4 * eps * (eps + 1 + abs(x))) / 2 / eps - 1 / 2 / eps
return tf.sign(x) * (tf.square(z) - 1)
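# Illustrative sketch, not part of the original file: a quick eager-mode check
# that the two transforms above are (approximate) inverses, i.e. that
# _signed_hyperbolic_tx(_signed_parabolic_tx(x)) ~= x. The sample values and
# tolerances are arbitrary assumptions chosen for demonstration.
if __name__ == '__main__':
  x = tf.constant([-10.0, -1.0, 0.0, 1.0, 10.0])
  roundtrip = _signed_hyperbolic_tx(_signed_parabolic_tx(x))
  tf.debugging.assert_near(roundtrip, x, rtol=1e-3, atol=1e-3)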
|
acme-master
|
acme/tf/losses/r2d2.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commonly-used networks for running on Atari."""
from typing import Optional, Tuple
from acme.tf.networks import base
from acme.tf.networks import duelling
from acme.tf.networks import embedding
from acme.tf.networks import policy_value
from acme.tf.networks import recurrence
from acme.tf.networks import vision
from acme.wrappers import observation_action_reward
import sonnet as snt
import tensorflow as tf
Images = tf.Tensor
QValues = tf.Tensor
Logits = tf.Tensor
Value = tf.Tensor
class AtariTorso(base.Module):
"""Simple convolutional stack commonly used for Atari."""
def __init__(self):
super().__init__(name='atari_torso')
self._network = snt.Sequential([
snt.Conv2D(32, [8, 8], [4, 4]),
tf.nn.relu,
snt.Conv2D(64, [4, 4], [2, 2]),
tf.nn.relu,
snt.Conv2D(64, [3, 3], [1, 1]),
tf.nn.relu,
snt.Flatten(),
])
def __call__(self, inputs: Images) -> tf.Tensor:
return self._network(inputs)
class DQNAtariNetwork(base.Module):
"""A feed-forward network for use with Ape-X DQN.
See https://arxiv.org/pdf/1803.00933.pdf for more information.
"""
def __init__(self, num_actions: int):
super().__init__(name='dqn_atari_network')
self._network = snt.Sequential([
AtariTorso(),
duelling.DuellingMLP(num_actions, hidden_sizes=[512]),
])
def __call__(self, inputs: Images) -> QValues:
return self._network(inputs)
class R2D2AtariNetwork(base.RNNCore):
"""A recurrent network for use with R2D2.
See https://openreview.net/forum?id=r1lyTjAqYX for more information.
"""
def __init__(self, num_actions: int, core: Optional[base.RNNCore] = None):
super().__init__(name='r2d2_atari_network')
self._embed = embedding.OAREmbedding(
torso=AtariTorso(), num_actions=num_actions)
self._core = core if core is not None else recurrence.LSTM(512)
self._head = duelling.DuellingMLP(num_actions, hidden_sizes=[512])
def __call__(
self,
inputs: observation_action_reward.OAR,
state: base.State,
) -> Tuple[QValues, base.State]:
embeddings = self._embed(inputs)
embeddings, new_state = self._core(embeddings, state)
action_values = self._head(embeddings) # [B, A]
return action_values, new_state
# TODO(b/171287329): Figure out why return type annotation causes error.
def initial_state(self, batch_size: int, **unused_kwargs) -> base.State: # pytype: disable=invalid-annotation
return self._core.initial_state(batch_size)
def unroll(
self,
inputs: observation_action_reward.OAR,
state: base.State,
sequence_length: int,
) -> Tuple[QValues, base.State]:
"""Efficient unroll that applies embeddings, MLP, & convnet in one pass."""
embeddings = snt.BatchApply(self._embed)(inputs) # [T, B, D+A+1]
embeddings, new_state = self._core.unroll(embeddings, state,
sequence_length)
action_values = snt.BatchApply(self._head)(embeddings)
return action_values, new_state
class IMPALAAtariNetwork(snt.RNNCore):
"""A recurrent network for use with IMPALA.
See https://arxiv.org/pdf/1802.01561.pdf for more information.
"""
def __init__(self, num_actions: int):
super().__init__(name='impala_atari_network')
self._embed = embedding.OAREmbedding(
torso=AtariTorso(), num_actions=num_actions)
self._core = snt.LSTM(256)
self._head = snt.Sequential([
snt.Linear(256),
tf.nn.relu,
policy_value.PolicyValueHead(num_actions),
])
self._num_actions = num_actions
def __call__(
self, inputs: observation_action_reward.OAR,
state: snt.LSTMState) -> Tuple[Tuple[Logits, Value], snt.LSTMState]:
embeddings = self._embed(inputs)
embeddings, new_state = self._core(embeddings, state)
logits, value = self._head(embeddings) # [B, A]
return (logits, value), new_state
def initial_state(self, batch_size: int, **unused_kwargs) -> snt.LSTMState:
return self._core.initial_state(batch_size)
class DeepIMPALAAtariNetwork(base.RNNCore):
"""A recurrent network for use with IMPALA.
See https://arxiv.org/pdf/1802.01561.pdf for more information.
"""
def __init__(self, num_actions: int):
super().__init__(name='deep_impala_atari_network')
self._embed = embedding.OAREmbedding(
torso=vision.ResNetTorso(), num_actions=num_actions)
self._core = snt.LSTM(256)
self._head = snt.Sequential([
snt.Linear(256),
tf.nn.relu,
policy_value.PolicyValueHead(num_actions),
])
self._num_actions = num_actions
def __call__(
self, inputs: observation_action_reward.OAR,
state: snt.LSTMState) -> Tuple[Tuple[Logits, Value], snt.LSTMState]:
embeddings = self._embed(inputs)
embeddings, new_state = self._core(embeddings, state)
logits, value = self._head(embeddings) # [B, A]
return (logits, value), new_state
def initial_state(self, batch_size: int, **unused_kwargs) -> snt.LSTMState:
return self._core.initial_state(batch_size)
def unroll(
self,
inputs: observation_action_reward.OAR,
states: snt.LSTMState,
sequence_length: int,
) -> Tuple[Tuple[Logits, Value], snt.LSTMState]:
"""Efficient unroll that applies embeddings, MLP, & convnet in one pass."""
embeddings = snt.BatchApply(self._embed)(inputs) # [T, B, D+A+1]
embeddings, new_states = snt.static_unroll(self._core, embeddings, states,
sequence_length)
logits, values = snt.BatchApply(self._head)(embeddings)
return (logits, values), new_states
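# Illustrative sketch, not part of the original file: building the feed-forward
# DQN network above and evaluating it on a dummy batch of frames. The 84x84x4
# frame stack, batch size and action count are assumptions for demonstration.
if __name__ == '__main__':
  network = DQNAtariNetwork(num_actions=6)
  frames = tf.zeros([8, 84, 84, 4], dtype=tf.float32)  # [B, H, W, C]
  q_values = network(frames)  # [8, 6]: one Q-value per action.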
|
acme-master
|
acme/tf/networks/atari.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme.tf.networks.distributions."""
from acme.tf.networks import distributions
import numpy as np
from numpy import testing as npt
from absl.testing import absltest
from absl.testing import parameterized
class DiscreteValuedDistributionTest(parameterized.TestCase):
@parameterized.parameters(
((), (), 5),
((2,), (), 5),
((), (3, 4), 5),
((2,), (3, 4), 5),
((2, 6), (3, 4), 5),
)
def test_constructor(self, batch_shape, event_shape, num_values):
logits_shape = batch_shape + event_shape + (num_values,)
logits_size = np.prod(logits_shape)
logits = np.arange(logits_size, dtype=float).reshape(logits_shape)
values = np.linspace(start=-np.ones(event_shape, dtype=float),
stop=np.ones(event_shape, dtype=float),
num=num_values,
axis=-1)
distribution = distributions.DiscreteValuedDistribution(values=values,
logits=logits)
# Check batch and event shapes.
self.assertEqual(distribution.batch_shape, batch_shape)
self.assertEqual(distribution.event_shape, event_shape)
self.assertEqual(distribution.logits_parameter().shape.as_list(),
list(logits.shape))
self.assertEqual(distribution.logits_parameter().shape.as_list()[-1],
logits.shape[-1])
# Test slicing
if len(batch_shape) == 1:
slice_0_logits = distribution[1:3].logits_parameter().numpy()
expected_slice_0_logits = distribution.logits_parameter().numpy()[1:3]
npt.assert_allclose(slice_0_logits, expected_slice_0_logits)
elif len(batch_shape) == 2:
slice_logits = distribution[0, 1:3].logits_parameter().numpy()
expected_slice_logits = distribution.logits_parameter().numpy()[0, 1:3]
npt.assert_allclose(slice_logits, expected_slice_logits)
else:
assert not batch_shape
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/tf/networks/distributions_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rescaling layers (e.g. to match action specs)."""
from typing import Union
from acme import specs
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
class ClipToSpec(snt.Module):
"""Sonnet module clipping inputs to within a BoundedArraySpec."""
def __init__(self, spec: specs.BoundedArray, name: str = 'clip_to_spec'):
super().__init__(name=name)
self._min = spec.minimum
self._max = spec.maximum
def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
return tf.clip_by_value(inputs, self._min, self._max)
class RescaleToSpec(snt.Module):
"""Sonnet module rescaling inputs in [-1, 1] to match a BoundedArraySpec."""
def __init__(self, spec: specs.BoundedArray, name: str = 'rescale_to_spec'):
super().__init__(name=name)
self._scale = spec.maximum - spec.minimum
self._offset = spec.minimum
def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
inputs = 0.5 * (inputs + 1.0) # [0, 1]
output = inputs * self._scale + self._offset # [minimum, maximum]
return output
class TanhToSpec(snt.Module):
"""Sonnet module squashing real-valued inputs to match a BoundedArraySpec."""
def __init__(self, spec: specs.BoundedArray, name: str = 'tanh_to_spec'):
super().__init__(name=name)
self._scale = spec.maximum - spec.minimum
self._offset = spec.minimum
def __call__(
self, inputs: Union[tf.Tensor, tfd.Distribution]
) -> Union[tf.Tensor, tfd.Distribution]:
if isinstance(inputs, tfd.Distribution):
inputs = tfb.Tanh()(inputs)
inputs = tfb.ScaleMatvecDiag(0.5 * self._scale)(inputs)
output = tfb.Shift(self._offset + 0.5 * self._scale)(inputs)
else:
inputs = tf.tanh(inputs) # [-1, 1]
inputs = 0.5 * (inputs + 1.0) # [0, 1]
output = inputs * self._scale + self._offset # [minimum, maximum]
return output
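# Illustrative sketch, not part of the original file: squashing unbounded
# activations into a bounded action range with TanhToSpec. The spec bounds and
# input values are assumptions chosen only for demonstration.
if __name__ == '__main__':
  spec = specs.BoundedArray(shape=(3,), dtype='float32', minimum=-2.0, maximum=2.0)
  squash = TanhToSpec(spec)
  actions = squash(tf.constant([[-100.0, 0.0, 100.0]]))
  # tanh maps inputs into (-1, 1), so outputs stay within [-2, 2]; the middle
  # entry maps to the centre of the range (0.0 here).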
|
acme-master
|
acme/tf/networks/rescaling.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implicit quantile network, as described in [0].
[0] https://arxiv.org/abs/1806.06923
"""
import numpy as np
import sonnet as snt
import tensorflow as tf
class IQNNetwork(snt.Module):
"""A feedforward network for use with IQN.
IQN extends the Q-network of regular DQN which consists of torso and head
networks. IQN embeds sampled quantile thresholds into the output space of the
torso network and merges them with the torso output.
Outputs a tuple consisting of (mean) Q-values, Q-value quantiles, and sampled
quantile thresholds.
"""
def __init__(self,
torso: snt.Module,
head: snt.Module,
latent_dim: int,
num_quantile_samples: int,
name: str = 'iqn_network'):
"""Initializes the network.
Args:
torso: Network producing an intermediate representation, typically a
convolutional network.
head: Network producing Q-value quantiles, typically an MLP.
latent_dim: Dimension of latent variables.
num_quantile_samples: Number of quantile thresholds to sample.
name: Module name.
"""
super().__init__(name)
self._torso = torso
self._head = head
self._latent_dim = latent_dim
self._num_quantile_samples = num_quantile_samples
@snt.once
def _create_embedding(self, size):
self._embedding = snt.Linear(size)
def __call__(self, observations):
# Transform observations to intermediate representations (typically a
# convolutional network).
torso_output = self._torso(observations)
    # Now that the dimension of the intermediate representation is known,
    # initialize the embedding of sampled quantile thresholds (only done once).
self._create_embedding(torso_output.shape[-1])
# Sample quantile thresholds.
batch_size = tf.shape(observations)[0]
tau_shape = tf.stack([batch_size, self._num_quantile_samples])
tau = tf.random.uniform(tau_shape)
indices = tf.range(1, self._latent_dim+1, dtype=tf.float32)
# Embed sampled quantile thresholds in intermediate representation space.
tau_tiled = tf.tile(tau[:, :, None], (1, 1, self._latent_dim))
indices_tiled = tf.tile(indices[None, None, :],
tf.concat([tau_shape, [1]], 0))
tau_embedding = tf.cos(tau_tiled * indices_tiled * np.pi)
tau_embedding = snt.BatchApply(self._embedding)(tau_embedding)
tau_embedding = tf.nn.relu(tau_embedding)
# Merge intermediate representations with embeddings, and apply head
# network (typically an MLP).
torso_output = tf.tile(torso_output[:, None, :],
(1, self._num_quantile_samples, 1))
q_value_quantiles = snt.BatchApply(self._head)(tau_embedding * torso_output)
q_dist = tf.transpose(q_value_quantiles, (0, 2, 1))
q_values = tf.reduce_mean(q_value_quantiles, axis=1)
q_values = tf.stop_gradient(q_values)
return q_values, q_dist, tau
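# Illustrative sketch, not part of the original file: wiring IQNNetwork with a
# tiny MLP torso and head on flat observations. All sizes below are arbitrary
# assumptions chosen only to show the expected output shapes.
if __name__ == '__main__':
  network = IQNNetwork(
      torso=snt.nets.MLP([64]),
      head=snt.nets.MLP([32, 4]),  # 4 actions.
      latent_dim=16,
      num_quantile_samples=8)
  observations = tf.zeros([2, 10])  # [B, obs_dim]
  q_values, q_dist, tau = network(observations)
  # q_values: [2, 4], q_dist: [2, 4, 8], tau: [2, 8].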
|
acme-master
|
acme/tf/networks/quantile.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visual networks for processing pixel inputs."""
from typing import Callable, Optional, Sequence, Union
import sonnet as snt
import tensorflow as tf
class ResNetTorso(snt.Module):
"""ResNet architecture used in IMPALA paper."""
def __init__(
self,
num_channels: Sequence[int] = (16, 32, 32), # default to IMPALA resnet.
num_blocks: Sequence[int] = (2, 2, 2), # default to IMPALA resnet.
num_output_hidden: Sequence[int] = (256,), # default to IMPALA resnet.
conv_shape: Union[int, Sequence[int]] = 3,
conv_stride: Union[int, Sequence[int]] = 1,
pool_size: Union[int, Sequence[int]] = 3,
pool_stride: Union[int, Sequence[int], Sequence[Sequence[int]]] = 2,
data_format: str = 'NHWC',
activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.relu,
output_dtype: tf.DType = tf.float32,
name: str = 'resnet_torso'):
"""Builds an IMPALA-style ResNet.
The arguments' default values construct the IMPALA resnet.
Args:
num_channels: The number of convolutional channels for each layer.
num_blocks: The number of resnet blocks in each "layer".
num_output_hidden: The output size(s) of the MLP layer(s) on top.
conv_shape: The convolution filter size (int), or size dimensions (H, W).
conv_stride: the convolution stride (int), or strides (row, column).
pool_size: The pooling footprint size (int), or size dimensions (H, W).
pool_stride: The pooling stride (int) or strides (row, column), or
strides for each of the N layers ((r1, c1), (r2, c2), ..., (rN, cN)).
data_format: The axis order of the input.
activation: The activation function.
output_dtype: the output dtype.
name: The Sonnet module name.
"""
super().__init__(name=name)
self._output_dtype = output_dtype
self._num_layers = len(num_blocks)
if isinstance(pool_stride, int):
pool_stride = (pool_stride, pool_stride)
if isinstance(pool_stride[0], int):
pool_stride = self._num_layers * (pool_stride,)
# Create sequence of residual blocks.
blocks = []
for i in range(self._num_layers):
blocks.append(
ResidualBlockGroup(
num_blocks[i],
num_channels[i],
conv_shape,
conv_stride,
pool_size,
pool_stride[i],
data_format=data_format,
activation=activation))
# Create output layer.
out_layer = snt.nets.MLP(num_output_hidden, activation=activation)
# Compose blocks and final layer.
self._resnet = snt.Sequential(
blocks + [activation, snt.Flatten(), out_layer])
def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
"""Evaluates the ResidualPixelCore."""
# Convert to floats.
preprocessed_inputs = _preprocess_inputs(inputs, self._output_dtype)
torso_output = self._resnet(preprocessed_inputs)
return torso_output
class ResidualBlockGroup(snt.Module):
"""Higher level block for ResNet implementation."""
def __init__(self,
num_blocks: int,
num_output_channels: int,
conv_shape: Union[int, Sequence[int]],
conv_stride: Union[int, Sequence[int]],
pool_shape: Union[int, Sequence[int]],
pool_stride: Union[int, Sequence[int]],
data_format: str = 'NHWC',
activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.relu,
name: Optional[str] = None):
super().__init__(name=name)
self._num_blocks = num_blocks
self._data_format = data_format
self._activation = activation
# The pooling operation expects a 2-rank shape/stride (height and width).
if isinstance(pool_shape, int):
pool_shape = 2 * [pool_shape]
if isinstance(pool_stride, int):
pool_stride = 2 * [pool_stride]
# Create a Conv2D factory since we'll be making quite a few.
def build_conv_layer(name: str):
return snt.Conv2D(
num_output_channels,
conv_shape,
stride=conv_stride,
padding='SAME',
data_format=data_format,
name=name)
# Create a pooling layer.
def pooling_layer(inputs: tf.Tensor) -> tf.Tensor:
return tf.nn.pool(
inputs,
pool_shape,
pooling_type='MAX',
strides=pool_stride,
padding='SAME',
data_format=data_format)
# Create an initial conv layer and pooling to scale the image down.
self._downscale = snt.Sequential(
[build_conv_layer('downscale'), pooling_layer])
# Residual block(s).
self._convs = []
for i in range(self._num_blocks):
name = 'residual_block_%d' % i
self._convs.append(
[build_conv_layer(name + '_0'),
build_conv_layer(name + '_1')])
def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
# Downscale the inputs.
conv_out = self._downscale(inputs)
# Apply (sequence of) residual block(s).
for i in range(self._num_blocks):
block_input = conv_out
conv_out = self._activation(conv_out)
conv_out = self._convs[i][0](conv_out)
conv_out = self._activation(conv_out)
conv_out = self._convs[i][1](conv_out)
conv_out += block_input
return conv_out
def _preprocess_inputs(inputs: tf.Tensor, output_dtype: tf.DType) -> tf.Tensor:
"""Returns the `Tensor` corresponding to the preprocessed inputs."""
rank = inputs.shape.rank
if rank < 4:
raise ValueError(
'Input Tensor must have at least 4 dimensions (for '
'batch size, height, width, and channels), but it only has '
'{}'.format(rank))
flattened_inputs = snt.Flatten(preserve_dims=3)(inputs)
processed_inputs = tf.image.convert_image_dtype(
flattened_inputs, dtype=output_dtype)
return processed_inputs
class DrQTorso(snt.Module):
"""DrQ Torso inspired by the second DrQ paper [Yarats et al., 2021].
[Yarats et al., 2021] https://arxiv.org/abs/2107.09645
"""
def __init__(
self,
data_format: str = 'NHWC',
activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.relu,
output_dtype: tf.DType = tf.float32,
name: str = 'resnet_torso'):
super().__init__(name=name)
self._output_dtype = output_dtype
# Create a Conv2D factory since we'll be making quite a few.
gain = 2**0.5 if activation == tf.nn.relu else 1.
def build_conv_layer(name: str,
output_channels: int = 32,
kernel_shape: Sequence[int] = (3, 3),
stride: int = 1):
return snt.Conv2D(
output_channels=output_channels,
kernel_shape=kernel_shape,
stride=stride,
padding='SAME',
data_format=data_format,
w_init=snt.initializers.Orthogonal(gain=gain, seed=None),
b_init=snt.initializers.Zeros(),
name=name)
self._network = snt.Sequential(
[build_conv_layer('conv_0', stride=2),
activation,
build_conv_layer('conv_1', stride=1),
activation,
build_conv_layer('conv_2', stride=1),
activation,
build_conv_layer('conv_3', stride=1),
activation,
snt.Flatten()])
def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
"""Evaluates the ResidualPixelCore."""
# Normalize to -0.5 to 0.5
preprocessed_inputs = _preprocess_inputs(inputs, self._output_dtype) - 0.5
torso_output = self._network(preprocessed_inputs)
return torso_output
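# Illustrative sketch, not part of the original file: running the default
# IMPALA-style ResNetTorso on a dummy batch of uint8 frames. The 72x96x3 frame
# size and batch size are assumptions chosen only for demonstration.
if __name__ == '__main__':
  torso = ResNetTorso()
  frames = tf.zeros([4, 72, 96, 3], dtype=tf.uint8)
  features = torso(frames)  # [4, 256] with the default num_output_hidden.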
|
acme-master
|
acme/tf/networks/vision.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules for computing custom embeddings."""
from acme.tf.networks import base
from acme.wrappers import observation_action_reward
import sonnet as snt
import tensorflow as tf
class OAREmbedding(snt.Module):
"""Module for embedding (observation, action, reward) inputs together."""
def __init__(self, torso: base.Module, num_actions: int):
super().__init__(name='oar_embedding')
self._num_actions = num_actions
self._torso = torso
def __call__(self, inputs: observation_action_reward.OAR) -> tf.Tensor:
"""Embed each of the (observation, action, reward) inputs & concatenate."""
# Add dummy trailing dimension to rewards if necessary.
if len(inputs.reward.shape.dims) == 1:
inputs = inputs._replace(reward=tf.expand_dims(inputs.reward, axis=-1))
features = self._torso(inputs.observation) # [T?, B, D]
action = tf.one_hot(inputs.action, depth=self._num_actions) # [T?, B, A]
reward = tf.nn.tanh(inputs.reward) # [T?, B, 1]
embedding = tf.concat([features, action, reward], axis=-1) # [T?, B, D+A+1]
return embedding
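# Illustrative sketch, not part of the original file: embedding a dummy batch of
# (observation, action, reward) triples. The MLP torso (any Sonnet module works
# at runtime) and the sizes are assumptions; with a D-dimensional torso output
# and A actions the embedding has D + A + 1 features per example.
if __name__ == '__main__':
  embed = OAREmbedding(torso=snt.nets.MLP([8]), num_actions=4)
  oar = observation_action_reward.OAR(
      observation=tf.zeros([2, 5]),
      action=tf.constant([0, 3]),
      reward=tf.constant([0.5, -1.0]))
  embedding = embed(oar)  # [2, 8 + 4 + 1] = [2, 13]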
|
acme-master
|
acme/tf/networks/embedding.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A duelling network architecture, as described in [0].
[0] https://arxiv.org/abs/1511.06581
"""
from typing import Sequence
import sonnet as snt
import tensorflow as tf
class DuellingMLP(snt.Module):
"""A Duelling MLP Q-network."""
def __init__(
self,
num_actions: int,
hidden_sizes: Sequence[int],
):
super().__init__(name='duelling_q_network')
self._value_mlp = snt.nets.MLP([*hidden_sizes, 1])
self._advantage_mlp = snt.nets.MLP([*hidden_sizes, num_actions])
def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
"""Forward pass of the duelling network.
Args:
inputs: 2-D tensor of shape [batch_size, embedding_size].
Returns:
q_values: 2-D tensor of action values of shape [batch_size, num_actions]
"""
# Compute value & advantage for duelling.
value = self._value_mlp(inputs) # [B, 1]
advantages = self._advantage_mlp(inputs) # [B, A]
# Advantages have zero mean.
advantages -= tf.reduce_mean(advantages, axis=-1, keepdims=True) # [B, A]
q_values = value + advantages # [B, A]
return q_values
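# Illustrative sketch, not part of the original file: a tiny DuellingMLP applied
# to random embeddings. Sizes are arbitrary assumptions. Because the advantages
# are mean-subtracted, the per-state mean of the Q-values equals the output of
# the value branch.
if __name__ == '__main__':
  duelling_network = DuellingMLP(num_actions=3, hidden_sizes=[16])
  q_values = duelling_network(tf.random.normal([5, 7]))  # [5, 3]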
|
acme-master
|
acme/tf/networks/duelling.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Useful sonnet modules to chain after distributional module outputs."""
from acme import types
from acme.tf import utils as tf2_utils
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
import tree
tfd = tfp.distributions
class StochasticModeHead(snt.Module):
"""Simple sonnet module to produce the mode of a tfp.Distribution."""
def __call__(self, distribution: tfd.Distribution):
return distribution.mode()
class StochasticMeanHead(snt.Module):
"""Simple sonnet module to produce the mean of a tfp.Distribution."""
def __call__(self, distribution: tfd.Distribution):
return distribution.mean()
class StochasticSamplingHead(snt.Module):
"""Simple sonnet module to sample from a tfp.Distribution."""
def __call__(self, distribution: tfd.Distribution):
return distribution.sample()
class ExpQWeightedPolicy(snt.Module):
"""Exponentially Q-weighted policy.
Given a stochastic policy and a critic, returns a (stochastic) policy which
samples multiple actions from the underlying policy, computes the Q-values for
each action, and chooses the final action among the sampled ones with
probability proportional to the exponentiated Q values, tempered by
a parameter beta.
"""
def __init__(self,
actor_network: snt.Module,
critic_network: snt.Module,
beta: float = 1.0,
num_action_samples: int = 16):
super().__init__(name='ExpQWeightedPolicy')
self._actor_network = actor_network
self._critic_network = critic_network
self._num_action_samples = num_action_samples
self._beta = beta
def __call__(self, inputs: types.NestedTensor) -> tf.Tensor:
# Inputs are of size [B, ...]. Here we tile them to be of shape [N, B, ...].
tiled_inputs = tf2_utils.tile_nested(inputs, self._num_action_samples)
shape = tf.shape(tree.flatten(tiled_inputs)[0])
n, b = shape[0], shape[1]
tf.debugging.assert_equal(n, self._num_action_samples,
'Internal Error. Unexpected tiled_inputs shape.')
dummy_zeros_n_b = tf.zeros((n, b))
# Reshape to [N * B, ...].
merge = lambda x: snt.merge_leading_dims(x, 2)
tiled_inputs = tree.map_structure(merge, tiled_inputs)
tiled_actions = self._actor_network(tiled_inputs)
# Compute Q-values and the resulting tempered probabilities.
q = self._critic_network(tiled_inputs, tiled_actions)
boltzmann_logits = q / self._beta
boltzmann_logits = snt.split_leading_dim(boltzmann_logits, dummy_zeros_n_b,
2)
# [B, N]
boltzmann_logits = tf.transpose(boltzmann_logits, perm=(1, 0))
# Resample one action per batch according to the Boltzmann distribution.
action_idx = tfp.distributions.Categorical(logits=boltzmann_logits).sample()
# [B, 2], where the first column is 0, 1, 2,... corresponding to indices to
# the batch dimension.
action_idx = tf.stack((tf.range(b), action_idx), axis=1)
tiled_actions = snt.split_leading_dim(tiled_actions, dummy_zeros_n_b, 2)
action_dim = len(tiled_actions.get_shape().as_list())
tiled_actions = tf.transpose(tiled_actions,
perm=[1, 0] + list(range(2, action_dim)))
# [B, ...]
action_sample = tf.gather_nd(tiled_actions, action_idx)
return action_sample
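# Illustrative sketch, not part of the original file: using ExpQWeightedPolicy
# with throwaway stand-in actor and critic modules. The sizes, the linear critic
# and the tanh actor are assumptions chosen only for demonstration; the critic
# is expected to return one scalar Q-value per (observation, action) pair.
if __name__ == '__main__':
  actor = snt.Sequential([snt.Linear(2), tf.tanh])
  critic_linear = snt.Linear(1)
  def critic(observation, action):
    return tf.squeeze(critic_linear(tf.concat([observation, action], axis=-1)), axis=-1)
  policy = ExpQWeightedPolicy(actor, critic, beta=1.0, num_action_samples=4)
  action = policy(tf.zeros([3, 5]))  # [3, 2]: one sampled action per batch element.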
|
acme-master
|
acme/tf/networks/stochastic.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks useful for building recurrent agents.
"""
import functools
from typing import NamedTuple, Optional, Sequence, Tuple
from absl import logging
from acme import types
from acme.tf import savers
from acme.tf import utils
from acme.tf.networks import base
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
import tree
RNNState = types.NestedTensor
class PolicyCriticRNNState(NamedTuple):
"""Consists of two RNNStates called 'policy' and 'critic'."""
policy: RNNState
critic: RNNState
class UnpackWrapper(snt.Module):
"""Gets a list of arguments and pass them as separate arguments.
Example
```
class Critic(snt.Module):
def __call__(self, o, a):
pass
critic = Critic()
UnpackWrapper(critic)((o, a))
```
calls critic(o, a)
"""
def __init__(self, module: snt.Module, name: str = 'UnpackWrapper'):
super().__init__(name=name)
self._module = module
def __call__(self,
inputs: Sequence[types.NestedTensor]) -> types.NestedTensor:
# Unpack the inputs before passing to the underlying module.
return self._module(*inputs)
class RNNUnpackWrapper(snt.RNNCore):
"""Gets a list of arguments and pass them as separate arguments.
Example
```
class Critic(snt.RNNCore):
def __call__(self, o, a, prev_state):
pass
critic = Critic()
RNNUnpackWrapper(critic)((o, a), prev_state)
```
calls m(o, a, prev_state)
"""
def __init__(self, module: snt.RNNCore, name: str = 'RNNUnpackWrapper'):
super().__init__(name=name)
self._module = module
def __call__(self, inputs: Sequence[types.NestedTensor],
prev_state: RNNState) -> Tuple[types.NestedTensor, RNNState]:
# Unpack the inputs before passing to the underlying module.
return self._module(*inputs, prev_state)
def initial_state(self, batch_size):
return self._module.initial_state(batch_size)
class CriticDeepRNN(snt.DeepRNN):
"""Same as snt.DeepRNN, but takes three inputs (obs, act, prev_state).
"""
def __init__(self, layers: Sequence[snt.Module]):
# Make the first layer take a single input instead of a list of arguments.
if isinstance(layers[0], snt.RNNCore):
first_layer = RNNUnpackWrapper(layers[0])
else:
first_layer = UnpackWrapper(layers[0])
super().__init__([first_layer] + list(layers[1:]))
self._unwrapped_first_layer = layers[0]
self.__input_signature = None
def __call__(self, inputs: types.NestedTensor, action: tf.Tensor,
prev_state: RNNState) -> Tuple[types.NestedTensor, RNNState]:
# Pack the inputs into a tuple and then using inherited DeepRNN logic to
# pass them through the layers.
# This in turn will pass the packed inputs into the first layer
# (UnpackWrapper) which will unpack them back.
return super().__call__((inputs, action), prev_state)
@property
def _input_signature(self) -> Optional[tf.TensorSpec]:
"""Return input signature for Acme snapshotting.
The Acme way of snapshotting works as follows: you first create your network
variables via the utility function `acme.tf.utils.create_variables()`, which
adds an `_input_signature` attribute to your module. This attribute is
critical for proper snapshot saving and loading.
If a module with such an attribute is wrapped into e.g. DeepRNN, Acme
descends into the `_layers[0]` of that DeepRNN to find the input
signature.
This implementation allows CriticDeepRNN to work seamlessly like DeepRNN for
the following two use-cases:
1) Creating variables *before* wrapping:
```
unwrapped_critic = Critic()
acme.tf.utils.create_variables(unwrapped_critic, specs)
critic = CriticDeepRNN([unwrapped_critic])
```
2) Create variables *after* wrapping:
```
unwrapped_critic = Critic()
critic = CriticDeepRNN([unwrapped_critic])
acme.tf.utils.create_variables(critic, specs)
```
Returns:
      input_signature of the module or None if it is not known (i.e. the
      variables were not created by acme.tf.utils.create_variables, neither for
      this module nor for any of its descendants).
"""
if self.__input_signature is not None:
# To make case (2) (see above) work, we need to allow create_variables to
# assign an _input_signature attribute to this module, which is why we
    # create an additional __input_signature attribute with a setter (see below).
return self.__input_signature
# To make case (1) work, we descend into self._unwrapped_first_layer
# and try to get its input signature (if it exists) by calling
# savers.get_input_signature.
# Ideally, savers.get_input_signature should automatically descend into
# DeepRNN. But in this case it breaks on CriticDeepRNN because
# CriticDeepRNN._layers[0] is an UnpackWrapper around the underlying module
# and not the module itself.
input_signature = savers._get_input_signature(self._unwrapped_first_layer) # pylint: disable=protected-access
if input_signature is None:
return None
# Since adding recurrent modules via CriticDeepRNN changes the recurrent
# state, we need to update its spec here.
state = self.initial_state(1)
input_signature[-1] = tree.map_structure(
lambda t: tf.TensorSpec((None,) + t.shape[1:], t.dtype), state)
self.__input_signature = input_signature
return input_signature
@_input_signature.setter
def _input_signature(self, new_spec: tf.TensorSpec):
self.__input_signature = new_spec
class RecurrentExpQWeightedPolicy(snt.RNNCore):
"""Recurrent exponentially Q-weighted policy."""
def __init__(self,
policy_network: snt.Module,
critic_network: snt.Module,
temperature_beta: float = 1.0,
num_action_samples: int = 16):
super().__init__(name='RecurrentExpQWeightedPolicy')
self._policy_network = policy_network
self._critic_network = critic_network
self._num_action_samples = num_action_samples
self._temperature_beta = temperature_beta
def __call__(self,
observation: types.NestedTensor,
prev_state: PolicyCriticRNNState
) -> Tuple[types.NestedTensor, PolicyCriticRNNState]:
return tf.vectorized_map(self._call, (observation, prev_state))
def _call(
self, observation_and_state: Tuple[types.NestedTensor,
PolicyCriticRNNState]
) -> Tuple[types.NestedTensor, PolicyCriticRNNState]:
"""Computes a forward step for a single element.
The observation and state are packed together in order to use
`tf.vectorized_map` to handle batches of observations.
See this module's __call__() function.
Args:
observation_and_state: the observation and state packed in a tuple.
Returns:
The selected action and the corresponding state.
"""
observation, prev_state = observation_and_state
# Tile input observations and states to allow multiple policy predictions.
tiled_observation, tiled_prev_state = utils.tile_nested(
(observation, prev_state), self._num_action_samples)
actions, policy_states = self._policy_network(
tiled_observation, tiled_prev_state.policy)
# Evaluate multiple critic predictions with the sampled actions.
value_distribution, critic_states = self._critic_network(
tiled_observation, actions, tiled_prev_state.critic)
value_estimate = value_distribution.mean()
# Resample a single action of the sampled actions according to logits given
# by the tempered Q-values.
selected_action_idx = tfp.distributions.Categorical(
probs=tf.nn.softmax(value_estimate / self._temperature_beta)).sample()
selected_action = actions[selected_action_idx]
# Select and return the RNN state that corresponds to the selected action.
states = PolicyCriticRNNState(
policy=policy_states, critic=critic_states)
selected_state = tree.map_structure(
lambda x: x[selected_action_idx], states)
return selected_action, selected_state
def initial_state(self, batch_size: int) -> PolicyCriticRNNState:
return PolicyCriticRNNState(
policy=self._policy_network.initial_state(batch_size),
critic=self._critic_network.initial_state(batch_size)
)
class DeepRNN(snt.DeepRNN, base.RNNCore):
"""Unroll-aware deep RNN module.
Sonnet's DeepRNN steps through RNNCores sequentially which can result in a
performance hit, in particular when using Transformers. This module adds an
unroll() method which unrolls each module in the DeepRNN individually,
allowing efficient implementation of the unroll operation. For example, a
Transformer can 'unroll' by evaluating the whole sequence at once (this being
one of the advantages of Transformers over e.g. LSTMs).
  Any RNNCore passed to this module should implement unroll(). Failure to do so
may cause the RNNCore not to be called properly. For example, passing a
partial function application of a snt.RNNCore to this module will fail (this
is also true for snt.DeepRNN). However, the special case of passing in a
RNNCore object that does not implement unroll() is supported and will be
dynamically unrolled. Implement unroll() to override this behavior with
static unrolling.
Stateless modules (i.e. anything other than an RNNCore) which do not
implement unroll() are batch applied over the time and batch axes
simultaneously. Effectively, this means that such modules may be applied to
fairly large batches, potentially leading to out-of-memory issues.
"""
def __init__(self, layers, name: Optional[str] = None):
"""Initializes the module."""
super().__init__(layers, name=name)
self.__input_signature = None
self._num_unrollable = 0
# As a convenience, check for snt.RNNCore modules and dynamically unroll
# them if they don't already support unrolling. This check can fail, e.g.
# if a partially applied RNNCore is passed in. Sonnet's implementation of
# DeepRNN suffers from the same problem.
for layer in self._layers:
if hasattr(layer, 'unroll'):
self._num_unrollable += 1
elif isinstance(layer, snt.RNNCore):
self._num_unrollable += 1
layer.unroll = functools.partial(snt.dynamic_unroll, layer)
logging.warning(
'Acme DeepRNN detected a Sonnet RNNCore. '
'This will be dynamically unrolled. Please implement unroll() '
'to suppress this warning.')
def unroll(self,
inputs: types.NestedTensor,
state: base.State,
sequence_length: int,
) -> Tuple[types.NestedTensor, base.State]:
"""Unroll each layer individually.
Calls unroll() on layers which support it, all other layers are
batch-applied over the first two axes (assumed to be the time and batch
axes).
Args:
inputs: A nest of `tf.Tensor` in time-major format.
state: The RNN core state.
sequence_length: How long the static_unroll should go for.
Returns:
Nested sequence output of RNN, and final state.
Raises:
ValueError if the length of `state` does not match the number of
unrollable layers.
"""
if len(state) != self._num_unrollable:
raise ValueError(
'DeepRNN was called with the wrong number of states. The length of '
'`state` does not match the number of unrollable layers.')
states = iter(state)
outputs = inputs
next_states = []
for layer in self._layers:
if hasattr(layer, 'unroll'):
# The length of the `states` list was checked above.
outputs, next_state = layer.unroll(outputs, next(states),
sequence_length)
next_states.append(next_state)
else:
# Couldn't unroll(); assume that this is a stateless module.
outputs = snt.BatchApply(layer, num_dims=2)(outputs)
return outputs, tuple(next_states)
@property
def _input_signature(self) -> Optional[tf.TensorSpec]:
"""Return input signature for Acme snapshotting, see CriticDeepRNN."""
if self.__input_signature is not None:
return self.__input_signature
input_signature = savers._get_input_signature(self._layers[0]) # pylint: disable=protected-access
if input_signature is None:
return None
state = self.initial_state(1)
input_signature[-1] = tree.map_structure(
lambda t: tf.TensorSpec((None,) + t.shape[1:], t.dtype), state)
self.__input_signature = input_signature
return input_signature
@_input_signature.setter
def _input_signature(self, new_spec: tf.TensorSpec):
self.__input_signature = new_spec
class LSTM(snt.LSTM, base.RNNCore):
"""Unrollable interface to LSTM.
This module is supposed to be used with the DeepRNN class above, and more
generally in networks which support unroll().
"""
def unroll(self,
inputs: types.NestedTensor,
state: base.State,
sequence_length: int,
) -> Tuple[types.NestedTensor, base.State]:
return snt.static_unroll(self, inputs, state, sequence_length)
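# Illustrative sketch, not part of the original file: composing the unroll-aware
# DeepRNN above from a stateless linear layer and the unrollable LSTM, then
# unrolling it over a dummy time-major batch. All sizes are assumptions.
if __name__ == '__main__':
  core = DeepRNN([snt.Linear(16), tf.nn.relu, LSTM(8)])
  state = core.initial_state(2)  # Batch size of 2.
  inputs = tf.zeros([5, 2, 3])   # [T, B, D], time-major.
  outputs, final_state = core.unroll(inputs, state, sequence_length=5)
  # outputs: [5, 2, 8]; the stateless layers are batch-applied over T and B.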
|
acme-master
|
acme/tf/networks/recurrence.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks used in discrete-action agents."""
import sonnet as snt
import tensorflow as tf
class DiscreteFilteredQNetwork(snt.Module):
"""Discrete filtered Q-network.
This produces filtered Q values according to the method used in the discrete
BCQ algorithm (https://arxiv.org/pdf/1910.01708.pdf - section 4).
"""
def __init__(self,
g_network: snt.Module,
q_network: snt.Module,
threshold: float):
super().__init__(name='discrete_filtered_qnet')
assert threshold >= 0 and threshold <= 1
self.g_network = g_network
self.q_network = q_network
self._threshold = threshold
def __call__(self, o_t: tf.Tensor) -> tf.Tensor:
q_t = self.q_network(o_t)
g_t = tf.nn.softmax(self.g_network(o_t))
normalized_g_t = g_t / tf.reduce_max(g_t, axis=-1, keepdims=True)
# Filter actions based on g_network outputs.
min_q = tf.reduce_min(q_t, axis=-1, keepdims=True)
return tf.where(normalized_g_t >= self._threshold, q_t, min_q)
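# Illustrative sketch, not part of the original file: filtering Q-values with a
# behaviour-cloning network g as in discrete BCQ. The tiny MLPs, threshold and
# batch of dummy observations are arbitrary assumptions.
if __name__ == '__main__':
  network = DiscreteFilteredQNetwork(
      g_network=snt.nets.MLP([16, 4]),
      q_network=snt.nets.MLP([16, 4]),
      threshold=0.5)
  filtered_q = network(tf.random.normal([2, 8]))  # [2, 4]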
|
acme-master
|
acme/tf/networks/discrete.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Useful network definitions."""
from acme.tf.networks.atari import AtariTorso
from acme.tf.networks.atari import DeepIMPALAAtariNetwork
from acme.tf.networks.atari import DQNAtariNetwork
from acme.tf.networks.atari import IMPALAAtariNetwork
from acme.tf.networks.atari import R2D2AtariNetwork
from acme.tf.networks.base import DistributionalModule
from acme.tf.networks.base import Module
from acme.tf.networks.base import RNNCore
from acme.tf.networks.continuous import LayerNormAndResidualMLP
from acme.tf.networks.continuous import LayerNormMLP
from acme.tf.networks.continuous import NearZeroInitializedLinear
from acme.tf.networks.discrete import DiscreteFilteredQNetwork
from acme.tf.networks.distributional import ApproximateMode
from acme.tf.networks.distributional import DiscreteValuedHead
from acme.tf.networks.distributional import MultivariateGaussianMixture
from acme.tf.networks.distributional import MultivariateNormalDiagHead
from acme.tf.networks.distributional import UnivariateGaussianMixture
from acme.tf.networks.distributions import DiscreteValuedDistribution
from acme.tf.networks.duelling import DuellingMLP
from acme.tf.networks.masked_epsilon_greedy import NetworkWithMaskedEpsilonGreedy
from acme.tf.networks.multihead import Multihead
from acme.tf.networks.multiplexers import CriticMultiplexer
from acme.tf.networks.noise import ClippedGaussian
from acme.tf.networks.policy_value import PolicyValueHead
from acme.tf.networks.recurrence import CriticDeepRNN
from acme.tf.networks.recurrence import DeepRNN
from acme.tf.networks.recurrence import LSTM
from acme.tf.networks.recurrence import RecurrentExpQWeightedPolicy
from acme.tf.networks.rescaling import ClipToSpec
from acme.tf.networks.rescaling import RescaleToSpec
from acme.tf.networks.rescaling import TanhToSpec
from acme.tf.networks.stochastic import ExpQWeightedPolicy
from acme.tf.networks.stochastic import StochasticMeanHead
from acme.tf.networks.stochastic import StochasticModeHead
from acme.tf.networks.stochastic import StochasticSamplingHead
from acme.tf.networks.vision import DrQTorso
from acme.tf.networks.vision import ResNetTorso
# For backwards compatibility.
GaussianMixtureHead = UnivariateGaussianMixture
try:
# pylint: disable=g-bad-import-order,g-import-not-at-top
from acme.tf.networks.legal_actions import MaskedSequential
from acme.tf.networks.legal_actions import EpsilonGreedy
except ImportError:
pass
# Internal imports.
from acme.tf.networks.quantile import IQNNetwork
|
acme-master
|
acme/tf/networks/__init__.py
|