| python_code | repo_name | file_path |
|---|---|---|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment wrapper which converts double-to-single precision."""
from acme import specs
from acme import types
from acme.wrappers import base
import dm_env
import numpy as np
import tree
class SinglePrecisionWrapper(base.EnvironmentWrapper):
"""Wrapper which converts environments from double- to single-precision."""
def _convert_timestep(self, timestep: dm_env.TimeStep) -> dm_env.TimeStep:
return timestep._replace(
reward=_convert_value(timestep.reward),
discount=_convert_value(timestep.discount),
observation=_convert_value(timestep.observation))
def step(self, action) -> dm_env.TimeStep:
return self._convert_timestep(self._environment.step(action))
def reset(self) -> dm_env.TimeStep:
return self._convert_timestep(self._environment.reset())
def action_spec(self):
return _convert_spec(self._environment.action_spec())
def discount_spec(self):
return _convert_spec(self._environment.discount_spec())
def observation_spec(self):
return _convert_spec(self._environment.observation_spec())
def reward_spec(self):
return _convert_spec(self._environment.reward_spec())
def _convert_spec(nested_spec: types.NestedSpec) -> types.NestedSpec:
"""Convert a nested spec."""
def _convert_single_spec(spec: specs.Array):
"""Convert a single spec."""
if spec.dtype == 'O':
# Pass StringArray objects through unmodified.
return spec
if np.issubdtype(spec.dtype, np.float64):
dtype = np.float32
elif np.issubdtype(spec.dtype, np.int64):
dtype = np.int32
else:
dtype = spec.dtype
return spec.replace(dtype=dtype)
return tree.map_structure(_convert_single_spec, nested_spec)
def _convert_value(nested_value: types.Nest) -> types.Nest:
"""Convert a nested value given a desired nested spec."""
def _convert_single_value(value):
if value is not None:
value = np.array(value, copy=False)
if np.issubdtype(value.dtype, np.float64):
value = np.array(value, copy=False, dtype=np.float32)
elif np.issubdtype(value.dtype, np.int64):
value = np.array(value, copy=False, dtype=np.int32)
return value
return tree.map_structure(_convert_single_value, nested_value)
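# --- Illustrative sketch (added for exposition; not part of the original acme
# file). It exercises the module's own conversion helpers to show the dtype
# mapping performed by SinglePrecisionWrapper: float64 -> float32, int64 -> int32.
def _example_conversions():
  double_spec = specs.Array(shape=(3,), dtype=np.float64, name='observation')
  single_spec = _convert_spec(double_spec)
  assert single_spec.dtype == np.float32

  converted = _convert_value(np.arange(4, dtype=np.int64))
  assert converted.dtype == np.int32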
|
acme-master
|
acme/wrappers/single_precision.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adders for sending data from actors to replay buffers."""
# pylint: disable=unused-import
from acme.adders.base import Adder
from acme.adders.wrappers import ForkingAdder
from acme.adders.wrappers import IgnoreExtrasAdder
|
acme-master
|
acme/adders/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of useful adder wrappers."""
from typing import Iterable
from acme import types
from acme.adders import base
import dm_env
class ForkingAdder(base.Adder):
"""An adder that forks data into several other adders."""
def __init__(self, adders: Iterable[base.Adder]):
self._adders = adders
def reset(self):
for adder in self._adders:
adder.reset()
def add_first(self, timestep: dm_env.TimeStep):
for adder in self._adders:
adder.add_first(timestep)
def add(self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
extras: types.NestedArray = ()):
for adder in self._adders:
adder.add(action, next_timestep, extras)
class IgnoreExtrasAdder(base.Adder):
"""An adder that ignores extras."""
def __init__(self, adder: base.Adder):
self._adder = adder
def reset(self):
self._adder.reset()
def add_first(self, timestep: dm_env.TimeStep):
self._adder.add_first(timestep)
def add(self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
extras: types.NestedArray = ()):
self._adder.add(action, next_timestep)
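# --- Illustrative sketch (added for exposition; not part of the original acme
# file). Shows how the two wrappers compose: each step is forked into two
# underlying adders, one of which drops extras. Both arguments are assumed to
# be already-constructed `base.Adder` instances (e.g. Reverb adders).
def _example_composition(transition_adder: base.Adder,
                         episode_adder: base.Adder) -> base.Adder:
  return ForkingAdder([IgnoreExtrasAdder(transition_adder), episode_adder])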
|
acme-master
|
acme/adders/wrappers.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for adders which transmit data to a replay buffer."""
import abc
from acme import types
import dm_env
class Adder(abc.ABC):
"""The Adder interface.
An adder packs together data to send to the replay buffer, and potentially
performs some reduction/transformation to this data in the process.
All adders will use this API. Below is an illustrative example of how they
are intended to be used in a typical RL run-loop. We assume that the
environment conforms to the dm_env environment API.
```python
# Reset the environment and add the first observation.
timestep = env.reset()
adder.add_first(timestep)
while not timestep.last():
# Generate an action from the policy and step the environment.
action = my_policy(timestep)
timestep = env.step(action)
# Add the action and the resulting timestep.
adder.add(action, next_timestep=timestep)
```
Note that for all adders, the `add()` method expects an action taken and the
*resulting* timestep observed after taking this action; the timestep is named
`next_timestep` precisely to emphasize this point.
"""
@abc.abstractmethod
def add_first(self, timestep: dm_env.TimeStep):
"""Defines the interface for an adder's `add_first` method.
We expect this to be called at the beginning of each episode and it will
start a trajectory to be added to replay with an initial observation.
Args:
timestep: a dm_env TimeStep corresponding to the first step.
"""
@abc.abstractmethod
def add(
self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
extras: types.NestedArray = (),
):
"""Defines the adder `add` interface.
Args:
action: A possibly nested structure corresponding to a_t.
next_timestep: A dm_env Timestep object corresponding to the resulting
data obtained by taking the given action.
extras: A possibly nested structure of extra data to add to replay.
"""
@abc.abstractmethod
def reset(self):
"""Resets the adder's buffer."""
|
acme-master
|
acme/adders/base.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing Reverb adders."""
from typing import Any, Callable, Optional, Sequence, Tuple, TypeVar, Union
from acme import specs
from acme import types
from acme.adders import base as adders_base
from acme.adders import reverb as adders
from acme.utils import tree_utils
import dm_env
import numpy as np
import reverb
import tensorflow as tf
import tree
from absl.testing import absltest
StepWithExtra = Tuple[Any, dm_env.TimeStep, Any]
StepWithoutExtra = Tuple[Any, dm_env.TimeStep]
Step = TypeVar('Step', StepWithExtra, StepWithoutExtra)
def make_trajectory(observations):
"""Make a simple trajectory from a sequence of observations.
Arguments:
observations: a sequence of observations.
Returns:
a tuple (first, steps) where first contains the initial dm_env.TimeStep
object and steps contains a list of (action, step) tuples. The length of
steps is `len(observations) - 1`.
"""
first = dm_env.restart(observations[0])
middle = [(0, dm_env.transition(reward=0.0, observation=observation))
for observation in observations[1:-1]]
last = (0, dm_env.termination(reward=0.0, observation=observations[-1]))
return first, middle + [last]
def make_sequence(observations):
"""Create a sequence of timesteps of the form `first, [second, ..., last]`."""
first, steps = make_trajectory(observations)
observation = first.observation
sequence = []
start_of_episode = True
for action, timestep in steps:
extras = ()
sequence.append((observation, action, timestep.reward, timestep.discount,
start_of_episode, extras))
observation = timestep.observation
start_of_episode = False
sequence.append((observation, 0, 0.0, 0.0, False, ()))
return sequence
def _numeric_to_spec(x: Union[float, int, np.ndarray]):
if isinstance(x, np.ndarray):
return specs.Array(shape=x.shape, dtype=x.dtype)
elif isinstance(x, (float, int)):
return specs.Array(shape=(), dtype=type(x))
else:
raise ValueError(f'Unsupported numeric: {type(x)}')
def get_specs(step):
"""Infer spec from an example step."""
env_spec = tree.map_structure(
_numeric_to_spec,
specs.EnvironmentSpec(
observations=step[1].observation,
actions=step[0],
rewards=step[1].reward,
discounts=step[1].discount))
has_extras = len(step) == 3
if has_extras:
extras_spec = tree.map_structure(_numeric_to_spec, step[2])
else:
extras_spec = ()
return env_spec, extras_spec
class AdderTestMixin(absltest.TestCase):
"""A helper mixin for testing Reverb adders.
Note that any test inheriting from this mixin must also inherit from something
that provides the Python unittest assert methods.
"""
server: reverb.Server
client: reverb.Client
@classmethod
def setUpClass(cls):
super().setUpClass()
replay_table = reverb.Table.queue(adders.DEFAULT_PRIORITY_TABLE, 1000)
cls.server = reverb.Server([replay_table])
cls.client = reverb.Client(f'localhost:{cls.server.port}')
def tearDown(self):
self.client.reset(adders.DEFAULT_PRIORITY_TABLE)
super().tearDown()
@classmethod
def tearDownClass(cls):
cls.server.stop()
super().tearDownClass()
def num_episodes(self):
info = self.client.server_info(1)[adders.DEFAULT_PRIORITY_TABLE]
return info.num_episodes
def num_items(self):
info = self.client.server_info(1)[adders.DEFAULT_PRIORITY_TABLE]
return info.current_size
def items(self):
sampler = self.client.sample(
table=adders.DEFAULT_PRIORITY_TABLE,
num_samples=self.num_items(),
emit_timesteps=False)
return [sample.data for sample in sampler] # pytype: disable=attribute-error
def run_test_adder(
self,
adder: adders_base.Adder,
first: dm_env.TimeStep,
steps: Sequence[Step],
expected_items: Sequence[Any],
signature: types.NestedSpec,
pack_expected_items: bool = False,
stack_sequence_fields: bool = True,
repeat_episode_times: int = 1,
end_behavior: adders.EndBehavior = adders.EndBehavior.ZERO_PAD,
item_transform: Optional[Callable[[Sequence[np.ndarray]], Any]] = None):
"""Runs a unit test case for the adder.
Args:
adder: The instance of `Adder` that is being tested.
first: The first `dm_env.TimeStep` that is used to call
`Adder.add_first()`.
steps: A sequence of (action, timestep) tuples that are passed to
`Adder.add()`.
expected_items: The sequence of items that are expected to be created
by calling the adder's `add_first()` method on `first` and `add()` on
all of the elements in `steps`.
signature: Signature that written items must be compatible with.
pack_expected_items: Deprecated and not used. If true the expected items
are given unpacked and need to be packed in a list before comparison.
stack_sequence_fields: Whether to stack the sequence fields of the
expected items before comparing to the observed items. Usually False
for transition adders and True for both episode and sequence adders.
repeat_episode_times: How many times to run an episode.
end_behavior: How end of episode should be handled.
item_transform: Transformation of item simulating the work done by the
dataset pipeline on the learner in a real setup.
"""
del pack_expected_items
if not steps:
raise ValueError('At least one step must be given.')
has_extras = len(steps[0]) == 3
for _ in range(repeat_episode_times):
# Add all the data up to the final step.
adder.add_first(first)
for step in steps[:-1]:
action, ts = step[0], step[1]
if has_extras:
extras = step[2]
else:
extras = ()
adder.add(action, next_timestep=ts, extras=extras)
# Add the final step.
adder.add(*steps[-1])
# Force run the destructor to trigger the flushing of all pending items.
getattr(adder, '__del__', lambda: None)()
# Ending the episode should close the writer. No new writer should yet have
# been created as it is constructed lazily.
if end_behavior is not adders.EndBehavior.CONTINUE:
self.assertEqual(self.num_episodes(), repeat_episode_times)
# Make sure our expected and observed data match.
observed_items = self.items()
# Check matching number of items.
self.assertEqual(len(expected_items), len(observed_items))
# Check items are matching according to numpy's almost_equal.
for expected_item, observed_item in zip(expected_items, observed_items):
if stack_sequence_fields:
expected_item = tree_utils.stack_sequence_fields(expected_item)
# Apply the transformation which would be done by the dataset in a real
# setup.
if item_transform:
observed_item = item_transform(observed_item)
tree.map_structure(np.testing.assert_array_almost_equal,
tree.flatten(expected_item),
tree.flatten(observed_item))
# Make sure the signature matches what is being written by Reverb.
def _check_signature(spec: tf.TensorSpec, value: np.ndarray):
self.assertTrue(spec.is_compatible_with(tf.convert_to_tensor(value)))
# Check that it is possible to unpack observed using the signature.
for item in observed_items:
tree.map_structure(_check_signature, tree.flatten(signature),
tree.flatten(item))
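# --- Illustrative sketch (added for exposition; not part of the original acme
# file). Demonstrates the trajectory helpers above on a toy episode of three
# scalar observations, using only functions defined in this module.
def _example_trajectory_helpers():
  first, steps = make_trajectory(np.arange(3, dtype=np.float32))
  assert first.first()          # dm_env.restart(...) produces a FIRST step.
  assert steps[-1][1].last()    # The final entry wraps dm_env.termination(...).
  # make_sequence flattens the same episode into
  # (observation, action, reward, discount, start_of_episode, extras) tuples,
  # including a final entry for the terminal observation.
  assert len(make_sequence(np.arange(3))) == 3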
|
acme-master
|
acme/adders/reverb/test_utils.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for structured."""
from typing import Sequence
from acme import types
from acme.adders.reverb import sequence as adders
from acme.adders.reverb import structured
from acme.adders.reverb import test_cases
from acme.adders.reverb import test_utils
from acme.utils import tree_utils
import dm_env
import numpy as np
from reverb import structured_writer as sw
import tree
from absl.testing import absltest
from absl.testing import parameterized
class StructuredAdderTest(test_utils.AdderTestMixin, parameterized.TestCase):
@parameterized.named_parameters(*test_cases.BASE_TEST_CASES_FOR_SEQUENCE_ADDER
)
def test_sequence_adder(self,
sequence_length: int,
period: int,
first,
steps,
expected_sequences,
end_behavior: adders.EndBehavior,
repeat_episode_times: int = 1):
env_spec, extras_spec = test_utils.get_specs(steps[0])
step_spec = structured.create_step_spec(env_spec, extras_spec)
should_pad_trajectory = end_behavior == adders.EndBehavior.ZERO_PAD
def _maybe_zero_pad(flat_trajectory):
trajectory = tree.unflatten_as(step_spec, flat_trajectory)
if not should_pad_trajectory:
return trajectory
padding_length = sequence_length - flat_trajectory[0].shape[0]
if padding_length == 0:
return trajectory
padding = tree.map_structure(
lambda x: np.zeros([padding_length, *x.shape[1:]], x.dtype),
trajectory)
return tree.map_structure(lambda *x: np.concatenate(x), trajectory,
padding)
# The StructuredAdder does not support adding padding steps as we assume
# that the padding will be added on the learner side.
if end_behavior == adders.EndBehavior.ZERO_PAD:
end_behavior = adders.EndBehavior.TRUNCATE
configs = structured.create_sequence_config(
step_spec=step_spec,
sequence_length=sequence_length,
period=period,
end_of_episode_behavior=end_behavior)
adder = structured.StructuredAdder(
client=self.client,
max_in_flight_items=0,
configs=configs,
step_spec=step_spec)
super().run_test_adder(
adder=adder,
first=first,
steps=steps,
expected_items=expected_sequences,
repeat_episode_times=repeat_episode_times,
end_behavior=end_behavior,
item_transform=_maybe_zero_pad,
signature=sw.infer_signature(configs, step_spec))
@parameterized.named_parameters(*test_cases.TEST_CASES_FOR_TRANSITION_ADDER)
def test_transition_adder(self, n_step: int, additional_discount: float,
first: dm_env.TimeStep,
steps: Sequence[dm_env.TimeStep],
expected_transitions: Sequence[types.Transition]):
env_spec, extras_spec = test_utils.get_specs(steps[0])
step_spec = structured.create_step_spec(env_spec, extras_spec)
def _as_n_step_transition(flat_trajectory):
trajectory = tree.unflatten_as(step_spec, flat_trajectory)
rewards, discount = _compute_cumulative_quantities(
rewards=trajectory.reward,
discounts=trajectory.discount,
additional_discount=additional_discount,
n_step=tree.flatten(trajectory.reward)[0].shape[0])
tmap = tree.map_structure
return types.Transition(
observation=tmap(lambda x: x[0], trajectory.observation),
action=tmap(lambda x: x[0], trajectory.action),
reward=rewards,
discount=discount,
next_observation=tmap(lambda x: x[-1], trajectory.observation),
extras=tmap(lambda x: x[0], trajectory.extras))
configs = structured.create_n_step_transition_config(
step_spec=step_spec, n_step=n_step)
adder = structured.StructuredAdder(
client=self.client,
max_in_flight_items=0,
configs=configs,
step_spec=step_spec)
super().run_test_adder(
adder=adder,
first=first,
steps=steps,
expected_items=expected_transitions,
stack_sequence_fields=False,
item_transform=_as_n_step_transition,
signature=sw.infer_signature(configs, step_spec))
def _compute_cumulative_quantities(rewards: types.NestedArray,
discounts: types.NestedArray,
additional_discount: float, n_step: int):
"""Stolen from TransitionAdder."""
# Give the same tree structure to the n-step return accumulator,
# n-step discount accumulator, and self.discount, so that they can be
# iterated in parallel using tree.map_structure.
rewards, discounts, self_discount = tree_utils.broadcast_structures(
rewards, discounts, additional_discount)
flat_rewards = tree.flatten(rewards)
flat_discounts = tree.flatten(discounts)
flat_self_discount = tree.flatten(self_discount)
# Copy total_discount as it is otherwise read-only.
total_discount = [np.copy(a[0]) for a in flat_discounts]
# Broadcast n_step_return to have the broadcasted shape of
# reward * discount.
n_step_return = [
np.copy(np.broadcast_to(r[0],
np.broadcast(r[0], d).shape))
for r, d in zip(flat_rewards, total_discount)
]
# NOTE: total_discount will have one less self_discount applied to it than
# the value of self._n_step. This is so that when the learner/update uses
# an additional discount we don't apply it twice. Inside the following loop
# we will apply this right before summing up the n_step_return.
for i in range(1, n_step):
for nsr, td, r, d, sd in zip(n_step_return, total_discount, flat_rewards,
flat_discounts, flat_self_discount):
# Equivalent to: `total_discount *= self._discount`.
td *= sd
# Equivalent to: `n_step_return += reward[i] * total_discount`.
nsr += r[i] * td
# Equivalent to: `total_discount *= discount[i]`.
td *= d[i]
n_step_return = tree.unflatten_as(rewards, n_step_return)
total_discount = tree.unflatten_as(rewards, total_discount)
return n_step_return, total_discount
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/adders/reverb/structured_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sequence adders."""
from acme.adders.reverb import sequence as adders
from acme.adders.reverb import test_cases
from acme.adders.reverb import test_utils
from absl.testing import absltest
from absl.testing import parameterized
class SequenceAdderTest(test_utils.AdderTestMixin, parameterized.TestCase):
@parameterized.named_parameters(*test_cases.TEST_CASES_FOR_SEQUENCE_ADDER)
def test_adder(self,
sequence_length: int,
period: int,
first,
steps,
expected_sequences,
end_behavior: adders.EndBehavior = adders.EndBehavior.ZERO_PAD,
repeat_episode_times: int = 1):
adder = adders.SequenceAdder(
self.client,
sequence_length=sequence_length,
period=period,
end_of_episode_behavior=end_behavior)
super().run_test_adder(
adder=adder,
first=first,
steps=steps,
expected_items=expected_sequences,
repeat_episode_times=repeat_episode_times,
end_behavior=end_behavior,
signature=adder.signature(*test_utils.get_specs(steps[0])))
@parameterized.parameters(
(True, True, adders.EndBehavior.ZERO_PAD),
(False, True, adders.EndBehavior.TRUNCATE),
(False, False, adders.EndBehavior.CONTINUE),
)
def test_end_of_episode_behavior_set_correctly(self, pad_end_of_episode,
break_end_of_episode,
expected_behavior):
adder = adders.SequenceAdder(
self.client,
sequence_length=5,
period=3,
pad_end_of_episode=pad_end_of_episode,
break_end_of_episode=break_end_of_episode)
self.assertEqual(adder._end_of_episode_behavior, expected_behavior)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/adders/reverb/sequence_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Episode adders.
This implements full episode adders, potentially with padding.
"""
from typing import Callable, Optional, Iterable, Tuple
from acme import specs
from acme import types
from acme.adders.reverb import base
from acme.adders.reverb import utils
import dm_env
import numpy as np
import reverb
import tensorflow as tf
import tree
_PaddingFn = Callable[[Tuple[int, ...], np.dtype], np.ndarray]
class EpisodeAdder(base.ReverbAdder):
"""Adder which adds entire episodes as trajectories."""
def __init__(
self,
client: reverb.Client,
max_sequence_length: int,
delta_encoded: bool = False,
priority_fns: Optional[base.PriorityFnMapping] = None,
max_in_flight_items: int = 1,
padding_fn: Optional[_PaddingFn] = None,
# Deprecated kwargs.
chunk_length: Optional[int] = None,
):
del chunk_length
super().__init__(
client=client,
max_sequence_length=max_sequence_length,
delta_encoded=delta_encoded,
priority_fns=priority_fns,
max_in_flight_items=max_in_flight_items,
)
self._padding_fn = padding_fn
def add(
self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
extras: types.NestedArray = (),
):
if self._writer.episode_steps >= self._max_sequence_length - 1:
raise ValueError(
'The number of observations within the same episode will exceed '
'max_sequence_length with the addition of this transition.')
super().add(action, next_timestep, extras)
def _write(self):
# This adder only writes at the end of the episode, see _write_last()
pass
def _write_last(self):
if self._padding_fn is not None and self._writer.episode_steps < self._max_sequence_length:
history = self._writer.history
padding_step = dict(
observation=history['observation'],
action=history['action'],
reward=history['reward'],
discount=history['discount'],
extras=history.get('extras', ()))
# Get shapes and dtypes from the last element.
padding_step = tree.map_structure(
lambda col: self._padding_fn(col[-1].shape, col[-1].dtype),
padding_step)
padding_step['start_of_episode'] = False
while self._writer.episode_steps < self._max_sequence_length:
self._writer.append(padding_step)
trajectory = tree.map_structure(lambda x: x[:], self._writer.history)
# Pack the history into a base.Trajectory structure and get a numpy-converted
# variant for priority computation.
trajectory = base.Trajectory(**trajectory)
# Calculate the priority for this episode.
table_priorities = utils.calculate_priorities(self._priority_fns,
trajectory)
# Create a prioritized item for each table.
for table_name, priority in table_priorities.items():
self._writer.create_item(table_name, priority, trajectory)
self._writer.flush(self._max_in_flight_items)
# TODO(b/185309817): make this into a standalone method.
@classmethod
def signature(cls,
environment_spec: specs.EnvironmentSpec,
extras_spec: types.NestedSpec = (),
sequence_length: Optional[int] = None):
"""This is a helper method for generating signatures for Reverb tables.
Signatures are useful for validating data types and shapes, see Reverb's
documentation for details on how they are used.
Args:
environment_spec: A `specs.EnvironmentSpec` whose fields are nested
structures with leaf nodes that have `.shape` and `.dtype` attributes.
This should come from the environment that will be used to generate the
data inserted into the Reverb table.
extras_spec: A nested structure with leaf nodes that have `.shape` and
`.dtype` attributes. The structure (and shapes/dtypes) of this must be
the same as the `extras` passed into `ReverbAdder.add`.
sequence_length: An optional integer representing the expected length of
sequences that will be added to replay.
Returns:
A `Step` whose leaf nodes are `tf.TensorSpec` objects.
"""
def add_time_dim(paths: Iterable[str], spec: tf.TensorSpec):
return tf.TensorSpec(
shape=(sequence_length, *spec.shape),
dtype=spec.dtype,
name='/'.join(str(p) for p in paths))
trajectory_env_spec, trajectory_extras_spec = tree.map_structure_with_path(
add_time_dim, (environment_spec, extras_spec))
trajectory_spec = base.Trajectory(
*trajectory_env_spec,
start_of_episode=tf.TensorSpec(
shape=(sequence_length,), dtype=tf.bool, name='start_of_episode'),
extras=trajectory_extras_spec)
return trajectory_spec
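# --- Illustrative sketch (added for exposition; not part of the original acme
# file). Constructs an EpisodeAdder that zero-pads episodes up to
# `max_sequence_length`; `np.zeros` already matches the `_PaddingFn` signature
# (shape, dtype) -> ndarray. `client` is assumed to point at a running Reverb
# server whose table accepts whole-episode trajectories.
def _example_episode_adder(client: reverb.Client) -> EpisodeAdder:
  return EpisodeAdder(
      client=client,
      max_sequence_length=101,  # Leaves room for the episode plus final step.
      padding_fn=np.zeros,
  )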
|
acme-master
|
acme/adders/reverb/episode.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sequence adders.
This implements adders which add sequences or partial trajectories.
"""
import enum
import operator
from typing import Iterable, Optional
from acme import specs
from acme import types
from acme.adders.reverb import base
from acme.adders.reverb import utils
import numpy as np
import reverb
import tensorflow as tf
import tree
class EndBehavior(enum.Enum):
"""Class to enumerate available options for writing behavior at episode ends.
Example:
sequence_length = 3
period = 2
Episode steps (digits) and writing events (W):
1 2 3 4 5 6
. . W . W .
First two sequences:
1 2 3
. . 3 4 5
Last written sequences for the different end-of-episode behaviors:
WRITE . . . 4 5 6
CONTINUE . . . . 5 6 F
ZERO_PAD . . . . 5 6 0
TRUNCATE . . . . 5 6
Key:
F: First step of the next episode
0: Zero-filled Step
"""
WRITE = 'write_buffer'
CONTINUE = 'continue_to_next_episode'
ZERO_PAD = 'zero_pad_til_next_write'
TRUNCATE = 'write_truncated_buffer'
class SequenceAdder(base.ReverbAdder):
"""An adder which adds sequences of fixed length."""
def __init__(
self,
client: reverb.Client,
sequence_length: int,
period: int,
*,
delta_encoded: bool = False,
priority_fns: Optional[base.PriorityFnMapping] = None,
max_in_flight_items: Optional[int] = 2,
end_of_episode_behavior: Optional[EndBehavior] = None,
# Deprecated kwargs.
chunk_length: Optional[int] = None,
pad_end_of_episode: Optional[bool] = None,
break_end_of_episode: Optional[bool] = None,
validate_items: bool = True,
):
"""Makes a SequenceAdder instance.
Args:
client: See docstring for BaseAdder.
sequence_length: The fixed length of sequences we wish to add.
period: The period with which we add sequences. If less than
sequence_length, overlapping sequences are added. If equal to
sequence_length, sequences are exactly non-overlapping.
delta_encoded: If `True` (False by default) enables delta encoding, see
`Client` for more information.
priority_fns: See docstring for BaseAdder.
max_in_flight_items: The maximum number of items allowed to be "in flight"
at the same time. See `block_until_num_items` in
`reverb.TrajectoryWriter.flush` for more info.
end_of_episode_behavior: Determines how sequences at the end of the
episode are handled (default `EndBehavior.ZERO_PAD`). See
the docstring for `EndBehavior` for more information.
chunk_length: Deprecated and unused.
pad_end_of_episode: If True (default) then upon end of episode the current
sequence will be padded (with observations, actions, etc... whose values
are 0) until its length is `sequence_length`. If False then the last
sequence in the episode may have length less than `sequence_length`.
break_end_of_episode: If 'False' (True by default) does not break
sequences on env reset. In this case 'pad_end_of_episode' is not used.
validate_items: Whether to validate items against the table signature
before they are sent to the server. This requires table signature to be
fetched from the server and cached locally.
"""
del chunk_length
super().__init__(
client=client,
# We need an additional space in the buffer for the partial step the
# base.ReverbAdder will add with the next observation.
max_sequence_length=sequence_length+1,
delta_encoded=delta_encoded,
priority_fns=priority_fns,
max_in_flight_items=max_in_flight_items,
validate_items=validate_items)
if pad_end_of_episode and not break_end_of_episode:
raise ValueError(
'Can\'t set pad_end_of_episode=True and break_end_of_episode=False at'
' the same time, since those behaviors are incompatible.')
self._period = period
self._sequence_length = sequence_length
if end_of_episode_behavior and (pad_end_of_episode is not None or
break_end_of_episode is not None):
raise ValueError(
'Using end_of_episode_behavior and either '
'pad_end_of_episode or break_end_of_episode is not permitted. '
'Please use only end_of_episode_behavior instead.')
# Set pad_end_of_episode and break_end_of_episode to default values.
if end_of_episode_behavior is None and pad_end_of_episode is None:
pad_end_of_episode = True
if end_of_episode_behavior is None and break_end_of_episode is None:
break_end_of_episode = True
self._end_of_episode_behavior = EndBehavior.ZERO_PAD
if pad_end_of_episode is not None or break_end_of_episode is not None:
if not break_end_of_episode:
self._end_of_episode_behavior = EndBehavior.CONTINUE
elif break_end_of_episode and pad_end_of_episode:
self._end_of_episode_behavior = EndBehavior.ZERO_PAD
elif break_end_of_episode and not pad_end_of_episode:
self._end_of_episode_behavior = EndBehavior.TRUNCATE
else:
raise ValueError(
'Reached an unexpected configuration of the SequenceAdder '
f'with break_end_of_episode={break_end_of_episode} '
f'and pad_end_of_episode={pad_end_of_episode}.')
elif isinstance(end_of_episode_behavior, EndBehavior):
self._end_of_episode_behavior = end_of_episode_behavior
else:
raise ValueError('end_of_episode_behavior must be an instance of '
f'EndBehavior, received {end_of_episode_behavior}.')
def reset(self): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
"""Resets the adder's buffer."""
# If we do not write on end of episode, we should not reset the writer.
if self._end_of_episode_behavior is EndBehavior.CONTINUE:
return
super().reset()
def _write(self):
self._maybe_create_item(self._sequence_length)
def _write_last(self):
# Maybe determine the delta to the next time we would write a sequence.
if self._end_of_episode_behavior in (EndBehavior.TRUNCATE,
EndBehavior.ZERO_PAD):
delta = self._sequence_length - self._writer.episode_steps
if delta < 0:
delta = (self._period + delta) % self._period
# Handle various end-of-episode cases.
if self._end_of_episode_behavior is EndBehavior.CONTINUE:
self._maybe_create_item(self._sequence_length, end_of_episode=True)
elif self._end_of_episode_behavior is EndBehavior.WRITE:
# Drop episodes that are too short.
if self._writer.episode_steps < self._sequence_length:
return
self._maybe_create_item(
self._sequence_length, end_of_episode=True, force=True)
elif self._end_of_episode_behavior is EndBehavior.TRUNCATE:
self._maybe_create_item(
self._sequence_length - delta,
end_of_episode=True,
force=True)
elif self._end_of_episode_behavior is EndBehavior.ZERO_PAD:
zero_step = tree.map_structure(lambda x: np.zeros_like(x[-2].numpy()),
self._writer.history)
for _ in range(delta):
self._writer.append(zero_step)
self._maybe_create_item(
self._sequence_length, end_of_episode=True, force=True)
else:
raise ValueError(
f'Unhandled end of episode behavior: {self._end_of_episode_behavior}.'
' This should never happen, please contact Acme dev team.')
def _maybe_create_item(self,
sequence_length: int,
*,
end_of_episode: bool = False,
force: bool = False):
# Check conditions under which a new item is created.
first_write = self._writer.episode_steps == sequence_length
# NOTE(bshahr): the following line assumes that the only way sequence_length
# is less than self._sequence_length, is if the episode is shorter than
# self._sequence_length.
period_reached = (
self._writer.episode_steps > self._sequence_length and
((self._writer.episode_steps - self._sequence_length) % self._period
== 0))
if not first_write and not period_reached and not force:
return
# TODO(b/183945808): will need to change to adhere to the new protocol.
if not end_of_episode:
get_traj = operator.itemgetter(slice(-sequence_length - 1, -1))
else:
get_traj = operator.itemgetter(slice(-sequence_length, None))
history = self._writer.history
trajectory = base.Trajectory(**tree.map_structure(get_traj, history))
# Compute priorities for the buffer.
table_priorities = utils.calculate_priorities(self._priority_fns,
trajectory)
# Create a prioritized item for each table.
for table_name, priority in table_priorities.items():
self._writer.create_item(table_name, priority, trajectory)
self._writer.flush(self._max_in_flight_items)
# TODO(bshahr): make this into a standalone method. Class methods should be
# used as alternative constructors or when modifying some global state,
# neither of which is done here.
@classmethod
def signature(cls, environment_spec: specs.EnvironmentSpec,
extras_spec: types.NestedSpec = (),
sequence_length: Optional[int] = None):
"""This is a helper method for generating signatures for Reverb tables.
Signatures are useful for validating data types and shapes, see Reverb's
documentation for details on how they are used.
Args:
environment_spec: A `specs.EnvironmentSpec` whose fields are nested
structures with leaf nodes that have `.shape` and `.dtype` attributes.
This should come from the environment that will be used to generate
the data inserted into the Reverb table.
extras_spec: A nested structure with leaf nodes that have `.shape` and
`.dtype` attributes. The structure (and shapes/dtypes) of this must
be the same as the `extras` passed into `ReverbAdder.add`.
sequence_length: An optional integer representing the expected length of
sequences that will be added to replay.
Returns:
A `Trajectory` whose leaf nodes are `tf.TensorSpec` objects.
"""
def add_time_dim(paths: Iterable[str], spec: tf.TensorSpec):
return tf.TensorSpec(shape=(sequence_length, *spec.shape),
dtype=spec.dtype,
name='/'.join(str(p) for p in paths))
trajectory_env_spec, trajectory_extras_spec = tree.map_structure_with_path(
add_time_dim, (environment_spec, extras_spec))
spec_step = base.Trajectory(
*trajectory_env_spec,
start_of_episode=tf.TensorSpec(
shape=(sequence_length,), dtype=tf.bool, name='start_of_episode'),
extras=trajectory_extras_spec)
return spec_step
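# --- Illustrative sketch (added for exposition; not part of the original acme
# file). Uses the sequence_length=3, period=2 setting from the EndBehavior
# docstring: overlapping sequences [1 2 3], [3 4 5], ... are written, and the
# final partial sequence is zero-padded. `client` is assumed to point at a
# running Reverb server.
def _example_sequence_adder(client: reverb.Client) -> SequenceAdder:
  return SequenceAdder(
      client,
      sequence_length=3,
      period=2,
      end_of_episode_behavior=EndBehavior.ZERO_PAD,
  )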
|
acme-master
|
acme/adders/reverb/sequence.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transition adders.
This implements an N-step transition adder which collapses trajectory sequences
into a single transition, simplifying to a simple transition adder when N=1.
"""
import copy
from typing import Optional, Tuple
from acme import specs
from acme import types
from acme.adders.reverb import base
from acme.adders.reverb import utils
from acme.utils import tree_utils
import numpy as np
import reverb
import tree
class NStepTransitionAdder(base.ReverbAdder):
"""An N-step transition adder.
This will buffer a sequence of N timesteps in order to form a single N-step
transition which is added to reverb for future retrieval.
For N=1 the data added to replay will be a standard one-step transition which
takes the form:
(s_t, a_t, r_t, d_t, s_{t+1}, e_t)
where:
s_t = state observation at time t
a_t = the action taken from s_t
r_t = reward ensuing from action a_t
d_t = environment discount ensuing from action a_t. This discount is
applied to future rewards after r_t.
e_t [Optional] = extra data that the agent persists in replay.
For N greater than 1, transitions are of the form:
(s_t, a_t, R_{t:t+n}, D_{t:t+n}, s_{t+N}, e_t),
where:
s_t = State (observation) at time t.
a_t = Action taken from state s_t.
g = the additional discount, used by the agent to discount future returns.
R_{t:t+n} = N-step discounted return, i.e. accumulated over N rewards:
R_{t:t+n} := r_t + g * d_t * r_{t+1} + ...
+ g^{n-1} * d_t * ... * d_{t+n-2} * r_{t+n-1}.
D_{t:t+n}: N-step product of agent discounts g_i and environment
"discounts" d_i.
D_{t:t+n} := g^{n-1} * d_{t} * ... * d_{t+n-1},
For most environments d_i is 1 for all steps except the last,
i.e. it is the episode termination signal.
s_{t+n}: The "arrival" state, i.e. the state at time t+n.
e_t [Optional]: A nested structure of any 'extras' the user wishes to add.
Notes:
- At the beginning and end of episodes, shorter transitions are added.
That is, at the beginning of the episode, it will add:
(s_0 -> s_1), (s_0 -> s_2), ..., (s_0 -> s_n), (s_1 -> s_{n+1})
And at the end of the episode, it will add:
(s_{T-n+1} -> s_T), (s_{T-n+2} -> s_T), ... (s_{T-1} -> s_T).
- We add the *first* `extra` of each transition, not the *last*, i.e.
if extras are provided, we get e_t, not e_{t+n}.
"""
def __init__(
self,
client: reverb.Client,
n_step: int,
discount: float,
*,
priority_fns: Optional[base.PriorityFnMapping] = None,
max_in_flight_items: int = 5,
):
"""Creates an N-step transition adder.
Args:
client: A `reverb.Client` to send the data to replay through.
n_step: The "N" in N-step transition. See the class docstring for the
precise definition of what an N-step transition is. `n_step` must be at
least 1; when it equals 1 this reduces to the standard one-step transition,
i.e. (s_t, a_t, r_t, d_t, s_{t+1}, e_t).
discount: Discount factor to apply. This corresponds to the agent's
discount in the class docstring.
priority_fns: See docstring for BaseAdder.
max_in_flight_items: The maximum number of items allowed to be "in flight"
at the same time. See `block_until_num_items` in
`reverb.TrajectoryWriter.flush` for more info.
Raises:
ValueError: If n_step is less than 1.
"""
# Makes the additional discount a float32, which means that it will be
# upcast if rewards/discounts are float64 and left alone otherwise.
self.n_step = n_step
self._discount = tree.map_structure(np.float32, discount)
self._first_idx = 0
self._last_idx = 0
super().__init__(
client=client,
max_sequence_length=n_step + 1,
priority_fns=priority_fns,
max_in_flight_items=max_in_flight_items)
def add(self, *args, **kwargs):
# Increment the indices for the start and end of the window for computing
# n-step returns.
if self._writer.episode_steps >= self.n_step:
self._first_idx += 1
self._last_idx += 1
super().add(*args, **kwargs)
def reset(self): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
super().reset()
self._first_idx = 0
self._last_idx = 0
@property
def _n_step(self) -> int:
"""Effective n-step, which may vary at starts and ends of episodes."""
return self._last_idx - self._first_idx
def _write(self):
# Convenient getters for use in tree operations.
get_first = lambda x: x[self._first_idx]
get_last = lambda x: x[self._last_idx]
# Note: this getter is meant to be used on a TrajectoryWriter.history to
# obtain its numpy values.
get_all_np = lambda x: x[self._first_idx:self._last_idx].numpy()
# Get the state, action, next_state, as well as possibly extras for the
# transition that is about to be written.
history = self._writer.history
s, a = tree.map_structure(get_first,
(history['observation'], history['action']))
s_ = tree.map_structure(get_last, history['observation'])
# Maybe get extras to add to the transition later.
if 'extras' in history:
extras = tree.map_structure(get_first, history['extras'])
# Note: at the beginning of an episode we will add the initial N-1
# transitions (of size 1, 2, ...) and at the end of an episode (when
# called from write_last) we will write the final transitions of size (N,
# N-1, ...). See the Note in the docstring.
# Get numpy view of the steps to be fed into the priority functions.
reward, discount = tree.map_structure(
get_all_np, (history['reward'], history['discount']))
# Compute discounted return and geometric discount over n steps.
n_step_return, total_discount = self._compute_cumulative_quantities(
reward, discount)
# Append the computed n-step return and total discount.
# Note: if this call to _write() is within a call to _write_last(), then
# this is the only data being appended and so it is not a partial append.
self._writer.append(
dict(n_step_return=n_step_return, total_discount=total_discount),
partial_step=self._writer.episode_steps <= self._last_idx)
# This should be done immediately after self._writer.append so the history
# includes the recently appended data.
history = self._writer.history
# Form the n-step transition by using the following:
# the first observation and action in the buffer, along with the cumulative
# reward and discount computed above.
n_step_return, total_discount = tree.map_structure(
lambda x: x[-1], (history['n_step_return'], history['total_discount']))
transition = types.Transition(
observation=s,
action=a,
reward=n_step_return,
discount=total_discount,
next_observation=s_,
extras=(extras if 'extras' in history else ()))
# Calculate the priority for this transition.
table_priorities = utils.calculate_priorities(self._priority_fns,
transition)
# Insert the transition into replay along with its priority.
for table, priority in table_priorities.items():
self._writer.create_item(
table=table, priority=priority, trajectory=transition)
self._writer.flush(self._max_in_flight_items)
def _write_last(self):
# Write the remaining shorter transitions by alternating writing and
incrementing first_idx. Note that last_idx will no longer be incremented
# once we're in this method's scope.
self._first_idx += 1
while self._first_idx < self._last_idx:
self._write()
self._first_idx += 1
def _compute_cumulative_quantities(
self, rewards: types.NestedArray, discounts: types.NestedArray
) -> Tuple[types.NestedArray, types.NestedArray]:
# Give the same tree structure to the n-step return accumulator,
# n-step discount accumulator, and self.discount, so that they can be
# iterated in parallel using tree.map_structure.
rewards, discounts, self_discount = tree_utils.broadcast_structures(
rewards, discounts, self._discount)
flat_rewards = tree.flatten(rewards)
flat_discounts = tree.flatten(discounts)
flat_self_discount = tree.flatten(self_discount)
# Copy total_discount as it is otherwise read-only.
total_discount = [np.copy(a[0]) for a in flat_discounts]
# Broadcast n_step_return to have the broadcasted shape of
# reward * discount.
n_step_return = [
np.copy(np.broadcast_to(r[0],
np.broadcast(r[0], d).shape))
for r, d in zip(flat_rewards, total_discount)
]
# NOTE: total_discount will have one less self_discount applied to it than
# the value of self._n_step. This is so that when the learner/update uses
# an additional discount we don't apply it twice. Inside the following loop
# we will apply this right before summing up the n_step_return.
for i in range(1, self._n_step):
for nsr, td, r, d, sd in zip(n_step_return, total_discount, flat_rewards,
flat_discounts, flat_self_discount):
# Equivalent to: `total_discount *= self._discount`.
td *= sd
# Equivalent to: `n_step_return += reward[i] * total_discount`.
nsr += r[i] * td
# Equivalent to: `total_discount *= discount[i]`.
td *= d[i]
n_step_return = tree.unflatten_as(rewards, n_step_return)
total_discount = tree.unflatten_as(rewards, total_discount)
return n_step_return, total_discount
# TODO(bshahr): make this into a standalone method. Class methods should be
# used as alternative constructors or when modifying some global state,
# neither of which is done here.
@classmethod
def signature(cls,
environment_spec: specs.EnvironmentSpec,
extras_spec: types.NestedSpec = ()):
# This function currently assumes that self._discount is a scalar.
# If it ever becomes a nested structure and/or a np.ndarray, this method
# will need to know its structure / shape. This is because the signature
# discount shape is the environment's discount shape and this adder's
# discount shape broadcasted together. Also, the reward shape is this
# signature discount shape broadcasted together with the environment
# reward shape. As long as self._discount is a scalar, it will not affect
# either the signature discount shape nor the signature reward shape, so we
# can ignore it.
rewards_spec, step_discounts_spec = tree_utils.broadcast_structures(
environment_spec.rewards, environment_spec.discounts)
rewards_spec = tree.map_structure(_broadcast_specs, rewards_spec,
step_discounts_spec)
step_discounts_spec = tree.map_structure(copy.deepcopy, step_discounts_spec)
transition_spec = types.Transition(
environment_spec.observations,
environment_spec.actions,
rewards_spec,
step_discounts_spec,
environment_spec.observations, # next_observation
extras_spec)
return tree.map_structure_with_path(base.spec_like_to_tensor_spec,
transition_spec)
def _broadcast_specs(*args: specs.Array) -> specs.Array:
"""Like np.broadcast, but for specs.Array.
Args:
*args: one or more specs.Array instances.
Returns:
A specs.Array with the broadcasted shape and dtype of the specs in *args.
"""
bc_info = np.broadcast(*tuple(a.generate_value() for a in args))
dtype = np.result_type(*tuple(a.dtype for a in args))
return specs.Array(shape=bc_info.shape, dtype=dtype)
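# --- Illustrative sketch (added for exposition; not part of the original acme
# file). A worked instance of the quantities defined in the class docstring,
# for n_step=3, scalar rewards r=[1, 2, 3], environment discounts d=[1, 1, 0]
# and additional (agent) discount g=0.9:
#   R_{t:t+3} = r_0 + g*d_0*r_1 + g^2*d_0*d_1*r_2 = 1 + 1.8 + 2.43 = 5.23
#   D_{t:t+3} = g^2 * d_0*d_1*d_2 = 0.0
def _example_n_step_quantities():
  g = 0.9
  r = np.array([1.0, 2.0, 3.0])
  d = np.array([1.0, 1.0, 0.0])
  n_step_return = r[0] + g * d[0] * r[1] + g**2 * d[0] * d[1] * r[2]
  total_discount = g**2 * d[0] * d[1] * d[2]
  np.testing.assert_allclose(n_step_return, 5.23)
  np.testing.assert_allclose(total_discount, 0.0)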
|
acme-master
|
acme/adders/reverb/transition.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adders for Reverb replay buffers."""
# pylint: disable=unused-import
from acme.adders.reverb.base import DEFAULT_PRIORITY_TABLE
from acme.adders.reverb.base import PriorityFn
from acme.adders.reverb.base import PriorityFnInput
from acme.adders.reverb.base import ReverbAdder
from acme.adders.reverb.base import Step
from acme.adders.reverb.base import Trajectory
from acme.adders.reverb.episode import EpisodeAdder
from acme.adders.reverb.sequence import EndBehavior
from acme.adders.reverb.sequence import SequenceAdder
from acme.adders.reverb.structured import create_n_step_transition_config
from acme.adders.reverb.structured import create_step_spec
from acme.adders.reverb.structured import StructuredAdder
from acme.adders.reverb.transition import NStepTransitionAdder
|
acme-master
|
acme/adders/reverb/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for NStepTransition adders."""
from acme.adders.reverb import test_cases
from acme.adders.reverb import test_utils
from acme.adders.reverb import transition as adders
from absl.testing import absltest
from absl.testing import parameterized
class NStepTransitionAdderTest(test_utils.AdderTestMixin,
parameterized.TestCase):
@parameterized.named_parameters(*test_cases.TEST_CASES_FOR_TRANSITION_ADDER)
def test_adder(self, n_step, additional_discount, first, steps,
expected_transitions):
adder = adders.NStepTransitionAdder(self.client, n_step,
additional_discount)
super().run_test_adder(
adder=adder,
first=first,
steps=steps,
expected_items=expected_transitions,
stack_sequence_fields=False,
signature=adder.signature(*test_utils.get_specs(steps[0])))
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/adders/reverb/transition_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Reverb-based adders."""
from typing import Dict, Union
from acme import types
from acme.adders.reverb import base
import jax
import jax.numpy as jnp
import numpy as np
import tree
def zeros_like(x: Union[np.ndarray, int, float, np.number]):
"""Returns a zero-filled object of the same (d)type and shape as the input.
The difference between this and `np.zeros_like()` is that this works well
with `np.number`, `int`, `float`, and `jax.Array` objects without
converting them to `np.ndarray`s.
Args:
x: The object to replace with 0s.
Returns:
A zero-filled object of the same (d)type and shape as the input.
"""
if isinstance(x, (int, float, np.number)):
return type(x)(0)
elif isinstance(x, jax.Array):
return jnp.zeros_like(x)
elif isinstance(x, np.ndarray):
return np.zeros_like(x)
else:
raise ValueError(
f'Input ({type(x)}) must be a numpy array, a jax array, an int, or a float.')
def final_step_like(step: base.Step,
next_observation: types.NestedArray) -> base.Step:
"""Return a list of steps with the final step zero-filled."""
# Make zero-filled components so we can fill out the last step.
zero_action, zero_reward, zero_discount, zero_extras = tree.map_structure(
zeros_like, (step.action, step.reward, step.discount, step.extras))
# Return a final step that only has next_observation.
return base.Step(
observation=next_observation,
action=zero_action,
reward=zero_reward,
discount=zero_discount,
start_of_episode=False,
extras=zero_extras)
def calculate_priorities(
priority_fns: base.PriorityFnMapping,
trajectory_or_transition: Union[base.Trajectory, types.Transition],
) -> Dict[str, float]:
"""Helper used to calculate the priority of a Trajectory or Transition.
This helper converts the leaves of the Trajectory or Transition from
`reverb.TrajectoryColumn` objects into numpy arrays. The converted Trajectory
or Transition is then passed into each of the functions in `priority_fns`.
Args:
priority_fns: a mapping from table names to priority functions (i.e. a
callable of type PriorityFn). The given function will be used to generate
the priority (a float) for the given table.
trajectory_or_transition: the trajectory or transition used to compute
priorities.
Returns:
A dictionary mapping from table names to the priority (a float) for the
given collection Trajectory or Transition.
"""
if any([priority_fn is not None for priority_fn in priority_fns.values()]):
trajectory_or_transition = tree.map_structure(lambda col: col.numpy(),
trajectory_or_transition)
return {
table: (priority_fn(trajectory_or_transition) if priority_fn else 1.0)
for table, priority_fn in priority_fns.items()
}
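# --- Illustrative sketch (added for exposition; not part of the original acme
# file). Demonstrates `zeros_like` on the scalar and array types it supports,
# using only this module's imports.
def _example_zeros_like():
  assert zeros_like(3) == 0 and isinstance(zeros_like(3), int)
  assert zeros_like(np.float32(2.5)) == np.float32(0.0)
  np.testing.assert_array_equal(zeros_like(np.ones([2, 3])), np.zeros([2, 3]))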
|
acme-master
|
acme/adders/reverb/utils.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic adders that wraps Reverb's StructuredWriter."""
import itertools
import time
from typing import Callable, List, Optional, Sequence, Sized
from absl import logging
from acme import specs
from acme import types
from acme.adders import base as adders_base
from acme.adders.reverb import base as reverb_base
from acme.adders.reverb import sequence as sequence_adder
import dm_env
import numpy as np
import reverb
from reverb import structured_writer as sw
import tensorflow as tf
import tree
Step = reverb_base.Step
Trajectory = reverb_base.Trajectory
EndBehavior = sequence_adder.EndBehavior
_RESET_WRITER_EVERY_SECONDS = 60
class StructuredAdder(adders_base.Adder):
"""Generic Adder which writes to Reverb using Reverb's `StructuredWriter`.
The StructuredAdder is a thin wrapper around Reverb's `StructuredWriter` and
  its behaviour is determined by the configs passed to `__init__`. Much of the
  behaviour provided by other Adders can be replicated using `StructuredAdder`,
  but there are a few noteworthy differences:
* The behaviour of `StructuredAdder` can be thought of as the union of all
its configs. This means that a single adder is capable of inserting items
of different structures into any number of tables WITHOUT any data
duplication. Other adders are only capable of writing items of the same
structure into multiple tables.
* The complete structure of the step must be known at construction time when
using the StructuredAdder. This is not the case for other Adders as they
allow the structure of the step to become expanded over time.
* The `StructuredAdder` assigns all items the same priority (1.0) as it does
not currently support custom priority computations.
* The StructuredAdder is completely generic and thus does not perform any
preprocessing on the data (e.g. cumulative rewards as done by the
NStepTransitionAdder) before writing it to Reverb. The user is instead
expected to perform preprocessing in the dataset pipeline on the learner.
"""
def __init__(self, client: reverb.Client, max_in_flight_items: int,
configs: Sequence[sw.Config], step_spec: Step):
"""Initialize a StructuredAdder instance.
Args:
client: A client to the Reverb backend.
max_in_flight_items: The maximum number of items allowed to be "in flight"
at the same time. See `block_until_num_items` in
`reverb.TrajectoryWriter.flush` for more info.
configs: Configurations defining the behaviour of the wrapped Reverb
writer.
step_spec: spec of the step that is going to be inserted in the Adder. It
        can be created with `create_step_spec` using the environment spec and
        the extras spec.
"""
# We validate the configs by attempting to infer the signatures of all
# targeted tables.
for table, table_configs in itertools.groupby(configs, lambda c: c.table):
try:
sw.infer_signature(list(table_configs), step_spec)
except ValueError as e:
raise ValueError(
f'Received invalid configs for table {table}: {str(e)}') from e
self._client = client
self._configs = tuple(configs)
self._none_step: Step = tree.map_structure(lambda _: None, step_spec)
self._max_in_flight_items = max_in_flight_items
self._writer = None
self._writer_created_at = None
def __del__(self):
if self._writer is None:
return
    # Try to flush all appended data before closing to avoid losing experience.
try:
self._writer.flush(0, timeout_ms=10_000)
except reverb.DeadlineExceededError as e:
logging.error(
'Timeout (10 s) exceeded when flushing the writer before '
'deleting it. Caught Reverb exception: %s', str(e))
def _make_step(self, **kwargs) -> Step:
"""Complete the step with None in the missing positions."""
return Step(**{**self._none_step._asdict(), **kwargs})
@property
def configs(self):
return self._configs
def reset(self, timeout_ms: Optional[int] = None):
"""Marks the active episode as completed and flushes pending items."""
if self._writer is not None:
# Flush all pending items.
self._writer.end_episode(clear_buffers=True, timeout_ms=timeout_ms)
# Create a new writer unless the current one is too young.
# This is to reduce the relative overhead of creating a new Reverb writer.
if time.time() - self._writer_created_at > _RESET_WRITER_EVERY_SECONDS:
self._writer = None
def add_first(self, timestep: dm_env.TimeStep):
"""Record the first observation of an episode."""
if not timestep.first():
raise ValueError(
          'adder.add_first called with a timestep that was not the first of its '
'episode (i.e. one for which timestep.first() is not True)')
if self._writer is None:
self._writer = self._client.structured_writer(self._configs)
self._writer_created_at = time.time()
# Record the next observation but leave the history buffer row open by
# passing `partial_step=True`.
self._writer.append(
data=self._make_step(
observation=timestep.observation,
start_of_episode=timestep.first()),
partial_step=True)
self._writer.flush(self._max_in_flight_items)
def add(self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
extras: types.NestedArray = ()):
"""Record an action and the following timestep."""
if self._writer is None or not self._writer.step_is_open:
raise ValueError('adder.add_first must be called before adder.add.')
# Add the timestep to the buffer.
has_extras = (
len(extras) > 0 if isinstance(extras, Sized) # pylint: disable=g-explicit-length-test
else extras is not None)
current_step = self._make_step(
action=action,
reward=next_timestep.reward,
discount=next_timestep.discount,
extras=extras if has_extras else self._none_step.extras)
self._writer.append(current_step)
# Record the next observation and write.
self._writer.append(
data=self._make_step(
observation=next_timestep.observation,
start_of_episode=next_timestep.first()),
partial_step=True)
self._writer.flush(self._max_in_flight_items)
if next_timestep.last():
# Complete the row by appending zeros to remaining open fields.
# TODO(b/183945808): remove this when fields are no longer expected to be
# of equal length on the learner side.
dummy_step = tree.map_structure(
lambda x: None if x is None else np.zeros_like(x), current_step)
self._writer.append(dummy_step)
self.reset()
def create_step_spec(
environment_spec: specs.EnvironmentSpec, extras_spec: types.NestedSpec = ()
) -> Step:
return Step(
*environment_spec,
start_of_episode=tf.TensorSpec([], tf.bool, 'start_of_episode'),
extras=extras_spec)
def _last_n(n: int, step_spec: Step) -> Trajectory:
"""Constructs a sequence with the last n elements of all the Step fields."""
return Trajectory(*tree.map_structure(lambda x: x[-n:], step_spec))
def create_sequence_config(
step_spec: Step,
sequence_length: int,
period: int,
table: str = reverb_base.DEFAULT_PRIORITY_TABLE,
end_of_episode_behavior: EndBehavior = EndBehavior.TRUNCATE,
sequence_pattern: Callable[[int, Step], Trajectory] = _last_n,
) -> List[sw.Config]:
"""Generates configs that produces the same behaviour as `SequenceAdder`.
NOTE! ZERO_PAD is not supported as the same behaviour can be achieved by
writing with TRUNCATE and then adding padding in the dataset pipeline on the
learner.
Args:
step_spec: The full structure of the data which will be appended to the
Reverb `StructuredWriter` in each step. Please use `create_step_spec` to
create `step_spec`.
sequence_length: The number of steps that each trajectory should span.
period: The period with which we add sequences. If less than
sequence_length, overlapping sequences are added. If equal to
sequence_length, sequences are exactly non-overlapping.
table: Name of the Reverb table to write items to. Defaults to the default
Acme table.
    end_of_episode_behavior: Determines how sequences at the end of the episode
      are handled (default `EndBehavior.TRUNCATE`). See the docstring of
      `EndBehavior` for more information.
sequence_pattern: Transformation to obtain a sequence given the length
and the shape of the step.
Returns:
A list of configs for `StructuredAdder` to produce the described behaviour.
Raises:
ValueError: If sequence_length is <= 0.
    NotImplementedError: If `end_of_episode_behavior` is `ZERO_PAD` or
      `CONTINUE`.
"""
if sequence_length <= 0:
raise ValueError(f'sequence_length must be > 0 but got {sequence_length}.')
if end_of_episode_behavior == EndBehavior.ZERO_PAD:
raise NotImplementedError(
'Zero-padding is not supported. Please use TRUNCATE instead.')
if end_of_episode_behavior == EndBehavior.CONTINUE:
raise NotImplementedError('Merging episodes is not supported.')
def _sequence_pattern(n: int) -> sw.Pattern:
return sw.pattern_from_transform(step_spec,
lambda step: sequence_pattern(n, step))
# The base config is considered for all but the last step in the episode. No
  # trajectories are created for the first `sequence_length-1` steps and then a
# new trajectory is inserted every `period` steps.
base_config = sw.create_config(
pattern=_sequence_pattern(sequence_length),
table=table,
conditions=[
sw.Condition.step_index() >= sequence_length - 1,
sw.Condition.step_index() % period == (sequence_length - 1) % period,
])
end_of_episode_configs = []
if end_of_episode_behavior == EndBehavior.WRITE:
# Simply write a trajectory in exactly the same way as the base config. The
# only difference here is that we ALWAYS create a trajectory even if it
    # doesn't align with the `period`. The exception to this rule is episodes
    # that are shorter than `sequence_length` steps, which are ignored
    # completely.
config = sw.create_config(
pattern=_sequence_pattern(sequence_length),
table=table,
conditions=[
sw.Condition.is_end_episode(),
sw.Condition.step_index() >= sequence_length - 1,
])
end_of_episode_configs.append(config)
elif end_of_episode_behavior == EndBehavior.TRUNCATE:
# The first trajectory is written at step index `sequence_length - 1` and
# then written every `period` step. This means that the
    # `step_index % period` will always be equal to the value below every time
    # a trajectory is written.
target = (sequence_length - 1) % period
    # When the episode ends we still want to capture the steps that have been
# appended since the last item was created. We do this by creating a config
# for all `step_index % period`, except `target`, and condition these
# configs so that they only are triggered when `end_episode` is called.
for x in range(period):
# When the last step is aligned with the period of the inserts then no
# action is required as the item was already generated by `base_config`.
if x == target:
continue
# If we were to pad the trajectory then we'll need to continue adding
# padding until `step_index % period` is equal to `target` again. We can
# exploit this relation by conditioning the config to only be applied for
# a single value of `step_index % period`. This constraint means that we
# can infer the number of padding steps required until the next write
# would have occurred if the episode didn't end.
#
# Now if we assume that the padding instead is added on the dataset (or
# the trajectory is simply truncated) then we can infer from the above
# that the number of real steps in this padded trajectory will be the
# difference between `sequence_length` and number of pad steps.
num_pad_steps = (target - x) % period
unpadded_length = sequence_length - num_pad_steps
config = sw.create_config(
pattern=_sequence_pattern(unpadded_length),
table=table,
conditions=[
sw.Condition.is_end_episode(),
sw.Condition.step_index() % period == x,
sw.Condition.step_index() >= sequence_length,
])
end_of_episode_configs.append(config)
# The above configs will capture the "remainder" of any episode that is at
# least `sequence_length` steps long. However, if the entire episode is
# shorter than `sequence_length` then data might still be lost. We avoid
# this by simply creating `sequence_length-1` configs that capture the last
# `x` steps iff the entire episode is `x` steps long.
for x in range(1, sequence_length):
config = sw.create_config(
pattern=_sequence_pattern(x),
table=table,
conditions=[
sw.Condition.is_end_episode(),
sw.Condition.step_index() == x - 1,
])
end_of_episode_configs.append(config)
else:
raise ValueError(
        f'Unexpected `end_of_episode_behavior`: {end_of_episode_behavior}')
return [base_config] + end_of_episode_configs
def create_n_step_transition_config(
step_spec: Step,
n_step: int,
table: str = reverb_base.DEFAULT_PRIORITY_TABLE) -> List[sw.Config]:
"""Generates configs that replicates the behaviour of NStepTransitionAdder.
Please see the docstring of NStepTransitionAdder for more details.
NOTE! In contrast to NStepTransitionAdder, the trajectories written by the
  `StructuredWriter` do not include the precomputed cumulative rewards and
  discounts. Instead, each trajectory includes the raw rewards and discounts
  required to compute these values.
Args:
step_spec: The full structure of the data which will be appended to the
Reverb `StructuredWriter` in each step. Please use `create_step_spec` to
create `step_spec`.
n_step: The "N" in N-step transition. See the class docstring for the
precise definition of what an N-step transition is. `n_step` must be at
least 1, in which case we use the standard one-step transition, i.e. (s_t,
a_t, r_t, d_t, s_t+1, e_t).
table: Name of the Reverb table to write items to. Defaults to the default
Acme table.
Returns:
A list of configs for `StructuredAdder` to produce the described behaviour.
"""
def _make_pattern(n: int):
ref_step = sw.create_reference_step(step_spec)
get_first = lambda x: x[-(n + 1):-n]
get_all = lambda x: x[-(n + 1):-1]
get_first_and_last = lambda x: x[-(n + 1)::n]
tmap = tree.map_structure
    # We use the exact same structure as we do when writing sequences, except
# we trim the number of steps in each sub tree. This has the benefit that
# the postprocessing used to transform these items into N-step transition
# structures (cumulative rewards and discounts etc.) can be applied on
# full sequence items as well. The only difference being that the latter is
# more wasteful than the trimmed down version we write here.
return Trajectory(
observation=tmap(get_first_and_last, ref_step.observation),
action=tmap(get_first, ref_step.action),
reward=tmap(get_all, ref_step.reward),
discount=tmap(get_all, ref_step.discount),
start_of_episode=tmap(get_first, ref_step.start_of_episode),
extras=tmap(get_first, ref_step.extras))
  # At the start of each episode we'll add shorter transitions.
start_of_episode_configs = []
for n in range(1, n_step):
config = sw.create_config(
pattern=_make_pattern(n),
table=table,
conditions=[
sw.Condition.step_index() == n,
],
)
start_of_episode_configs.append(config)
# During all other steps we'll add a full N-step transition.
base_config = sw.create_config(pattern=_make_pattern(n_step), table=table)
# When the episode ends we'll add shorter transitions.
end_of_episode_configs = []
for n in range(n_step - 1, 0, -1):
config = sw.create_config(
pattern=_make_pattern(n),
table=table,
conditions=[
sw.Condition.is_end_episode(),
# If the entire episode is shorter than n_step then the episode
# start configs will already create an item that covers all the
# steps so we add this filter here to avoid adding it again.
sw.Condition.step_index() != n,
],
)
end_of_episode_configs.append(config)
return start_of_episode_configs + [base_config] + end_of_episode_configs
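# The block below is an illustrative usage sketch added for clarity; it is not
# part of the original module. The Reverb server address is hypothetical and
# the environment spec is an arbitrary example; in real use both come from the
# surrounding agent/experiment setup.
if __name__ == '__main__':
  _env_spec = specs.EnvironmentSpec(
      observations=specs.Array(shape=(2,), dtype=np.float32),
      actions=specs.DiscreteArray(num_values=3),
      rewards=specs.Array(shape=(), dtype=np.float32),
      discounts=specs.BoundedArray(
          shape=(), dtype=np.float32, minimum=0.0, maximum=1.0))
  _step_spec = create_step_spec(_env_spec)
  # Configs replicating SequenceAdder behaviour: overlapping length-3 sequences
  # written every 2 steps and truncated at episode end.
  _configs = create_sequence_config(
      step_spec=_step_spec,
      sequence_length=3,
      period=2,
      end_of_episode_behavior=EndBehavior.TRUNCATE)
  _adder = StructuredAdder(
      client=reverb.Client('localhost:9999'),  # Hypothetical server address.
      max_in_flight_items=5,
      configs=_configs,
      step_spec=_step_spec)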
|
acme-master
|
acme/adders/reverb/structured.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Episode adders."""
from acme.adders.reverb import episode as adders
from acme.adders.reverb import test_utils
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
class EpisodeAdderTest(test_utils.AdderTestMixin, parameterized.TestCase):
@parameterized.parameters(2, 10, 50)
def test_adder(self, max_sequence_length):
adder = adders.EpisodeAdder(self.client, max_sequence_length)
# Create a simple trajectory to add.
observations = range(max_sequence_length)
first, steps = test_utils.make_trajectory(observations)
expected_episode = test_utils.make_sequence(observations)
self.run_test_adder(
adder=adder,
first=first,
steps=steps,
expected_items=[expected_episode],
signature=adder.signature(*test_utils.get_specs(steps[0])))
@parameterized.parameters(2, 10, 50)
def test_max_sequence_length(self, max_sequence_length):
adder = adders.EpisodeAdder(self.client, max_sequence_length)
first, steps = test_utils.make_trajectory(range(max_sequence_length + 1))
adder.add_first(first)
for action, step in steps[:-1]:
adder.add(action, step)
# We should have max_sequence_length-1 timesteps that have been written,
    # where the -1 is due to the dangling observation (i.e. we have actually
# seen max_sequence_length observations).
self.assertEqual(self.num_items(), 0)
# Adding one more step should raise an error.
with self.assertRaises(ValueError):
action, step = steps[-1]
adder.add(action, step)
# Since the last insert failed it should not affect the internal state.
self.assertEqual(self.num_items(), 0)
@parameterized.parameters((2, 1), (10, 2), (50, 5))
def test_padding(self, max_sequence_length, padding):
adder = adders.EpisodeAdder(
self.client,
max_sequence_length + padding,
padding_fn=np.zeros)
# Create a simple trajectory to add.
observations = range(max_sequence_length)
first, steps = test_utils.make_trajectory(observations)
expected_episode = test_utils.make_sequence(observations)
for _ in range(padding):
expected_episode.append((0, 0, 0.0, 0.0, False, ()))
self.run_test_adder(
adder=adder,
first=first,
steps=steps,
expected_items=[expected_episode],
signature=adder.signature(*test_utils.get_specs(steps[0])))
@parameterized.parameters((2, 1), (10, 2), (50, 5))
def test_nonzero_padding(self, max_sequence_length, padding):
adder = adders.EpisodeAdder(
self.client,
max_sequence_length + padding,
padding_fn=lambda s, d: np.zeros(s, d) - 1)
# Create a simple trajectory to add.
observations = range(max_sequence_length)
first, steps = test_utils.make_trajectory(observations)
expected_episode = test_utils.make_sequence(observations)
for _ in range(padding):
expected_episode.append((-1, -1, -1.0, -1.0, False, ()))
self.run_test_adder(
adder=adder,
first=first,
steps=steps,
expected_items=[expected_episode],
signature=adder.signature(*test_utils.get_specs(steps[0])))
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/adders/reverb/episode_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cases used by multiple test files."""
from acme import types
from acme.adders.reverb import sequence as sequence_adder
import dm_env
import numpy as np
# Define the main set of test cases; these are given as parameterized tests to
# the test_adder method and describe a trajectory to add to replay and the
# expected transitions that should result from this trajectory. The expected
# transitions are of the form: (observation, action, reward, discount,
# next_observation, extras).
TEST_CASES_FOR_TRANSITION_ADDER = [
dict(
testcase_name='OneStepFinalReward',
n_step=1,
additional_discount=1.0,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=0.0, observation=2)),
(0, dm_env.transition(reward=0.0, observation=3)),
(0, dm_env.termination(reward=1.0, observation=4)),
),
expected_transitions=(
types.Transition(1, 0, 0.0, 1.0, 2),
types.Transition(2, 0, 0.0, 1.0, 3),
types.Transition(3, 0, 1.0, 0.0, 4),
)),
dict(
testcase_name='OneStepDict',
n_step=1,
additional_discount=1.0,
first=dm_env.restart({'foo': 1}),
steps=(
(0, dm_env.transition(reward=0.0, observation={'foo': 2})),
(0, dm_env.transition(reward=0.0, observation={'foo': 3})),
(0, dm_env.termination(reward=1.0, observation={'foo': 4})),
),
expected_transitions=(
types.Transition({'foo': 1}, 0, 0.0, 1.0, {'foo': 2}),
types.Transition({'foo': 2}, 0, 0.0, 1.0, {'foo': 3}),
types.Transition({'foo': 3}, 0, 1.0, 0.0, {'foo': 4}),
)),
dict(
testcase_name='OneStepExtras',
n_step=1,
additional_discount=1.0,
first=dm_env.restart(1),
steps=(
(
0,
dm_env.transition(reward=0.0, observation=2),
{
'state': 0
},
),
(
0,
dm_env.transition(reward=0.0, observation=3),
{
'state': 1
},
),
(
0,
dm_env.termination(reward=1.0, observation=4),
{
'state': 2
},
),
),
expected_transitions=(
types.Transition(1, 0, 0.0, 1.0, 2, {'state': 0}),
types.Transition(2, 0, 0.0, 1.0, 3, {'state': 1}),
types.Transition(3, 0, 1.0, 0.0, 4, {'state': 2}),
)),
dict(
testcase_name='OneStepExtrasZeroes',
n_step=1,
additional_discount=1.0,
first=dm_env.restart(1),
steps=(
(
0,
dm_env.transition(reward=0.0, observation=2),
np.zeros(1),
),
(
0,
dm_env.transition(reward=0.0, observation=3),
np.zeros(1),
),
(
0,
dm_env.termination(reward=1.0, observation=4),
np.zeros(1),
),
),
expected_transitions=(
types.Transition(1, 0, 0.0, 1.0, 2, np.zeros(1)),
types.Transition(2, 0, 0.0, 1.0, 3, np.zeros(1)),
types.Transition(3, 0, 1.0, 0.0, 4, np.zeros(1)),
)),
dict(
testcase_name='TwoStep',
n_step=2,
additional_discount=1.0,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=1.0, observation=2, discount=0.5)),
(0, dm_env.transition(reward=1.0, observation=3, discount=0.5)),
(0, dm_env.termination(reward=1.0, observation=4)),
),
expected_transitions=(
types.Transition(1, 0, 1.0, 0.50, 2),
types.Transition(1, 0, 1.5, 0.25, 3),
types.Transition(2, 0, 1.5, 0.00, 4),
types.Transition(3, 0, 1.0, 0.00, 4),
)),
dict(
testcase_name='TwoStepStructuredReward',
n_step=2,
additional_discount=1.0,
first=dm_env.restart(1),
steps=(
(0,
dm_env.transition(reward=(1.0, 2.0), observation=2, discount=0.5)),
(0,
dm_env.transition(reward=(1.0, 2.0), observation=3, discount=0.5)),
(0, dm_env.termination(reward=(1.0, 2.0), observation=4)),
),
expected_transitions=(
types.Transition(1, 0, (1.0, 2.0), (0.50, 0.50), 2),
types.Transition(1, 0, (1.5, 3.0), (0.25, 0.25), 3),
types.Transition(2, 0, (1.5, 3.0), (0.00, 0.00), 4),
types.Transition(3, 0, (1.0, 2.0), (0.00, 0.00), 4),
)),
dict(
testcase_name='TwoStepNDArrayReward',
n_step=2,
additional_discount=1.0,
first=dm_env.restart(1),
steps=(
(0,
dm_env.transition(
reward=np.array((1.0, 2.0)), observation=2, discount=0.5)),
(0,
dm_env.transition(
reward=np.array((1.0, 2.0)), observation=3, discount=0.5)),
(0, dm_env.termination(reward=np.array((1.0, 2.0)), observation=4)),
),
expected_transitions=(
types.Transition(1, 0, np.array((1.0, 2.0)), np.array((0.50, 0.50)),
2),
types.Transition(1, 0, np.array((1.5, 3.0)), np.array((0.25, 0.25)),
3),
types.Transition(2, 0, np.array((1.5, 3.0)), np.array((0.00, 0.00)),
4),
types.Transition(3, 0, np.array((1.0, 2.0)), np.array((0.00, 0.00)),
4),
)),
dict(
testcase_name='TwoStepStructuredDiscount',
n_step=2,
additional_discount=1.0,
first=dm_env.restart(1),
steps=(
(0,
dm_env.transition(
reward=1.0, observation=2, discount={
'a': 0.5,
'b': 0.1
})),
(0,
dm_env.transition(
reward=1.0, observation=3, discount={
'a': 0.5,
'b': 0.1
})),
(0, dm_env.termination(reward=1.0,
observation=4)._replace(discount={
'a': 0.0,
'b': 0.0
})),
),
expected_transitions=(
types.Transition(1, 0, {
'a': 1.0,
'b': 1.0
}, {
'a': 0.50,
'b': 0.10
}, 2),
types.Transition(1, 0, {
'a': 1.5,
'b': 1.1
}, {
'a': 0.25,
'b': 0.01
}, 3),
types.Transition(2, 0, {
'a': 1.5,
'b': 1.1
}, {
'a': 0.00,
'b': 0.00
}, 4),
types.Transition(3, 0, {
'a': 1.0,
'b': 1.0
}, {
'a': 0.00,
'b': 0.00
}, 4),
)),
dict(
testcase_name='TwoStepNDArrayDiscount',
n_step=2,
additional_discount=1.0,
first=dm_env.restart(1),
steps=(
(0,
dm_env.transition(
reward=1.0, observation=2, discount=np.array((0.5, 0.1)))),
(0,
dm_env.transition(
reward=1.0, observation=3, discount=np.array((0.5, 0.1)))),
(0, dm_env.termination(
reward=1.0,
observation=4)._replace(discount=np.array((0.0, 0.0)))),
),
expected_transitions=(
types.Transition(1, 0, np.array((1.0, 1.0)), np.array((0.50, 0.10)),
2),
types.Transition(1, 0, np.array((1.5, 1.1)), np.array((0.25, 0.01)),
3),
types.Transition(2, 0, np.array((1.5, 1.1)), np.array((0.00, 0.00)),
4),
types.Transition(3, 0, np.array((1.0, 1.0)), np.array((0.00, 0.00)),
4),
)),
dict(
testcase_name='TwoStepBroadcastedNDArrays',
n_step=2,
additional_discount=1.0,
first=dm_env.restart(1),
steps=(
(0,
dm_env.transition(
reward=np.array([[1.0, 2.0]]),
observation=2,
discount=np.array([[0.5], [0.1]]))),
(0,
dm_env.transition(
reward=np.array([[1.0, 2.0]]),
observation=3,
discount=np.array([[0.5], [0.1]]))),
(0, dm_env.termination(
reward=np.array([[1.0, 2.0]]),
observation=4)._replace(discount=np.array([[0.0], [0.0]]))),
),
expected_transitions=(
types.Transition(1, 0, np.array([[1.0, 2.0], [1.0, 2.0]]),
np.array([[0.50], [0.10]]), 2),
types.Transition(1, 0, np.array([[1.5, 3.0], [1.1, 2.2]]),
np.array([[0.25], [0.01]]), 3),
types.Transition(2, 0, np.array([[1.5, 3.0], [1.1, 2.2]]),
np.array([[0.00], [0.00]]), 4),
types.Transition(3, 0, np.array([[1.0, 2.0], [1.0, 2.0]]),
np.array([[0.00], [0.00]]), 4),
)),
dict(
testcase_name='TwoStepStructuredBroadcastedNDArrays',
n_step=2,
additional_discount=1.0,
first=dm_env.restart(1),
steps=(
(0,
dm_env.transition(
reward={'a': np.array([[1.0, 2.0]])},
observation=2,
discount=np.array([[0.5], [0.1]]))),
(0,
dm_env.transition(
reward={'a': np.array([[1.0, 2.0]])},
observation=3,
discount=np.array([[0.5], [0.1]]))),
(0,
dm_env.termination(
reward={
'a': np.array([[1.0, 2.0]])
}, observation=4)._replace(discount=np.array([[0.0], [0.0]]))),
),
expected_transitions=(
types.Transition(1, 0, {'a': np.array([[1.0, 2.0], [1.0, 2.0]])},
{'a': np.array([[0.50], [0.10]])}, 2),
types.Transition(1, 0, {'a': np.array([[1.5, 3.0], [1.1, 2.2]])},
{'a': np.array([[0.25], [0.01]])}, 3),
types.Transition(2, 0, {'a': np.array([[1.5, 3.0], [1.1, 2.2]])},
{'a': np.array([[0.00], [0.00]])}, 4),
types.Transition(3, 0, {'a': np.array([[1.0, 2.0], [1.0, 2.0]])},
{'a': np.array([[0.00], [0.00]])}, 4),
)),
dict(
testcase_name='TwoStepWithExtras',
n_step=2,
additional_discount=1.0,
first=dm_env.restart(1),
steps=(
(
0,
dm_env.transition(reward=1.0, observation=2, discount=0.5),
{
'state': 0
},
),
(
0,
dm_env.transition(reward=1.0, observation=3, discount=0.5),
{
'state': 1
},
),
(
0,
dm_env.termination(reward=1.0, observation=4),
{
'state': 2
},
),
),
expected_transitions=(
types.Transition(1, 0, 1.0, 0.50, 2, {'state': 0}),
types.Transition(1, 0, 1.5, 0.25, 3, {'state': 0}),
types.Transition(2, 0, 1.5, 0.00, 4, {'state': 1}),
types.Transition(3, 0, 1.0, 0.00, 4, {'state': 2}),
)),
dict(
testcase_name='ThreeStepDiscounted',
n_step=3,
additional_discount=0.4,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=1.0, observation=2, discount=0.5)),
(0, dm_env.transition(reward=1.0, observation=3, discount=0.5)),
(0, dm_env.termination(reward=1.0, observation=4)),
),
expected_transitions=(
types.Transition(1, 0, 1.00, 0.5, 2),
types.Transition(1, 0, 1.20, 0.1, 3),
types.Transition(1, 0, 1.24, 0.0, 4),
types.Transition(2, 0, 1.20, 0.0, 4),
types.Transition(3, 0, 1.00, 0.0, 4),
)),
dict(
testcase_name='ThreeStepVaryingReward',
n_step=3,
additional_discount=0.5,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=2.0, observation=2)),
(0, dm_env.transition(reward=3.0, observation=3)),
(0, dm_env.transition(reward=5.0, observation=4)),
(0, dm_env.termination(reward=7.0, observation=5)),
),
expected_transitions=(
types.Transition(1, 0, 2, 1.00, 2),
types.Transition(1, 0, 2 + 0.5 * 3, 0.50, 3),
types.Transition(1, 0, 2 + 0.5 * 3 + 0.25 * 5, 0.25, 4),
types.Transition(2, 0, 3 + 0.5 * 5 + 0.25 * 7, 0.00, 5),
types.Transition(3, 0, 5 + 0.5 * 7, 0.00, 5),
types.Transition(4, 0, 7, 0.00, 5),
)),
dict(
testcase_name='SingleTransitionEpisode',
n_step=4,
additional_discount=1.0,
first=dm_env.restart(1),
steps=((0, dm_env.termination(reward=1.0, observation=2)),),
expected_transitions=(types.Transition(1, 0, 1.00, 0.0, 2),)),
dict(
testcase_name='EpisodeShorterThanN',
n_step=4,
additional_discount=1.0,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=1.0, observation=2)),
(0, dm_env.termination(reward=1.0, observation=3)),
),
expected_transitions=(
types.Transition(1, 0, 1.00, 1.0, 2),
types.Transition(1, 0, 2.00, 0.0, 3),
types.Transition(2, 0, 1.00, 0.0, 3),
)),
dict(
testcase_name='EpisodeEqualToN',
n_step=3,
additional_discount=1.0,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=1.0, observation=2)),
(0, dm_env.termination(reward=1.0, observation=3)),
),
expected_transitions=(
types.Transition(1, 0, 1.00, 1.0, 2),
types.Transition(1, 0, 2.00, 0.0, 3),
types.Transition(2, 0, 1.00, 0.0, 3),
)),
]
BASE_TEST_CASES_FOR_SEQUENCE_ADDER = [
dict(
testcase_name='PeriodOne',
sequence_length=3,
period=1,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=2.0, observation=2)),
(0, dm_env.transition(reward=3.0, observation=3)),
(0, dm_env.transition(reward=5.0, observation=4)),
(0, dm_env.termination(reward=7.0, observation=5)),
),
expected_sequences=(
# (observation, action, reward, discount, start_of_episode, extra)
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 1.0, False, ()),
(3, 0, 5.0, 1.0, False, ()),
],
[
(2, 0, 3.0, 1.0, False, ()),
(3, 0, 5.0, 1.0, False, ()),
(4, 0, 7.0, 0.0, False, ()),
],
[
(3, 0, 5.0, 1.0, False, ()),
(4, 0, 7.0, 0.0, False, ()),
(5, 0, 0.0, 0.0, False, ()),
],
),
end_behavior=sequence_adder.EndBehavior.ZERO_PAD,
),
dict(
testcase_name='PeriodTwo',
sequence_length=3,
period=2,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=2.0, observation=2)),
(0, dm_env.transition(reward=3.0, observation=3)),
(0, dm_env.transition(reward=5.0, observation=4)),
(0, dm_env.termination(reward=7.0, observation=5)),
),
expected_sequences=(
# (observation, action, reward, discount, start_of_episode, extra)
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 1.0, False, ()),
(3, 0, 5.0, 1.0, False, ()),
],
[
(3, 0, 5.0, 1.0, False, ()),
(4, 0, 7.0, 0.0, False, ()),
(5, 0, 0.0, 0.0, False, ()),
],
),
end_behavior=sequence_adder.EndBehavior.ZERO_PAD,
),
dict(
testcase_name='EarlyTerminationPeriodOne',
sequence_length=3,
period=1,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=2.0, observation=2)),
(0, dm_env.termination(reward=3.0, observation=3)),
),
expected_sequences=(
# (observation, action, reward, discount, start_of_episode, extra)
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 0.0, False, ()),
(3, 0, 0.0, 0.0, False, ()),
],),
end_behavior=sequence_adder.EndBehavior.ZERO_PAD,
),
dict(
testcase_name='EarlyTerminationPeriodTwo',
sequence_length=3,
period=2,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=2.0, observation=2)),
(0, dm_env.termination(reward=3.0, observation=3)),
),
expected_sequences=(
# (observation, action, reward, discount, start_of_episode, extra)
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 0.0, False, ()),
(3, 0, 0.0, 0.0, False, ()),
],),
end_behavior=sequence_adder.EndBehavior.ZERO_PAD,
),
dict(
testcase_name='EarlyTerminationPaddingPeriodOne',
sequence_length=4,
period=1,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=2.0, observation=2)),
(0, dm_env.termination(reward=3.0, observation=3)),
),
expected_sequences=(
# (observation, action, reward, discount, start_of_episode, extra)
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 0.0, False, ()),
(3, 0, 0.0, 0.0, False, ()),
(0, 0, 0.0, 0.0, False, ()),
],),
end_behavior=sequence_adder.EndBehavior.ZERO_PAD,
),
dict(
testcase_name='EarlyTerminationPaddingPeriodTwo',
sequence_length=4,
period=2,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=2.0, observation=2)),
(0, dm_env.termination(reward=3.0, observation=3)),
),
expected_sequences=(
# (observation, action, reward, discount, start_of_episode, extra)
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 0.0, False, ()),
(3, 0, 0.0, 0.0, False, ()),
(0, 0, 0.0, 0.0, False, ()),
],),
end_behavior=sequence_adder.EndBehavior.ZERO_PAD,
),
dict(
testcase_name='EarlyTerminationNoPadding',
sequence_length=4,
period=1,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=2.0, observation=2)),
(0, dm_env.termination(reward=3.0, observation=3)),
),
expected_sequences=(
# (observation, action, reward, discount, start_of_episode, extra)
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 0.0, False, ()),
(3, 0, 0.0, 0.0, False, ()),
],),
end_behavior=sequence_adder.EndBehavior.TRUNCATE,
),
dict(
testcase_name='LongEpisodePadding',
sequence_length=3,
period=3,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=2.0, observation=2)),
(0, dm_env.transition(reward=3.0, observation=3)),
(0, dm_env.transition(reward=5.0, observation=4)),
(0, dm_env.transition(reward=7.0, observation=5)),
(0, dm_env.transition(reward=9.0, observation=6)),
(0, dm_env.transition(reward=11.0, observation=7)),
(0, dm_env.termination(reward=13.0, observation=8)),
),
expected_sequences=(
# (observation, action, reward, discount, start_of_episode, extra)
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 1.0, False, ()),
(3, 0, 5.0, 1.0, False, ()),
],
[
(4, 0, 7.0, 1.0, False, ()),
(5, 0, 9.0, 1.0, False, ()),
(6, 0, 11.0, 1.0, False, ()),
],
[
(7, 0, 13.0, 0.0, False, ()),
(8, 0, 0.0, 0.0, False, ()),
(0, 0, 0.0, 0.0, False, ()),
],
),
end_behavior=sequence_adder.EndBehavior.ZERO_PAD,
),
dict(
testcase_name='LongEpisodeNoPadding',
sequence_length=3,
period=3,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=2.0, observation=2)),
(0, dm_env.transition(reward=3.0, observation=3)),
(0, dm_env.transition(reward=5.0, observation=4)),
(0, dm_env.transition(reward=7.0, observation=5)),
(0, dm_env.transition(reward=9.0, observation=6)),
(0, dm_env.transition(reward=11.0, observation=7)),
(0, dm_env.termination(reward=13.0, observation=8)),
),
expected_sequences=(
# (observation, action, reward, discount, start_of_episode, extra)
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 1.0, False, ()),
(3, 0, 5.0, 1.0, False, ()),
],
[
(4, 0, 7.0, 1.0, False, ()),
(5, 0, 9.0, 1.0, False, ()),
(6, 0, 11.0, 1.0, False, ()),
],
[
(7, 0, 13.0, 0.0, False, ()),
(8, 0, 0.0, 0.0, False, ()),
],
),
end_behavior=sequence_adder.EndBehavior.TRUNCATE,
),
dict(
testcase_name='EndBehavior_WRITE',
sequence_length=3,
period=2,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=2.0, observation=2)),
(0, dm_env.transition(reward=3.0, observation=3)),
(0, dm_env.transition(reward=5.0, observation=4)),
(0, dm_env.transition(reward=7.0, observation=5)),
(0, dm_env.termination(reward=8.0, observation=6)),
),
expected_sequences=(
# (observation, action, reward, discount, start_of_episode, extra)
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 1.0, False, ()),
(3, 0, 5.0, 1.0, False, ()),
],
[
(3, 0, 5.0, 1.0, False, ()),
(4, 0, 7.0, 1.0, False, ()),
(5, 0, 8.0, 0.0, False, ()),
],
[
(4, 0, 7.0, 1.0, False, ()),
(5, 0, 8.0, 0.0, False, ()),
(6, 0, 0.0, 0.0, False, ()),
],
),
end_behavior=sequence_adder.EndBehavior.WRITE,
),
]
TEST_CASES_FOR_SEQUENCE_ADDER = BASE_TEST_CASES_FOR_SEQUENCE_ADDER + [
dict(
testcase_name='NonBreakingSequenceOnEpisodeReset',
sequence_length=3,
period=2,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=2.0, observation=2)),
(0, dm_env.transition(reward=3.0, observation=3)),
(0, dm_env.transition(reward=5.0, observation=4)),
(0, dm_env.transition(reward=7.0, observation=5)),
(0, dm_env.transition(reward=9.0, observation=6)),
(0, dm_env.transition(reward=11.0, observation=7)),
(0, dm_env.termination(reward=13.0, observation=8)),
),
expected_sequences=(
# (observation, action, reward, discount, start_of_episode, extra)
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 1.0, False, ()),
(3, 0, 5.0, 1.0, False, ()),
],
[
(3, 0, 5.0, 1.0, False, ()),
(4, 0, 7.0, 1.0, False, ()),
(5, 0, 9.0, 1.0, False, ()),
],
[
(5, 0, 9.0, 1.0, False, ()),
(6, 0, 11.0, 1.0, False, ()),
(7, 0, 13.0, 0.0, False, ()),
],
),
end_behavior=sequence_adder.EndBehavior.CONTINUE,
repeat_episode_times=1),
dict(
testcase_name='NonBreakingSequenceMultipleTerminatedEpisodes',
sequence_length=3,
period=2,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=2.0, observation=2)),
(0, dm_env.transition(reward=3.0, observation=3)),
(0, dm_env.transition(reward=5.0, observation=4)),
(0, dm_env.transition(reward=7.0, observation=5)),
(0, dm_env.transition(reward=9.0, observation=6)),
(0, dm_env.termination(reward=13.0, observation=7)),
),
expected_sequences=(
# (observation, action, reward, discount, start_of_episode, extra)
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 1.0, False, ()),
(3, 0, 5.0, 1.0, False, ()),
],
[
(3, 0, 5.0, 1.0, False, ()),
(4, 0, 7.0, 1.0, False, ()),
(5, 0, 9.0, 1.0, False, ()),
],
[
(5, 0, 9.0, 1.0, False, ()),
(6, 0, 13.0, 0.0, False, ()),
(7, 0, 0.0, 0.0, False, ()),
],
[
(7, 0, 0.0, 0.0, False, ()),
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 1.0, False, ()),
],
[
(2, 0, 3.0, 1.0, False, ()),
(3, 0, 5.0, 1.0, False, ()),
(4, 0, 7.0, 1.0, False, ()),
],
[
(4, 0, 7.0, 1.0, False, ()),
(5, 0, 9.0, 1.0, False, ()),
(6, 0, 13.0, 0.0, False, ()),
],
[
(6, 0, 13.0, 0.0, False, ()),
(7, 0, 0.0, 0.0, False, ()),
(1, 0, 2.0, 1.0, True, ()),
],
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 1.0, False, ()),
(3, 0, 5.0, 1.0, False, ()),
],
[
(3, 0, 5.0, 1.0, False, ()),
(4, 0, 7.0, 1.0, False, ()),
(5, 0, 9.0, 1.0, False, ()),
],
[
(5, 0, 9.0, 1.0, False, ()),
(6, 0, 13.0, 0.0, False, ()),
(7, 0, 0.0, 0.0, False, ()),
],
),
end_behavior=sequence_adder.EndBehavior.CONTINUE,
repeat_episode_times=3),
dict(
testcase_name='NonBreakingSequenceMultipleTruncatedEpisodes',
sequence_length=3,
period=2,
first=dm_env.restart(1),
steps=(
(0, dm_env.transition(reward=2.0, observation=2)),
(0, dm_env.transition(reward=3.0, observation=3)),
(0, dm_env.transition(reward=5.0, observation=4)),
(0, dm_env.transition(reward=7.0, observation=5)),
(0, dm_env.transition(reward=9.0, observation=6)),
(0, dm_env.truncation(reward=13.0, observation=7)),
),
expected_sequences=(
# (observation, action, reward, discount, start_of_episode, extra)
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 1.0, False, ()),
(3, 0, 5.0, 1.0, False, ()),
],
[
(3, 0, 5.0, 1.0, False, ()),
(4, 0, 7.0, 1.0, False, ()),
(5, 0, 9.0, 1.0, False, ()),
],
[
(5, 0, 9.0, 1.0, False, ()),
(6, 0, 13.0, 1.0, False, ()),
(7, 0, 0.0, 0.0, False, ()),
],
[
(7, 0, 0.0, 0.0, False, ()),
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 1.0, False, ()),
],
[
(2, 0, 3.0, 1.0, False, ()),
(3, 0, 5.0, 1.0, False, ()),
(4, 0, 7.0, 1.0, False, ()),
],
[
(4, 0, 7.0, 1.0, False, ()),
(5, 0, 9.0, 1.0, False, ()),
(6, 0, 13.0, 1.0, False, ()),
],
[
(6, 0, 13.0, 1.0, False, ()),
(7, 0, 0.0, 0.0, False, ()),
(1, 0, 2.0, 1.0, True, ()),
],
[
(1, 0, 2.0, 1.0, True, ()),
(2, 0, 3.0, 1.0, False, ()),
(3, 0, 5.0, 1.0, False, ()),
],
[
(3, 0, 5.0, 1.0, False, ()),
(4, 0, 7.0, 1.0, False, ()),
(5, 0, 9.0, 1.0, False, ()),
],
[
(5, 0, 9.0, 1.0, False, ()),
(6, 0, 13.0, 1.0, False, ()),
(7, 0, 0.0, 0.0, False, ()),
],
),
end_behavior=sequence_adder.EndBehavior.CONTINUE,
repeat_episode_times=3),
]
|
acme-master
|
acme/adders/reverb/test_cases.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adders that use Reverb (github.com/deepmind/reverb) as a backend."""
import abc
import time
from typing import Callable, Iterable, Mapping, NamedTuple, Optional, Sized, Union, Tuple
from absl import logging
from acme import specs
from acme import types
from acme.adders import base
import dm_env
import numpy as np
import reverb
import tensorflow as tf
import tree
DEFAULT_PRIORITY_TABLE = 'priority_table'
_MIN_WRITER_LIFESPAN_SECONDS = 60
StartOfEpisodeType = Union[bool, specs.Array, tf.Tensor, tf.TensorSpec,
Tuple[()]]
# TODO(b/188510142): Delete Step.
class Step(NamedTuple):
"""Step class used internally for reverb adders."""
observation: types.NestedArray
action: types.NestedArray
reward: types.NestedArray
discount: types.NestedArray
start_of_episode: StartOfEpisodeType
extras: types.NestedArray = ()
# TODO(b/188510142): Replace with proper Trajectory class.
Trajectory = Step
class PriorityFnInput(NamedTuple):
"""The input to a priority function consisting of stacked steps."""
observations: types.NestedArray
actions: types.NestedArray
rewards: types.NestedArray
discounts: types.NestedArray
start_of_episode: types.NestedArray
extras: types.NestedArray
# Define the type of a priority function and the mapping from table to function.
PriorityFn = Callable[['PriorityFnInput'], float]
PriorityFnMapping = Mapping[str, Optional[PriorityFn]]
def spec_like_to_tensor_spec(paths: Iterable[str], spec: specs.Array):
return tf.TensorSpec.from_spec(spec, name='/'.join(str(p) for p in paths))
class ReverbAdder(base.Adder):
"""Base class for Reverb adders."""
def __init__(
self,
client: reverb.Client,
max_sequence_length: int,
max_in_flight_items: int,
delta_encoded: bool = False,
priority_fns: Optional[PriorityFnMapping] = None,
validate_items: bool = True,
):
"""Initialize a ReverbAdder instance.
Args:
client: A client to the Reverb backend.
max_sequence_length: The maximum length of sequences (corresponding to the
number of observations) that can be added to replay.
max_in_flight_items: The maximum number of items allowed to be "in flight"
at the same time. See `block_until_num_items` in
`reverb.TrajectoryWriter.flush` for more info.
delta_encoded: If `True` (False by default) enables delta encoding, see
`Client` for more information.
priority_fns: A mapping from table names to priority functions; if
omitted, all transitions/steps/sequences are given uniform priorities
(1.0) and placed in DEFAULT_PRIORITY_TABLE.
validate_items: Whether to validate items against the table signature
        before they are sent to the server. This requires the table signature
        to be fetched from the server and cached locally.
"""
if priority_fns:
priority_fns = dict(priority_fns)
else:
priority_fns = {DEFAULT_PRIORITY_TABLE: None}
self._client = client
self._priority_fns = priority_fns
self._max_sequence_length = max_sequence_length
self._delta_encoded = delta_encoded
# TODO(b/206629159): Remove this.
self._max_in_flight_items = max_in_flight_items
self._add_first_called = False
# This is exposed as the _writer property in such a way that it will create
# a new writer automatically whenever the internal __writer is None. Users
# should ONLY ever interact with self._writer.
self.__writer = None
# Every time a new writer is created, it must fetch the signature from the
# Reverb server. If this is set too low it can crash the adders in a
# distributed setup where the replay may take a while to spin up.
self._validate_items = validate_items
def __del__(self):
if self.__writer is not None:
timeout_ms = 10_000
      # Try to flush all appended data before closing to avoid losing experience.
try:
self.__writer.flush(0, timeout_ms=timeout_ms)
except reverb.DeadlineExceededError as e:
logging.error(
'Timeout (%d ms) exceeded when flushing the writer before '
'deleting it. Caught Reverb exception: %s', timeout_ms, str(e))
self.__writer.close()
self.__writer = None
@property
def _writer(self) -> reverb.TrajectoryWriter:
if self.__writer is None:
self.__writer = self._client.trajectory_writer(
num_keep_alive_refs=self._max_sequence_length,
validate_items=self._validate_items)
self._writer_created_timestamp = time.time()
return self.__writer
def add_priority_table(self, table_name: str,
priority_fn: Optional[PriorityFn]):
if table_name in self._priority_fns:
raise ValueError(
f'A priority function already exists for {table_name}. '
f'Existing tables: {", ".join(self._priority_fns.keys())}.'
)
self._priority_fns[table_name] = priority_fn
def reset(self, timeout_ms: Optional[int] = None):
"""Resets the adder's buffer."""
if self.__writer:
# Flush all appended data and clear the buffers.
self.__writer.end_episode(clear_buffers=True, timeout_ms=timeout_ms)
# Create a new writer unless the current one is too young.
# This is to reduce the relative overhead of creating a new Reverb writer.
if (time.time() - self._writer_created_timestamp >
_MIN_WRITER_LIFESPAN_SECONDS):
self.__writer = None
self._add_first_called = False
def add_first(self, timestep: dm_env.TimeStep):
"""Record the first observation of a trajectory."""
if not timestep.first():
      raise ValueError('adder.add_first must be called with an initial '
                       'timestep (i.e. one for which timestep.first() is '
                       'True).')
# Record the next observation but leave the history buffer row open by
# passing `partial_step=True`.
self._writer.append(dict(observation=timestep.observation,
start_of_episode=timestep.first()),
partial_step=True)
self._add_first_called = True
def add(self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
extras: types.NestedArray = ()):
"""Record an action and the following timestep."""
if not self._add_first_called:
raise ValueError('adder.add_first must be called before adder.add.')
# Add the timestep to the buffer.
has_extras = (len(extras) > 0 if isinstance(extras, Sized) # pylint: disable=g-explicit-length-test
else extras is not None)
current_step = dict(
# Observation was passed at the previous add call.
action=action,
reward=next_timestep.reward,
discount=next_timestep.discount,
# Start of episode indicator was passed at the previous add call.
**({'extras': extras} if has_extras else {})
)
self._writer.append(current_step)
# Record the next observation and write.
self._writer.append(
dict(
observation=next_timestep.observation,
start_of_episode=next_timestep.first()),
partial_step=True)
self._write()
if next_timestep.last():
# Complete the row by appending zeros to remaining open fields.
# TODO(b/183945808): remove this when fields are no longer expected to be
# of equal length on the learner side.
dummy_step = tree.map_structure(np.zeros_like, current_step)
self._writer.append(dummy_step)
self._write_last()
self.reset()
@classmethod
def signature(cls, environment_spec: specs.EnvironmentSpec,
extras_spec: types.NestedSpec = ()):
"""This is a helper method for generating signatures for Reverb tables.
Signatures are useful for validating data types and shapes, see Reverb's
documentation for details on how they are used.
Args:
environment_spec: A `specs.EnvironmentSpec` whose fields are nested
structures with leaf nodes that have `.shape` and `.dtype` attributes.
This should come from the environment that will be used to generate
the data inserted into the Reverb table.
extras_spec: A nested structure with leaf nodes that have `.shape` and
`.dtype` attributes. The structure (and shapes/dtypes) of this must
be the same as the `extras` passed into `ReverbAdder.add`.
Returns:
A `Step` whose leaf nodes are `tf.TensorSpec` objects.
"""
spec_step = Step(
observation=environment_spec.observations,
action=environment_spec.actions,
reward=environment_spec.rewards,
discount=environment_spec.discounts,
start_of_episode=specs.Array(shape=(), dtype=bool),
extras=extras_spec)
return tree.map_structure_with_path(spec_like_to_tensor_spec, spec_step)
@abc.abstractmethod
def _write(self):
"""Write data to replay from the buffer."""
@abc.abstractmethod
def _write_last(self):
"""Write data to replay from the buffer."""
|
acme-master
|
acme/adders/reverb/base.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for multiagent_utils."""
from acme import specs
from acme.multiagent import utils as multiagent_utils
from acme.testing import fakes
from acme.testing import multiagent_fakes
import dm_env
from absl.testing import absltest
class UtilsTest(absltest.TestCase):
def test_get_agent_spec(self):
agent_indices = ['a', '99', 'Z']
spec = multiagent_fakes.make_multiagent_environment_spec(agent_indices)
for agent_id in spec.actions.keys():
single_agent_spec = multiagent_utils.get_agent_spec(
spec, agent_id=agent_id)
expected_spec = specs.EnvironmentSpec(
actions=spec.actions[agent_id],
discounts=spec.discounts,
observations=spec.observations[agent_id],
rewards=spec.rewards[agent_id]
)
self.assertEqual(single_agent_spec, expected_spec)
def test_get_agent_timestep(self):
agent_indices = ['a', '99', 'Z']
spec = multiagent_fakes.make_multiagent_environment_spec(agent_indices)
env = fakes.Environment(spec)
timestep = env.reset()
for agent_id in spec.actions.keys():
single_agent_timestep = multiagent_utils.get_agent_timestep(
timestep, agent_id)
expected_timestep = dm_env.TimeStep(
observation=timestep.observation[agent_id],
reward=None,
discount=None,
step_type=timestep.step_type
)
self.assertEqual(single_agent_timestep, expected_timestep)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/multiagent/utils_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiagent helpers."""
|
acme-master
|
acme/multiagent/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Types for multiagent setups."""
from typing import Any, Callable, Dict, Tuple
from acme import specs
from acme.agents.jax import builders as jax_builders
from acme.utils.loggers import base
import reverb
# Sub-agent types
AgentID = str
EvalMode = bool
GenericAgent = Any
AgentConfig = Any
Networks = Any
PolicyNetwork = Any
LoggerFn = Callable[[], base.Logger]
InitNetworkFn = Callable[[GenericAgent, specs.EnvironmentSpec], Networks]
InitPolicyNetworkFn = Callable[
[GenericAgent, Networks, specs.EnvironmentSpec, AgentConfig, bool],
Networks]
InitBuilderFn = Callable[[GenericAgent, AgentConfig],
jax_builders.GenericActorLearnerBuilder]
# Multiagent types
MultiAgentLoggerFn = Dict[AgentID, LoggerFn]
MultiAgentNetworks = Dict[AgentID, Networks]
MultiAgentPolicyNetworks = Dict[AgentID, PolicyNetwork]
MultiAgentSample = Tuple[reverb.ReplaySample, ...]
NetworkFactory = Callable[[specs.EnvironmentSpec], MultiAgentNetworks]
PolicyFactory = Callable[[MultiAgentNetworks, EvalMode],
MultiAgentPolicyNetworks]
BuilderFactory = Callable[[
Dict[AgentID, GenericAgent],
Dict[AgentID, AgentConfig],
], Dict[AgentID, jax_builders.GenericActorLearnerBuilder]]
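# The block below is an illustrative sketch added for clarity; it is not part
# of the original module. It shows the expected shape of the multiagent
# mappings: one entry per agent ID, with placeholders standing in for real
# networks and policies.
if __name__ == '__main__':
  _networks: MultiAgentNetworks = {'0': object(), '1': object()}
  _policies: MultiAgentPolicyNetworks = {
      agent_id: (lambda observation: 0) for agent_id in _networks}
  assert set(_networks) == set(_policies)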
|
acme-master
|
acme/multiagent/types.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiagent utilities."""
from acme import specs
from acme.multiagent import types
import dm_env
def get_agent_spec(env_spec: specs.EnvironmentSpec,
agent_id: types.AgentID) -> specs.EnvironmentSpec:
"""Returns a single agent spec from environment spec.
Args:
    env_spec: environment spec, wherein the observation, action, and reward
      specs are mappings keyed by agent ID (each entry giving the spec for that
      agent). Discounts are scalars shared amongst agents.
    agent_id: ID of the agent whose spec should be extracted.
"""
return specs.EnvironmentSpec(
actions=env_spec.actions[agent_id],
discounts=env_spec.discounts,
observations=env_spec.observations[agent_id],
rewards=env_spec.rewards[agent_id])
def get_agent_timestep(timestep: dm_env.TimeStep,
agent_id: types.AgentID) -> dm_env.TimeStep:
"""Returns the extracted timestep for a particular agent."""
# Discounts are assumed to be shared amongst agents
reward = None if timestep.reward is None else timestep.reward[agent_id]
return dm_env.TimeStep(
observation=timestep.observation[agent_id],
reward=reward,
discount=timestep.discount,
step_type=timestep.step_type)
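# The block below is an illustrative usage sketch added for clarity; it is not
# part of the original module. It assumes a two-agent environment whose specs
# and observations are dicts keyed by agent ID ('0' and '1' are arbitrary).
if __name__ == '__main__':
  _multi_spec = specs.EnvironmentSpec(
      observations={'0': specs.Array(shape=(2,), dtype=float),
                    '1': specs.Array(shape=(2,), dtype=float)},
      actions={'0': specs.DiscreteArray(num_values=3),
               '1': specs.DiscreteArray(num_values=3)},
      rewards={'0': specs.Array(shape=(), dtype=float),
               '1': specs.Array(shape=(), dtype=float)},
      discounts=specs.BoundedArray(
          shape=(), dtype=float, minimum=0.0, maximum=1.0))
  _single_spec = get_agent_spec(_multi_spec, agent_id='0')
  assert _single_spec.actions.num_values == 3
  _timestep = dm_env.restart({'0': [0.0, 0.0], '1': [1.0, 1.0]})
  _agent_timestep = get_agent_timestep(_timestep, agent_id='1')
  assert _agent_timestep.observation == [1.0, 1.0]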
|
acme-master
|
acme/multiagent/utils.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A iterator that does zero-copy conversion of `tf.Tensor`s into `np.ndarray`s."""
from typing import Iterator
from acme import types
import numpy as np
import tree
class NumpyIterator(Iterator[types.NestedArray]):
"""Iterator over a dataset with elements converted to numpy.
Note: This iterator returns read-only numpy arrays.
This iterator (compared to `tf.data.Dataset.as_numpy_iterator()`) does not
  copy the data when converting `tf.Tensor`s to `np.ndarray`s.
TODO(b/178684359): Remove this when it is upstreamed into `tf.data`.
"""
__slots__ = ['_iterator']
def __init__(self, dataset):
self._iterator: Iterator[types.NestedTensor] = iter(dataset)
def __iter__(self) -> 'NumpyIterator':
return self
def __next__(self) -> types.NestedArray:
return tree.map_structure(lambda t: np.asarray(memoryview(t)),
next(self._iterator))
def next(self):
return self.__next__()
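# The block below is an illustrative usage sketch added for clarity; it is not
# part of the original module. It assumes TensorFlow is installed; the module
# itself does not import it.
if __name__ == '__main__':
  import tensorflow as tf  # Only needed for this example.
  _dataset = tf.data.Dataset.from_tensor_slices(
      np.arange(6, dtype=np.int32).reshape(3, 2))
  for _element in NumpyIterator(_dataset):
    # Each element is a read-only numpy view of the tensor's memory.
    assert isinstance(_element, np.ndarray)
    assert not _element.flags.writeable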
|
acme-master
|
acme/datasets/numpy_iterator.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset interfaces."""
from acme.datasets.numpy_iterator import NumpyIterator
from acme.datasets.reverb import make_reverb_dataset
# from acme.datasets.reverb import make_reverb_dataset_trajectory
|
acme-master
|
acme/datasets/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities related to loading TFDS datasets."""
import logging
from typing import Any, Iterator, Optional, Tuple, Sequence
from acme import specs
from acme import types
from flax import jax_utils
import jax
import jax.numpy as jnp
import numpy as np
import rlds
import tensorflow as tf
import tensorflow_datasets as tfds
def _batched_step_to_transition(step: rlds.BatchedStep) -> types.Transition:
return types.Transition(
observation=tf.nest.map_structure(lambda x: x[0], step[rlds.OBSERVATION]),
action=tf.nest.map_structure(lambda x: x[0], step[rlds.ACTION]),
reward=tf.nest.map_structure(lambda x: x[0], step[rlds.REWARD]),
discount=1.0 - tf.cast(step[rlds.IS_TERMINAL][1], dtype=tf.float32),
# If next step is terminal, then the observation may be arbitrary.
next_observation=tf.nest.map_structure(
lambda x: x[1], step[rlds.OBSERVATION])
)
def _batch_steps(episode: rlds.Episode) -> tf.data.Dataset:
return rlds.transformations.batch(
episode[rlds.STEPS], size=2, shift=1, drop_remainder=True)
def _dataset_size_upperbound(dataset: tf.data.Dataset) -> int:
if dataset.cardinality() != tf.data.experimental.UNKNOWN_CARDINALITY:
return dataset.cardinality()
return tf.cast(
dataset.batch(1000).reduce(0, lambda x, step: x + 1000), tf.int64)
def load_tfds_dataset(
dataset_name: str,
num_episodes: Optional[int] = None,
env_spec: Optional[specs.EnvironmentSpec] = None) -> tf.data.Dataset:
"""Returns a TFDS dataset with the given name."""
# Used only in tests.
del env_spec
dataset = tfds.load(dataset_name)['train']
if num_episodes:
dataset = dataset.take(num_episodes)
return dataset
# TODO(sinopalnikov): replace get_tfds_dataset with a pair of load/transform.
def get_tfds_dataset(
dataset_name: str,
num_episodes: Optional[int] = None,
env_spec: Optional[specs.EnvironmentSpec] = None) -> tf.data.Dataset:
"""Returns a TFDS dataset transformed to a dataset of transitions."""
dataset = load_tfds_dataset(dataset_name, num_episodes, env_spec)
batched_steps = dataset.flat_map(_batch_steps)
return rlds.transformations.map_steps(batched_steps,
_batched_step_to_transition)
# In order to avoid excessive copying on TPU one needs to make the last
# dimension a multiple of this number.
_BEST_DIVISOR = 128
def _pad(x: jnp.ndarray) -> jnp.ndarray:
if len(x.shape) != 2:
return x
  # TODO: find a more principled way to choose this threshold (30). Depending
  # on various conditions, the excessive copying is not triggered for small
  # enough sizes.
if x.shape[-1] % _BEST_DIVISOR != 0 and x.shape[-1] > 30:
n = _BEST_DIVISOR - (x.shape[-1] % _BEST_DIVISOR)
x = np.pad(x, [(0, 0)] * (x.ndim - 1) + [(0, n)], 'constant')
return x
# Undo the padding.
def _unpad(x: jnp.ndarray, shape: Sequence[int]) -> jnp.ndarray:
if len(shape) == 2 and x.shape[-1] != shape[-1]:
return x[..., :shape[-1]]
return x
_PMAP_AXIS_NAME = 'data'
class JaxInMemoryRandomSampleIterator(Iterator[Any]):
"""In memory random sample iterator implemented in JAX.
Loads the whole dataset in memory and performs random sampling with
replacement of batches of `batch_size`.
This class provides much faster sampling functionality compared to using
an iterator on tf.data.Dataset.
"""
def __init__(self,
dataset: tf.data.Dataset,
key: jnp.ndarray,
batch_size: int,
shard_dataset_across_devices: bool = False):
"""Creates an iterator.
Args:
dataset: underlying tf Dataset
key: a key to be used for random number generation
batch_size: batch size
shard_dataset_across_devices: whether to use all available devices
for storing the underlying dataset. The upside is a larger
dataset capacity that fits into memory. Downsides are:
        - execution of pmapped functions is usually slower than jitted ones
        - the last few elements of the dataset might be dropped (if its size
          is not a multiple of the number of devices)
- sampling is not 100% uniform, since each core will be doing sampling
only within its data chunk
The number of available devices must divide the batch_size evenly.
"""
    # Read the whole dataset. We use an artificially large batch_size to make
    # sure we capture the whole dataset.
size = _dataset_size_upperbound(dataset)
data = next(dataset.batch(size).as_numpy_iterator())
self._dataset_size = jax.tree_flatten(
jax.tree_map(lambda x: x.shape[0], data))[0][0]
device = jax_utils._pmap_device_order()
if not shard_dataset_across_devices:
device = device[:1]
should_pmap = len(device) > 1
assert batch_size % len(device) == 0
self._dataset_size = self._dataset_size - self._dataset_size % len(device)
# len(device) needs to divide self._dataset_size evenly.
assert self._dataset_size % len(device) == 0
logging.info('Trying to load %s elements to %s', self._dataset_size, device)
logging.info('Dataset %s %s',
('before padding' if should_pmap else ''),
jax.tree_map(lambda x: x.shape, data))
if should_pmap:
shapes = jax.tree_map(lambda x: x.shape, data)
# Padding to a multiple of 128 is needed to avoid excessive copying on TPU
data = jax.tree_map(_pad, data)
logging.info('Dataset after padding %s',
jax.tree_map(lambda x: x.shape, data))
def split_and_put(x: jnp.ndarray) -> jnp.ndarray:
return jax.device_put_sharded(
np.split(x[:self._dataset_size], len(device)), devices=device)
self._jax_dataset = jax.tree_map(split_and_put, data)
else:
self._jax_dataset = jax.tree_map(jax.device_put, data)
self._key = (jnp.stack(jax.random.split(key, len(device)))
if should_pmap else key)
def sample_per_shard(data: Any,
key: jnp.ndarray) -> Tuple[Any, jnp.ndarray]:
key1, key2 = jax.random.split(key)
indices = jax.random.randint(
key1, (batch_size // len(device),),
minval=0,
maxval=self._dataset_size // len(device))
data_sample = jax.tree_map(lambda d: jnp.take(d, indices, axis=0), data)
return data_sample, key2
if should_pmap:
def sample(data, key):
data_sample, key = sample_per_shard(data, key)
        # Gathering data on TPUs is much more efficient than doing so on a host
# since it avoids Host - Device communications.
data_sample = jax.lax.all_gather(
data_sample, axis_name=_PMAP_AXIS_NAME, axis=0, tiled=True)
data_sample = jax.tree_map(_unpad, data_sample, shapes)
return data_sample, key
pmapped_sample = jax.pmap(sample, axis_name=_PMAP_AXIS_NAME)
def sample_and_postprocess(key: jnp.ndarray) -> Tuple[Any, jnp.ndarray]:
data, key = pmapped_sample(self._jax_dataset, key)
# All pmapped devices return the same data, so we just take the one from
# the first device.
return jax.tree_map(lambda x: x[0], data), key
self._sample = sample_and_postprocess
else:
self._sample = jax.jit(
lambda key: sample_per_shard(self._jax_dataset, key))
def __next__(self) -> Any:
data, self._key = self._sample(self._key)
return data
@property
def dataset_size(self) -> int:
"""An integer of the dataset cardinality."""
return self._dataset_size
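# Usage sketch (illustrative only, not part of the library): sampling random
# batches from a small synthetic dataset held in device memory. For real
# workloads the dataset would typically come from `get_tfds_dataset(...)`;
# the synthetic data below is used so the example is self-contained.
if __name__ == '__main__':
  synthetic = tf.data.Dataset.from_tensor_slices(
      {'observation': np.arange(32, dtype=np.float32).reshape(16, 2)})
  iterator = JaxInMemoryRandomSampleIterator(
      synthetic, key=jax.random.PRNGKey(0), batch_size=4)
  batch = next(iterator)
  print(jax.tree_map(lambda x: x.shape, batch))  # {'observation': (4, 2)}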
|
acme-master
|
acme/datasets/tfds.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reverb dataset benchmark.
Note: this is a no-GRPC layer setup.
"""
import time
from typing import Sequence
from absl import app
from absl import logging
from acme import adders
from acme import specs
from acme.adders import reverb as adders_reverb
from acme.datasets import reverb as datasets
from acme.testing import fakes
import numpy as np
import reverb
from reverb import rate_limiters
def make_replay_tables(environment_spec: specs.EnvironmentSpec
) -> Sequence[reverb.Table]:
"""Create tables to insert data into."""
return [
reverb.Table(
name='default',
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=1000000,
rate_limiter=rate_limiters.MinSize(1),
signature=adders_reverb.NStepTransitionAdder.signature(
environment_spec))
]
def make_adder(replay_client: reverb.Client) -> adders.Adder:
return adders_reverb.NStepTransitionAdder(
priority_fns={'default': None},
client=replay_client,
n_step=1,
discount=1)
def main(_):
environment = fakes.ContinuousEnvironment(action_dim=8,
observation_dim=87,
episode_length=10000000)
spec = specs.make_environment_spec(environment)
replay_tables = make_replay_tables(spec)
replay_server = reverb.Server(replay_tables, port=None)
replay_client = reverb.Client(f'localhost:{replay_server.port}')
adder = make_adder(replay_client)
timestep = environment.reset()
adder.add_first(timestep)
# TODO(raveman): Consider also filling the table to say 1M (too slow).
for steps in range(10000):
if steps % 1000 == 0:
logging.info('Processed %s steps', steps)
action = np.asarray(np.random.uniform(-1, 1, (8,)), dtype=np.float32)
next_timestep = environment.step(action)
adder.add(action, next_timestep, extras=())
for batch_size in [256, 256 * 8, 256 * 64]:
for prefetch_size in [0, 1, 4]:
print(f'Processing batch_size={batch_size} prefetch_size={prefetch_size}')
ds = datasets.make_reverb_dataset(
table='default',
server_address=replay_client.server_address,
batch_size=batch_size,
prefetch_size=prefetch_size,
)
it = ds.as_numpy_iterator()
for iteration in range(3):
t = time.time()
for _ in range(1000):
_ = next(it)
print(f'Iteration {iteration} finished in {time.time() - t}s')
if __name__ == '__main__':
app.run(main)
|
acme-master
|
acme/datasets/reverb_benchmark.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformations to be applied to replay datasets for augmentation purposes."""
import enum
from acme import types
from acme.datasets import reverb as reverb_dataset
import reverb
import tensorflow as tf
class CropType(enum.Enum):
"""Types of cropping supported by the image aumentation transforms.
BILINEAR: Continuously randomly located then bilinearly interpolated.
ALIGNED: Aligned with input image's pixel grid.
"""
BILINEAR = 'bilinear'
ALIGNED = 'aligned'
def pad_and_crop(img: tf.Tensor,
pad_size: int = 4,
method: CropType = CropType.ALIGNED) -> tf.Tensor:
"""Pad and crop image to mimic a random translation with mirroring at edges.
This implements the image augmentation from section 3.1 in (Kostrikov et al.)
https://arxiv.org/abs/2004.13649.
Args:
img: The image to pad and crop. Its dimensions are [..., H, W, C] where ...
are batch dimensions (if it has any).
pad_size: The amount of padding to apply to the image before cropping it.
method: The method to use for cropping the image, see `CropType` for
details.
Returns:
The image after having been padded and cropped.
"""
num_batch_dims = img.shape[:-3].rank
if img.shape.is_fully_defined():
img_shape = img.shape.as_list()
else:
img_shape = tf.shape(img)
# Set paddings for height and width only, batches and channels set to [0, 0].
paddings = [[0, 0]] * num_batch_dims # Do not pad batch dims.
paddings.extend([[pad_size, pad_size], [pad_size, pad_size], [0, 0]])
# Pad using symmetric padding.
padded_img = tf.pad(img, paddings=paddings, mode='SYMMETRIC')
# Crop padded image using requested method.
if method == CropType.ALIGNED:
cropped_img = tf.image.random_crop(padded_img, img_shape)
elif method == CropType.BILINEAR:
height, width = img_shape[-3:-1]
padded_height, padded_width = height + 2 * pad_size, width + 2 * pad_size
# Pick a top-left point uniformly at random.
top_left = tf.random.uniform(
shape=(2,), maxval=2 * pad_size + 1, dtype=tf.int32)
# This single box is applied to the entire batch if a batch is passed.
batch_size = tf.shape(padded_img)[0]
box = tf.cast(
tf.tile(
tf.expand_dims([
top_left[0] / padded_height,
top_left[1] / padded_width,
(top_left[0] + height) / padded_height,
(top_left[1] + width) / padded_width,
], axis=0), [batch_size, 1]),
        tf.float32)  # Shape [batch_size, 4].
# Crop and resize according to `box` then reshape back to input shape.
cropped_img = tf.image.crop_and_resize(
padded_img,
box,
tf.range(batch_size),
(height, width),
method='bilinear')
cropped_img = tf.reshape(cropped_img, img_shape)
return cropped_img
def make_transform(
observation_transform: types.TensorTransformation,
transform_next_observation: bool = True,
) -> reverb_dataset.Transform:
"""Creates the appropriate dataset transform for the given signature."""
if transform_next_observation:
def transform(x: reverb.ReplaySample) -> reverb.ReplaySample:
return x._replace(
data=x.data._replace(
observation=observation_transform(x.data.observation),
next_observation=observation_transform(x.data.next_observation)))
else:
def transform(x: reverb.ReplaySample) -> reverb.ReplaySample:
return x._replace(
data=x.data._replace(
observation=observation_transform(x.data.observation)))
return transform
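# Usage sketch (illustrative only, not part of the library): random-shift
# augmentation on a toy image batch. The resulting transform is intended to be
# passed as `postprocess=` to `make_reverb_dataset`, and assumes the replayed
# samples are transitions with image `observation`/`next_observation` fields.
if __name__ == '__main__':
  images = tf.random.uniform((8, 84, 84, 3))
  shifted = pad_and_crop(images, pad_size=4, method=CropType.ALIGNED)
  print(shifted.shape)  # (8, 84, 84, 3)
  transform = make_transform(observation_transform=pad_and_crop)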
|
acme-master
|
acme/datasets/image_augmentation.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme.datasets.numpy_iterator."""
import collections
from acme.datasets import numpy_iterator
import tensorflow as tf
from absl.testing import absltest
class NumpyIteratorTest(absltest.TestCase):
def testBasic(self):
ds = tf.data.Dataset.range(3)
self.assertEqual([0, 1, 2], list(numpy_iterator.NumpyIterator(ds)))
def testNestedStructure(self):
point = collections.namedtuple('Point', ['x', 'y'])
ds = tf.data.Dataset.from_tensor_slices({
'a': ([1, 2], [3, 4]),
'b': [5, 6],
'c': point([7, 8], [9, 10])
})
self.assertEqual([{
'a': (1, 3),
'b': 5,
'c': point(7, 9)
}, {
'a': (2, 4),
'b': 6,
'c': point(8, 10)
}], list(numpy_iterator.NumpyIterator(ds)))
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/datasets/numpy_iterator_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for making TensorFlow datasets for sampling from Reverb replay."""
import collections
import os
from typing import Callable, Mapping, Optional, Union
from acme import specs
from acme import types
from acme.adders import reverb as adders
import reverb
import tensorflow as tf
Transform = Callable[[reverb.ReplaySample], reverb.ReplaySample]
def make_reverb_dataset(
server_address: str,
batch_size: Optional[int] = None,
prefetch_size: Optional[int] = None,
table: Union[str, Mapping[str, float]] = adders.DEFAULT_PRIORITY_TABLE,
num_parallel_calls: Optional[int] = 12,
max_in_flight_samples_per_worker: Optional[int] = None,
postprocess: Optional[Transform] = None,
# Deprecated kwargs.
environment_spec: Optional[specs.EnvironmentSpec] = None,
extra_spec: Optional[types.NestedSpec] = None,
transition_adder: bool = False,
convert_zero_size_to_none: bool = False,
using_deprecated_adder: bool = False,
sequence_length: Optional[int] = None,
) -> tf.data.Dataset:
"""Make a TensorFlow dataset backed by a Reverb trajectory replay service.
Arguments:
server_address: Address of the Reverb server.
batch_size: Batch size of the returned dataset.
prefetch_size: The number of elements to prefetch from the original dataset.
Note that Reverb may do some internal prefetching in addition to this.
table: The name of the Reverb table to use, or a mapping of (table_name,
float_weight) for mixing multiple tables in the input (e.g. mixing online
and offline experiences).
    num_parallel_calls: The parallelism to use. Setting it to `tf.data.AUTOTUNE`
will allow `tf.data` to automatically find a reasonable value.
max_in_flight_samples_per_worker: see reverb.TrajectoryDataset for details.
postprocess: User-specified transformation to be applied to the dataset (as
`ds.map(postprocess)`).
environment_spec: DEPRECATED! Do not use.
extra_spec: DEPRECATED! Do not use.
transition_adder: DEPRECATED! Do not use.
convert_zero_size_to_none: DEPRECATED! Do not use.
using_deprecated_adder: DEPRECATED! Do not use.
sequence_length: DEPRECATED! Do not use.
Returns:
A `tf.data.Dataset` iterating over the contents of the Reverb table.
Raises:
ValueError if `environment_spec` or `extra_spec` are set, or `table` is a
mapping with no positive weight values.
"""
if environment_spec or extra_spec:
raise ValueError(
        'The make_reverb_dataset factory function no longer requires specs,'
        ' as they should be passed as a signature to the reverb.Table when it'
' is created. Consider either updating your code or falling back to the'
' deprecated dataset factory in acme/datasets/deprecated.')
# These are no longer used and are only kept in the call signature for
# backward compatibility.
del environment_spec
del extra_spec
del transition_adder
del convert_zero_size_to_none
del using_deprecated_adder
del sequence_length
# This is the default that used to be set by reverb.TFClient.dataset().
if max_in_flight_samples_per_worker is None and batch_size is None:
max_in_flight_samples_per_worker = 100
elif max_in_flight_samples_per_worker is None:
max_in_flight_samples_per_worker = 2 * batch_size
# Create mapping from tables to non-zero weights.
if isinstance(table, str):
tables = collections.OrderedDict([(table, 1.)])
else:
tables = collections.OrderedDict([
(name, weight) for name, weight in table.items() if weight > 0.
])
if len(tables) <= 0:
raise ValueError(f'No positive weights in input tables {tables}')
# Normalize weights.
total_weight = sum(tables.values())
tables = collections.OrderedDict([
(name, weight / total_weight) for name, weight in tables.items()
])
def _make_dataset(unused_idx: tf.Tensor) -> tf.data.Dataset:
datasets = ()
for table_name, weight in tables.items():
max_in_flight_samples = max(
1, int(max_in_flight_samples_per_worker * weight))
dataset = reverb.TrajectoryDataset.from_table_signature(
server_address=server_address,
table=table_name,
max_in_flight_samples_per_worker=max_in_flight_samples)
datasets += (dataset,)
if len(datasets) > 1:
dataset = tf.data.Dataset.sample_from_datasets(
datasets, weights=tables.values())
else:
dataset = datasets[0]
# Post-process each element if a post-processing function is passed, e.g.
# observation-stacking or data augmenting transformations.
if postprocess:
dataset = dataset.map(postprocess)
if batch_size:
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset
if num_parallel_calls is not None:
    # Create `num_parallel_calls` `TrajectoryDataset`s and interleave them.
num_datasets_to_interleave = (
os.cpu_count()
if num_parallel_calls == tf.data.AUTOTUNE else num_parallel_calls)
dataset = tf.data.Dataset.range(num_datasets_to_interleave).interleave(
map_func=_make_dataset,
cycle_length=num_parallel_calls,
num_parallel_calls=num_parallel_calls,
deterministic=False)
else:
dataset = _make_dataset(tf.constant(0))
if prefetch_size:
dataset = dataset.prefetch(prefetch_size)
return dataset
|
acme-master
|
acme/datasets/reverb.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Agent implementations."""
|
acme-master
|
acme/agents/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common tools for reverb replay."""
import dataclasses
from typing import Any, Callable, Dict, Iterator, Optional
from acme import adders as adders_lib
from acme import datasets
from acme import specs
from acme import types
from acme.adders import reverb as adders
import reverb
@dataclasses.dataclass
class ReverbReplay:
server: reverb.Server
adder: adders_lib.Adder
data_iterator: Iterator[reverb.ReplaySample]
client: Optional[reverb.Client] = None
can_sample: Callable[[], bool] = lambda: True
def make_reverb_prioritized_nstep_replay(
environment_spec: specs.EnvironmentSpec,
extra_spec: types.NestedSpec = (),
n_step: int = 1,
batch_size: int = 32,
max_replay_size: int = 100_000,
min_replay_size: int = 1,
discount: float = 1.,
prefetch_size: int = 4, # TODO(iosband): rationalize prefetch size.
replay_table_name: str = adders.DEFAULT_PRIORITY_TABLE,
priority_exponent: Optional[float] = None, # If None, default to uniform.
) -> ReverbReplay:
"""Creates a single-process replay infrastructure from an environment spec."""
# Parsing priority exponent to determine uniform vs prioritized replay
if priority_exponent is None:
sampler = reverb.selectors.Uniform()
priority_fns = {replay_table_name: lambda x: 1.}
else:
sampler = reverb.selectors.Prioritized(priority_exponent)
priority_fns = None
# Create a replay server to add data to. This uses no limiter behavior in
# order to allow the Agent interface to handle it.
replay_table = reverb.Table(
name=replay_table_name,
sampler=sampler,
remover=reverb.selectors.Fifo(),
max_size=max_replay_size,
rate_limiter=reverb.rate_limiters.MinSize(min_replay_size),
signature=adders.NStepTransitionAdder.signature(environment_spec,
extra_spec),
)
server = reverb.Server([replay_table], port=None)
# The adder is used to insert observations into replay.
address = f'localhost:{server.port}'
client = reverb.Client(address)
adder = adders.NStepTransitionAdder(
client, n_step, discount, priority_fns=priority_fns)
# The dataset provides an interface to sample from replay.
data_iterator = datasets.make_reverb_dataset(
table=replay_table_name,
server_address=address,
batch_size=batch_size,
prefetch_size=prefetch_size,
).as_numpy_iterator()
return ReverbReplay(server, adder, data_iterator, client=client)
def make_reverb_online_queue(
environment_spec: specs.EnvironmentSpec,
extra_spec: Dict[str, Any],
max_queue_size: int,
sequence_length: int,
sequence_period: int,
batch_size: int,
replay_table_name: str = adders.DEFAULT_PRIORITY_TABLE,
) -> ReverbReplay:
"""Creates a single process queue from an environment spec and extra_spec."""
signature = adders.SequenceAdder.signature(environment_spec, extra_spec)
queue = reverb.Table.queue(
name=replay_table_name, max_size=max_queue_size, signature=signature)
server = reverb.Server([queue], port=None)
can_sample = lambda: queue.can_sample(batch_size)
# Component to add things into replay.
address = f'localhost:{server.port}'
adder = adders.SequenceAdder(
client=reverb.Client(address),
period=sequence_period,
sequence_length=sequence_length,
)
# The dataset object to learn from.
# We don't use datasets.make_reverb_dataset() here to avoid interleaving
  # and prefetching, which don't work well with the can_sample() check on update.
dataset = reverb.TrajectoryDataset.from_table_signature(
server_address=address,
table=replay_table_name,
max_in_flight_samples_per_worker=1,
)
dataset = dataset.batch(batch_size, drop_remainder=True)
data_iterator = dataset.as_numpy_iterator()
return ReverbReplay(server, adder, data_iterator, can_sample=can_sample)
def make_reverb_prioritized_sequence_replay(
environment_spec: specs.EnvironmentSpec,
extra_spec: types.NestedSpec = (),
batch_size: int = 32,
max_replay_size: int = 100_000,
min_replay_size: int = 1,
priority_exponent: float = 0.,
burn_in_length: int = 40,
sequence_length: int = 80,
sequence_period: int = 40,
replay_table_name: str = adders.DEFAULT_PRIORITY_TABLE,
prefetch_size: int = 4,
) -> ReverbReplay:
"""Single-process replay for sequence data from an environment spec."""
# Create a replay server to add data to. This uses no limiter behavior in
# order to allow the Agent interface to handle it.
replay_table = reverb.Table(
name=replay_table_name,
sampler=reverb.selectors.Prioritized(priority_exponent),
remover=reverb.selectors.Fifo(),
max_size=max_replay_size,
rate_limiter=reverb.rate_limiters.MinSize(min_replay_size),
signature=adders.SequenceAdder.signature(environment_spec, extra_spec),
)
server = reverb.Server([replay_table], port=None)
# The adder is used to insert observations into replay.
address = f'localhost:{server.port}'
client = reverb.Client(address)
sequence_length = burn_in_length + sequence_length + 1
adder = adders.SequenceAdder(
client=client,
period=sequence_period,
sequence_length=sequence_length,
delta_encoded=True,
)
# The dataset provides an interface to sample from replay.
data_iterator = datasets.make_reverb_dataset(
table=replay_table_name,
server_address=address,
batch_size=batch_size,
prefetch_size=prefetch_size,
).as_numpy_iterator()
return ReverbReplay(server, adder, data_iterator, client)
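# Usage sketch (illustrative only, not part of the library): single-process
# n-step replay built from a toy environment spec. `fakes` is used here only
# to construct a spec; once enough transitions have been added,
# `next(replay.data_iterator)` yields batched `reverb.ReplaySample`s.
if __name__ == '__main__':
  from acme.testing import fakes
  import numpy as np

  environment = fakes.ContinuousEnvironment(
      action_dim=2, observation_dim=4, episode_length=10)
  environment_spec = specs.make_environment_spec(environment)
  replay = make_reverb_prioritized_nstep_replay(
      environment_spec, n_step=3, batch_size=8, discount=0.99)
  replay.adder.add_first(environment.reset())
  action = np.zeros((2,), dtype=np.float32)
  replay.adder.add(action, environment.step(action))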
|
acme-master
|
acme/agents/replay.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The base agent interface."""
import math
from typing import List, Optional, Sequence
from acme import core
from acme import types
import dm_env
import numpy as np
import reverb
def _calculate_num_learner_steps(num_observations: int,
min_observations: int,
observations_per_step: float) -> int:
"""Calculates the number of learner steps to do at step=num_observations."""
n = num_observations - min_observations
if n < 0:
# Do not do any learner steps until you have seen min_observations.
return 0
if observations_per_step > 1:
    # One batch every obs_per_step observations, otherwise zero.
return int(n % int(observations_per_step) == 0)
else:
# Always return 1/obs_per_step batches every observation.
return int(1 / observations_per_step)
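# Worked example: with min_observations=100 and observations_per_step=2.0 this
# returns 1 learner step every second observation once 100 observations have
# been seen (1 at num_observations=102, 0 at 103); with
# observations_per_step=0.5 it returns 2 learner steps on every observation.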
class Agent(core.Actor, core.VariableSource):
"""Agent class which combines acting and learning.
This provides an implementation of the `Actor` interface which acts and
learns. It takes as input instances of both `acme.Actor` and `acme.Learner`
classes, and implements the policy, observation, and update methods which
defer to the underlying actor and learner.
The only real logic implemented by this class is that it controls the number
of observations to make before running a learner step. This is done by
passing the number of `min_observations` to use and a ratio of
`observations_per_step` := num_actor_actions / num_learner_steps.
  Note that `observations_per_step` can also be in the range [0, 1] in order to
  allow the agent to take more than 1 learner step per action.
"""
def __init__(self, actor: core.Actor, learner: core.Learner,
min_observations: Optional[int] = None,
observations_per_step: Optional[float] = None,
iterator: Optional[core.PrefetchingIterator] = None,
replay_tables: Optional[List[reverb.Table]] = None):
self._actor = actor
self._learner = learner
self._min_observations = min_observations
self._observations_per_step = observations_per_step
self._num_observations = 0
self._iterator = iterator
self._replay_tables = replay_tables
self._batch_size_upper_bounds = [1_000_000_000] * len(
replay_tables) if replay_tables else None
def select_action(self, observation: types.NestedArray) -> types.NestedArray:
return self._actor.select_action(observation)
def observe_first(self, timestep: dm_env.TimeStep):
self._actor.observe_first(timestep)
def observe(self, action: types.NestedArray, next_timestep: dm_env.TimeStep):
self._num_observations += 1
self._actor.observe(action, next_timestep)
def _has_data_for_training(self):
if self._iterator.ready():
return True
for (table, batch_size) in zip(self._replay_tables,
self._batch_size_upper_bounds):
if not table.can_sample(batch_size):
return False
return True
def update(self): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
if self._iterator:
# Perform learner steps as long as iterator has data.
update_actor = False
while self._has_data_for_training():
# Run learner steps (usually means gradient steps).
total_batches = self._iterator.retrieved_elements()
self._learner.step()
current_batches = self._iterator.retrieved_elements() - total_batches
        assert current_batches == 1, (
            'Learner step must retrieve exactly one element from the iterator'
            f' (retrieved {current_batches}). Otherwise agent can deadlock. '
            'Example cause is that your chosen agent\'s Builder has a '
            '`make_learner` factory that prefetches the data but it '
            'shouldn\'t.')
self._batch_size_upper_bounds = [
math.ceil(t.info.rate_limiter_info.sample_stats.completed /
(total_batches + 1)) for t in self._replay_tables
]
update_actor = True
if update_actor:
# Update the actor weights only when learner was updated.
self._actor.update()
return
    # If the dataset is not provided, fall back to the old logic.
# TODO(stanczyk): Remove when not used.
num_steps = _calculate_num_learner_steps(
num_observations=self._num_observations,
min_observations=self._min_observations,
observations_per_step=self._observations_per_step,
)
for _ in range(num_steps):
# Run learner steps (usually means gradient steps).
self._learner.step()
if num_steps > 0:
# Update the actor weights when learner updates.
self._actor.update()
def get_variables(self, names: Sequence[str]) -> List[List[np.ndarray]]:
return self._learner.get_variables(names)
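# Usage sketch (illustrative only, not part of the library): a concrete agent
# typically wires an algorithm-specific actor and learner through this class.
# `my_actor` and `my_learner` below are hypothetical placeholders for
# implementations of `core.Actor` and `core.Learner`:
#
#   agent = Agent(
#       actor=my_actor,
#       learner=my_learner,
#       min_observations=1000,      # Wait for 1000 observations before learning.
#       observations_per_step=8.0)  # Then one learner step per 8 observations.
#
# The resulting agent is then driven by an `EnvironmentLoop`, which calls
# `select_action`, `observe_first`/`observe` and `update` on it.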
|
acme-master
|
acme/agents/agent.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RL agent Builder interface."""
import abc
import dataclasses
from typing import Generic, Iterator, List, Optional
from acme import adders
from acme import core
from acme import specs
from acme.jax import networks as networks_lib
from acme.jax import types as jax_types
from acme.utils import counting
from acme.utils import loggers
import reverb
Networks = jax_types.Networks
Policy = jax_types.Policy
Sample = jax_types.Sample
class OfflineBuilder(abc.ABC, Generic[Networks, Policy, Sample]):
"""Interface for defining the components of an offline RL agent.
Implementations of this interface contain a complete specification of a
concrete offline RL agent. An instance of this class can be used to build an
offline RL agent that operates either locally or in a distributed setup.
"""
@abc.abstractmethod
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: Networks,
dataset: Iterator[Sample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
*,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
"""Creates an instance of the learner.
Args:
random_key: A key for random number generation.
networks: struct describing the networks needed by the learner; this is
specific to the learner in question.
dataset: iterator over demonstration samples.
logger_fn: factory providing loggers used for logging progress.
environment_spec: A container for all relevant environment specs.
counter: a Counter which allows for recording of counts (learner steps,
evaluator steps, etc.) distributed throughout the agent.
"""
@abc.abstractmethod
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: Policy,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
) -> core.Actor:
"""Create an actor instance to be used for evaluation.
Args:
random_key: A key for random number generation.
policy: Instance of a policy expected by the algorithm corresponding to
this builder.
environment_spec: A container for all relevant environment specs.
variable_source: A source providing the necessary actor parameters.
"""
@abc.abstractmethod
def make_policy(self, networks: Networks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool) -> Policy:
"""Creates the agent policy to be used for evaluation.
Args:
networks: struct describing the networks needed to generate the policy.
environment_spec: struct describing the specs of the environment.
      evaluation: This flag is present for consistency with the
        ActorLearnerBuilder, where data-generating actors and evaluation
        actors can behave differently. For OfflineBuilders, this should be set
        to True.
Returns:
Policy to be used for evaluation. The exact form of this object may differ
from one agent to the next; it could be a simple callable, a nest of
callables, or an ActorCore for instance.
"""
class ActorLearnerBuilder(OfflineBuilder[Networks, Policy, Sample],
Generic[Networks, Policy, Sample]):
"""Defines an interface for defining the components of an RL agent.
Implementations of this interface contain a complete specification of a
concrete RL agent. An instance of this class can be used to build an
RL agent which interacts with the environment either locally or in a
distributed setup.
"""
@abc.abstractmethod
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: Policy,
) -> List[reverb.Table]:
"""Create tables to insert data into.
Args:
environment_spec: A container for all relevant environment specs.
policy: Agent's policy which can be used to extract the extras_spec.
Returns:
The replay tables used to store the experience the agent uses to train.
"""
@abc.abstractmethod
def make_dataset_iterator(
self,
replay_client: reverb.Client,
) -> Iterator[Sample]:
"""Create a dataset iterator to use for learning/updating the agent."""
@abc.abstractmethod
def make_adder(
self,
replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[Policy],
) -> Optional[adders.Adder]:
"""Create an adder which records data generated by the actor/environment.
Args:
replay_client: Reverb Client which points to the replay server.
environment_spec: specs of the environment.
policy: Agent's policy which can be used to extract the extras_spec.
"""
# TODO(sabela): make the parameters non-optional.
@abc.abstractmethod
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: Policy,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> core.Actor:
"""Create an actor instance.
Args:
random_key: A key for random number generation.
policy: Instance of a policy expected by the algorithm corresponding to
this builder.
environment_spec: A container for all relevant environment specs.
variable_source: A source providing the necessary actor parameters.
adder: How data is recorded (e.g. added to replay).
"""
@abc.abstractmethod
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: Networks,
dataset: Iterator[Sample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
"""Creates an instance of the learner.
Args:
random_key: A key for random number generation.
networks: struct describing the networks needed by the learner; this can
be specific to the learner in question.
dataset: iterator over samples from replay.
logger_fn: factory providing loggers used for logging progress.
environment_spec: A container for all relevant environment specs.
replay_client: client which allows communication with replay. Note that
this is only intended to be used for updating priorities. Samples should
be obtained from `dataset`.
counter: a Counter which allows for recording of counts (learner steps,
actor steps, etc.) distributed throughout the agent.
"""
def make_policy(self,
networks: Networks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> Policy:
"""Creates the agent policy.
Creates the agent policy given the collection of network components and
environment spec. An optional boolean can be given to indicate if the
policy will be used for evaluation.
Args:
networks: struct describing the networks needed to generate the policy.
environment_spec: struct describing the specs of the environment.
evaluation: when true, a version of the policy to use for evaluation
should be returned. This is algorithm-specific so if an algorithm makes
no distinction between behavior and evaluation policies this boolean may
be ignored.
Returns:
Behavior policy or evaluation policy for the agent.
"""
# TODO(sabela): make abstract once all agents implement it.
del networks, environment_spec, evaluation
raise NotImplementedError
@dataclasses.dataclass(frozen=True)
class ActorLearnerBuilderWrapper(ActorLearnerBuilder[Networks, Policy, Sample],
Generic[Networks, Policy, Sample]):
"""An empty wrapper for ActorLearnerBuilder."""
wrapped: ActorLearnerBuilder[Networks, Policy, Sample]
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: Policy,
) -> List[reverb.Table]:
return self.wrapped.make_replay_tables(environment_spec, policy)
def make_dataset_iterator(
self,
replay_client: reverb.Client,
) -> Iterator[Sample]:
return self.wrapped.make_dataset_iterator(replay_client)
def make_adder(
self,
replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[Policy],
) -> Optional[adders.Adder]:
return self.wrapped.make_adder(replay_client, environment_spec, policy)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: Policy,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> core.Actor:
return self.wrapped.make_actor(random_key, policy, environment_spec,
variable_source, adder)
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: Networks,
dataset: Iterator[Sample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
return self.wrapped.make_learner(random_key, networks, dataset, logger_fn,
environment_spec, replay_client, counter)
def make_policy(self,
networks: Networks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> Policy:
return self.wrapped.make_policy(networks, environment_spec, evaluation)
# TODO(sinopalnikov): deprecated, migrate all users and remove.
GenericActorLearnerBuilder = ActorLearnerBuilder
|
acme-master
|
acme/agents/jax/builders.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ActorCore interface definition."""
import dataclasses
from typing import Callable, Generic, Mapping, Tuple, TypeVar, Union
from acme import types
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.jax.types import PRNGKey
import chex
import jax
import jax.numpy as jnp
NoneType = type(None)
# The state of the actor. This could include recurrent network state or any
# other state which needs to be propagated through the select_action calls.
State = TypeVar('State')
# The extras to be passed to the observe method.
Extras = TypeVar('Extras')
RecurrentState = TypeVar('RecurrentState')
SelectActionFn = Callable[
[networks_lib.Params, networks_lib.Observation, State],
Tuple[networks_lib.Action, State]]
@dataclasses.dataclass
class ActorCore(Generic[State, Extras]):
"""Pure functions that define the algorithm-specific actor functionality."""
init: Callable[[PRNGKey], State]
select_action: SelectActionFn
get_extras: Callable[[State], Extras]
# A simple feed-forward policy which produces no extras and takes only a
# PRNGKey as its state.
FeedForwardPolicy = Callable[
[networks_lib.Params, PRNGKey, networks_lib.Observation],
networks_lib.Action]
FeedForwardPolicyWithExtra = Callable[
[networks_lib.Params, PRNGKey, networks_lib.Observation],
Tuple[networks_lib.Action, types.NestedArray]]
RecurrentPolicy = Callable[[
networks_lib.Params, PRNGKey, networks_lib
.Observation, RecurrentState
], Tuple[networks_lib.Action, RecurrentState]]
Policy = Union[FeedForwardPolicy, FeedForwardPolicyWithExtra, RecurrentPolicy]
def batched_feed_forward_to_actor_core(
policy: FeedForwardPolicy) -> ActorCore[PRNGKey, Tuple[()]]:
"""A convenience adaptor from FeedForwardPolicy to ActorCore."""
def select_action(params: networks_lib.Params,
observation: networks_lib.Observation,
state: PRNGKey):
rng = state
rng1, rng2 = jax.random.split(rng)
observation = utils.add_batch_dim(observation)
action = utils.squeeze_batch_dim(policy(params, rng1, observation))
return action, rng2
def init(rng: PRNGKey) -> PRNGKey:
return rng
def get_extras(unused_rng: PRNGKey) -> Tuple[()]:
return ()
return ActorCore(init=init, select_action=select_action,
get_extras=get_extras)
@chex.dataclass(frozen=True, mappable_dataclass=False)
class SimpleActorCoreStateWithExtras:
rng: PRNGKey
extras: Mapping[str, jnp.ndarray]
def unvectorize_select_action(actor_core: ActorCore) -> ActorCore:
"""Makes an actor core's select_action method expect unbatched arguments."""
def unvectorized_select_action(
params: networks_lib.Params,
observations: networks_lib.Observation,
state: State,
) -> Tuple[networks_lib.Action, State]:
observations, state = utils.add_batch_dim((observations, state))
actions, state = actor_core.select_action(params, observations, state)
return utils.squeeze_batch_dim((actions, state))
return ActorCore(
init=actor_core.init,
select_action=unvectorized_select_action,
get_extras=actor_core.get_extras)
def batched_feed_forward_with_extras_to_actor_core(
policy: FeedForwardPolicyWithExtra
) -> ActorCore[SimpleActorCoreStateWithExtras, Mapping[str, jnp.ndarray]]:
"""A convenience adaptor from FeedForwardPolicy to ActorCore."""
def select_action(params: networks_lib.Params,
observation: networks_lib.Observation,
state: SimpleActorCoreStateWithExtras):
rng = state.rng
rng1, rng2 = jax.random.split(rng)
observation = utils.add_batch_dim(observation)
action, extras = utils.squeeze_batch_dim(policy(params, rng1, observation))
return action, SimpleActorCoreStateWithExtras(rng2, extras)
def init(rng: PRNGKey) -> SimpleActorCoreStateWithExtras:
return SimpleActorCoreStateWithExtras(rng, {})
def get_extras(
state: SimpleActorCoreStateWithExtras) -> Mapping[str, jnp.ndarray]:
return state.extras
return ActorCore(init=init, select_action=select_action,
get_extras=get_extras)
@chex.dataclass(frozen=True, mappable_dataclass=False)
class SimpleActorCoreRecurrentState(Generic[RecurrentState]):
rng: PRNGKey
recurrent_state: RecurrentState
def batched_recurrent_to_actor_core(
recurrent_policy: RecurrentPolicy, initial_core_state: RecurrentState
) -> ActorCore[SimpleActorCoreRecurrentState[RecurrentState], Mapping[
str, jnp.ndarray]]:
"""Returns ActorCore for a recurrent policy."""
def select_action(params: networks_lib.Params,
observation: networks_lib.Observation,
state: SimpleActorCoreRecurrentState[RecurrentState]):
# TODO(b/161332815): Make JAX Actor work with batched or unbatched inputs.
rng = state.rng
rng, policy_rng = jax.random.split(rng)
observation = utils.add_batch_dim(observation)
recurrent_state = utils.add_batch_dim(state.recurrent_state)
action, new_recurrent_state = utils.squeeze_batch_dim(recurrent_policy(
params, policy_rng, observation, recurrent_state))
return action, SimpleActorCoreRecurrentState(rng, new_recurrent_state)
initial_core_state = utils.squeeze_batch_dim(initial_core_state)
def init(rng: PRNGKey) -> SimpleActorCoreRecurrentState[RecurrentState]:
return SimpleActorCoreRecurrentState(rng, initial_core_state)
def get_extras(
state: SimpleActorCoreRecurrentState[RecurrentState]
) -> Mapping[str, jnp.ndarray]:
return {'core_state': state.recurrent_state}
return ActorCore(init=init, select_action=select_action,
get_extras=get_extras)
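# Usage sketch (illustrative only, not part of the library): adapting a
# stateless policy to an ActorCore. The linear argmax "policy" and its params
# dict below are toy stand-ins for a real policy network.
if __name__ == '__main__':
  def toy_policy(params: networks_lib.Params, key: PRNGKey,
                 observation: networks_lib.Observation) -> networks_lib.Action:
    del key  # The toy policy is deterministic.
    return jnp.argmax(observation @ params['w'], axis=-1)

  ff_core = batched_feed_forward_to_actor_core(toy_policy)
  state = ff_core.init(jax.random.PRNGKey(0))
  params = {'w': jnp.ones((4, 3))}
  observation = jnp.zeros((4,), dtype=jnp.float32)  # Unbatched observation.
  action, state = ff_core.select_action(params, observation, state)
  print(action, ff_core.get_extras(state))  # 0 ()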
|
acme-master
|
acme/agents/jax/actor_core.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX agents."""
|
acme-master
|
acme/agents/jax/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple JAX actors."""
from typing import Generic, Optional
from acme import adders
from acme import core
from acme import types
from acme.agents.jax import actor_core
from acme.jax import networks as network_lib
from acme.jax import utils
from acme.jax import variable_utils
import dm_env
import jax
class GenericActor(core.Actor, Generic[actor_core.State, actor_core.Extras]):
"""A generic actor implemented on top of ActorCore.
An actor based on a policy which takes observations and outputs actions. It
also adds experiences to replay and updates the actor weights from the policy
on the learner.
"""
def __init__(
self,
actor: actor_core.ActorCore[actor_core.State, actor_core.Extras],
random_key: network_lib.PRNGKey,
variable_client: Optional[variable_utils.VariableClient],
adder: Optional[adders.Adder] = None,
jit: bool = True,
backend: Optional[str] = 'cpu',
per_episode_update: bool = False
):
"""Initializes a feed forward actor.
Args:
actor: actor core.
random_key: Random key.
variable_client: The variable client to get policy parameters from.
adder: An adder to add experiences to.
jit: Whether or not to jit the passed ActorCore's pure functions.
backend: Which backend to use when jitting the policy.
per_episode_update: if True, updates variable client params once at the
beginning of each episode
"""
self._random_key = random_key
self._variable_client = variable_client
self._adder = adder
self._state = None
# Unpack ActorCore, jitting if requested.
if jit:
self._init = jax.jit(actor.init, backend=backend)
self._policy = jax.jit(actor.select_action, backend=backend)
else:
self._init = actor.init
self._policy = actor.select_action
self._get_extras = actor.get_extras
self._per_episode_update = per_episode_update
@property
def _params(self):
return self._variable_client.params if self._variable_client else []
def select_action(self,
observation: network_lib.Observation) -> types.NestedArray:
action, self._state = self._policy(self._params, observation, self._state)
return utils.to_numpy(action)
def observe_first(self, timestep: dm_env.TimeStep):
self._random_key, key = jax.random.split(self._random_key)
self._state = self._init(key)
if self._adder:
self._adder.add_first(timestep)
if self._variable_client and self._per_episode_update:
self._variable_client.update_and_wait()
def observe(self, action: network_lib.Action, next_timestep: dm_env.TimeStep):
if self._adder:
self._adder.add(
action, next_timestep, extras=self._get_extras(self._state))
def update(self, wait: bool = False):
if self._variable_client and not self._per_episode_update:
self._variable_client.update(wait)
|
acme-master
|
acme/agents/jax/actors.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for actors."""
from typing import Optional, Tuple
from acme import environment_loop
from acme import specs
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.jax import utils
from acme.jax import variable_utils
from acme.testing import fakes
import dm_env
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
def _make_fake_env() -> dm_env.Environment:
env_spec = specs.EnvironmentSpec(
observations=specs.Array(shape=(10, 5), dtype=np.float32),
actions=specs.DiscreteArray(num_values=3),
rewards=specs.Array(shape=(), dtype=np.float32),
discounts=specs.BoundedArray(
shape=(), dtype=np.float32, minimum=0., maximum=1.),
)
return fakes.Environment(env_spec, episode_length=10)
class ActorTest(parameterized.TestCase):
@parameterized.named_parameters(
('policy', False),
('policy_with_extras', True))
def test_feedforward(self, has_extras):
environment = _make_fake_env()
env_spec = specs.make_environment_spec(environment)
def policy(inputs: jnp.ndarray):
action_values = hk.Sequential([
hk.Flatten(),
hk.Linear(env_spec.actions.num_values),
])(
inputs)
action = jnp.argmax(action_values, axis=-1)
if has_extras:
return action, (action_values,)
else:
return action
policy = hk.transform(policy)
rng = hk.PRNGSequence(1)
dummy_obs = utils.add_batch_dim(utils.zeros_like(env_spec.observations))
params = policy.init(next(rng), dummy_obs)
variable_source = fakes.VariableSource(params)
variable_client = variable_utils.VariableClient(variable_source, 'policy')
if has_extras:
actor_core = actor_core_lib.batched_feed_forward_with_extras_to_actor_core(
policy.apply)
else:
actor_core = actor_core_lib.batched_feed_forward_to_actor_core(
policy.apply)
actor = actors.GenericActor(
actor_core,
random_key=jax.random.PRNGKey(1),
variable_client=variable_client)
loop = environment_loop.EnvironmentLoop(environment, actor)
loop.run(20)
def _transform_without_rng(f):
return hk.without_apply_rng(hk.transform(f))
class RecurrentActorTest(absltest.TestCase):
def test_recurrent(self):
environment = _make_fake_env()
env_spec = specs.make_environment_spec(environment)
output_size = env_spec.actions.num_values
obs = utils.add_batch_dim(utils.zeros_like(env_spec.observations))
rng = hk.PRNGSequence(1)
@_transform_without_rng
def network(inputs: jnp.ndarray, state: hk.LSTMState):
return hk.DeepRNN([hk.Reshape([-1], preserve_dims=1),
hk.LSTM(output_size)])(inputs, state)
@_transform_without_rng
def initial_state(batch_size: Optional[int] = None):
network = hk.DeepRNN([hk.Reshape([-1], preserve_dims=1),
hk.LSTM(output_size)])
return network.initial_state(batch_size)
initial_state = initial_state.apply(initial_state.init(next(rng)), 1)
params = network.init(next(rng), obs, initial_state)
def policy(
params: jnp.ndarray,
key: jnp.ndarray,
observation: jnp.ndarray,
core_state: hk.LSTMState
) -> Tuple[jnp.ndarray, hk.LSTMState]:
del key # Unused for test-case deterministic policy.
action_values, core_state = network.apply(params, observation, core_state)
actions = jnp.argmax(action_values, axis=-1)
return actions, core_state
variable_source = fakes.VariableSource(params)
variable_client = variable_utils.VariableClient(variable_source, 'policy')
actor_core = actor_core_lib.batched_recurrent_to_actor_core(
policy, initial_state)
actor = actors.GenericActor(actor_core, jax.random.PRNGKey(1),
variable_client)
loop = environment_loop.EnvironmentLoop(environment, actor)
loop.run(20)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/jax/actors_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility classes for input normalization."""
import dataclasses
import functools
from typing import Any, Callable, Generic, Iterator, List, Optional, Tuple
import acme
from acme import adders
from acme import core
from acme import specs
from acme import types
from acme.agents.jax import builders
from acme.jax import networks as networks_lib
from acme.jax import running_statistics
from acme.jax import variable_utils
from acme.jax.types import Networks, Policy # pylint: disable=g-multiple-import
from acme.utils import counting
from acme.utils import loggers
import dm_env
import jax
import reverb
from typing_extensions import Protocol
_NORMALIZATION_VARIABLES = 'normalization_variables'
# Wrapping the network instead might look more straightforward, but then
# different implementations would be needed for feed-forward and
# recurrent networks.
class NormalizationActorWrapper(core.Actor):
"""An actor wrapper that normalizes observations before applying policy."""
def __init__(self,
wrapped_actor: core.Actor,
variable_source: core.VariableSource,
max_abs_observation: Optional[float],
update_period: int = 1,
backend: Optional[str] = None):
self._wrapped_actor = wrapped_actor
self._variable_client = variable_utils.VariableClient(
variable_source,
key=_NORMALIZATION_VARIABLES,
update_period=update_period,
device=backend)
self._apply_normalization = jax.jit(
functools.partial(
running_statistics.normalize, max_abs_value=max_abs_observation),
backend=backend)
def select_action(self, observation: types.NestedArray) -> types.NestedArray:
self._variable_client.update()
observation_stats = self._variable_client.params
observation = self._apply_normalization(observation, observation_stats)
return self._wrapped_actor.select_action(observation)
def observe_first(self, timestep: dm_env.TimeStep):
return self._wrapped_actor.observe_first(timestep)
def observe(
self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
):
return self._wrapped_actor.observe(action, next_timestep)
def update(self, wait: bool = False):
return self._wrapped_actor.update(wait)
@dataclasses.dataclass
class NormalizationLearnerWrapperState:
wrapped_learner_state: Any
observation_running_statistics: running_statistics.RunningStatisticsState
class NormalizationLearnerWrapper(core.Learner, core.Saveable):
"""A learner wrapper that normalizes observations using running statistics."""
def __init__(self, learner_factory: Callable[[Iterator[reverb.ReplaySample]],
acme.Learner],
iterator: Iterator[reverb.ReplaySample],
environment_spec: specs.EnvironmentSpec,
max_abs_observation: Optional[float]):
def normalize_sample(
observation_statistics: running_statistics.RunningStatisticsState,
sample: reverb.ReplaySample
) -> Tuple[running_statistics.RunningStatisticsState, reverb.ReplaySample]:
observation = sample.data.observation
observation_statistics = running_statistics.update(
observation_statistics, observation)
observation = running_statistics.normalize(
observation,
observation_statistics,
max_abs_value=max_abs_observation)
sample = reverb.ReplaySample(
sample.info, sample.data._replace(observation=observation))
if hasattr(sample.data, 'next_observation'):
next_observation = running_statistics.normalize(
sample.data.next_observation,
observation_statistics,
max_abs_value=max_abs_observation)
sample = reverb.ReplaySample(
sample.info,
sample.data._replace(next_observation=next_observation))
return observation_statistics, sample
self._observation_running_statistics = running_statistics.init_state(
environment_spec.observations)
self._normalize_sample = jax.jit(normalize_sample)
normalizing_iterator = (
self._normalize_sample_and_update(sample) for sample in iterator)
self._wrapped_learner = learner_factory(normalizing_iterator)
def _normalize_sample_and_update(
self, sample: reverb.ReplaySample) -> reverb.ReplaySample:
self._observation_running_statistics, sample = self._normalize_sample(
self._observation_running_statistics, sample)
return sample
def step(self):
self._wrapped_learner.step()
def get_variables(self, names: List[str]) -> List[types.NestedArray]:
stats = self._observation_running_statistics
    # Make sure to only pass mean and std to minimize traffic.
mean_std = running_statistics.NestedMeanStd(mean=stats.mean, std=stats.std)
normalization_variables = {_NORMALIZATION_VARIABLES: mean_std}
learner_names = [
name for name in names if name not in normalization_variables
]
learner_variables = dict(
zip(learner_names, self._wrapped_learner.get_variables(
learner_names))) if learner_names else {}
return [
normalization_variables.get(name, learner_variables.get(name, None))
for name in names
]
def save(self) -> NormalizationLearnerWrapperState:
return NormalizationLearnerWrapperState(
wrapped_learner_state=self._wrapped_learner.save(),
observation_running_statistics=self._observation_running_statistics)
def restore(self, state: NormalizationLearnerWrapperState):
self._wrapped_learner.restore(state.wrapped_learner_state)
self._observation_running_statistics = state.observation_running_statistics
@dataclasses.dataclass
class NormalizationBuilder(Generic[Networks, Policy],
builders.ActorLearnerBuilder[Networks, Policy,
reverb.ReplaySample]):
"""Builder wrapper that normalizes observations using running mean/std."""
builder: builders.ActorLearnerBuilder[Networks, Policy, reverb.ReplaySample]
max_abs_observation: Optional[float] = 10.0
statistics_update_period: int = 100
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: Policy,
) -> List[reverb.Table]:
return self.builder.make_replay_tables(environment_spec, policy)
def make_dataset_iterator(
self, replay_client: reverb.Client) -> Iterator[reverb.ReplaySample]:
return self.builder.make_dataset_iterator(replay_client)
def make_adder(self, replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[Policy]) -> Optional[adders.Adder]:
return self.builder.make_adder(replay_client, environment_spec, policy)
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: Networks,
dataset: Iterator[reverb.ReplaySample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
learner_factory = functools.partial(
self.builder.make_learner,
random_key,
networks,
logger_fn=logger_fn,
environment_spec=environment_spec,
replay_client=replay_client,
counter=counter)
return NormalizationLearnerWrapper(
learner_factory=learner_factory,
iterator=dataset,
environment_spec=environment_spec,
max_abs_observation=self.max_abs_observation)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: Policy,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> core.Actor:
actor = self.builder.make_actor(random_key, policy, environment_spec,
variable_source, adder)
return NormalizationActorWrapper(
actor,
variable_source,
max_abs_observation=self.max_abs_observation,
update_period=self.statistics_update_period,
backend='cpu')
def make_policy(self,
networks: Networks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> Policy:
return self.builder.make_policy(
networks=networks,
environment_spec=environment_spec,
evaluation=evaluation)
@dataclasses.dataclass(frozen=True)
class NormalizationConfig:
"""Configuration for normalization based on running statistics.
Attributes:
    max_abs: Maximum absolute value to which normalized observations are
      clipped.
statistics_update_period: How often to update running statistics used for
normalization.
"""
max_abs: int = 10
statistics_update_period: int = 100
class InputNormalizerConfig(Protocol):
"""Protocol for the config of the agent that uses the normalization decorator.
  If the agent builder is decorated with `input_normalization_builder`, the
  agent config class must implement this protocol.
"""
@property
def input_normalization(self) -> Optional[NormalizationConfig]:
...
def input_normalization_builder(
actor_learner_builder_class: Callable[[InputNormalizerConfig],
builders.ActorLearnerBuilder]):
"""Builder class decorator that adds support for input normalization."""
# TODO(b/247075349): find a way to use ActorLearnerBuilderWrapper here.
class InputNormalizationBuilder(
Generic[builders.Networks, builders.Policy, builders.Sample],
builders.ActorLearnerBuilder[builders.Networks, builders.Policy,
builders.Sample]):
"""Builder wrapper that adds input normalization based on the config."""
def __init__(self, config: InputNormalizerConfig):
builder = actor_learner_builder_class(config)
if config.input_normalization:
builder = NormalizationBuilder(
builder,
max_abs_observation=config.input_normalization.max_abs,
statistics_update_period=config.input_normalization
.statistics_update_period)
self.wrapped = builder
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: builders.Policy,
) -> List[reverb.Table]:
return self.wrapped.make_replay_tables(environment_spec, policy)
def make_dataset_iterator(
self,
replay_client: reverb.Client,
) -> Iterator[builders.Sample]:
return self.wrapped.make_dataset_iterator(replay_client)
def make_adder(
self,
replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[builders.Policy],
) -> Optional[adders.Adder]:
return self.wrapped.make_adder(replay_client, environment_spec, policy)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: builders.Policy,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> core.Actor:
return self.wrapped.make_actor(random_key, policy, environment_spec,
variable_source, adder)
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: Networks,
dataset: Iterator[builders.Sample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
return self.wrapped.make_learner(random_key, networks, dataset, logger_fn,
environment_spec, replay_client, counter)
def make_policy(self,
networks: builders.Networks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> builders.Policy:
return self.wrapped.make_policy(networks, environment_spec, evaluation)
return InputNormalizationBuilder
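# Usage sketch (illustration only, not part of the library). `_ExampleConfig`
# is a hypothetical placeholder: any config exposing an `input_normalization`
# field satisfies the InputNormalizerConfig protocol, so its builder class can
# be decorated with `input_normalization_builder`.
if __name__ == '__main__':
  @dataclasses.dataclass
  class _ExampleConfig:
    """Hypothetical agent config used only for this sketch."""
    learning_rate: float = 1e-3
    input_normalization: Optional[NormalizationConfig] = None
  example_config = _ExampleConfig(
      input_normalization=NormalizationConfig(
          max_abs=5, statistics_update_period=50))
  # A builder decorated as below would read `example_config.input_normalization`
  # and wrap itself in a NormalizationBuilder when the field is set:
  #
  #   @input_normalization_builder
  #   class _ExampleBuilder(builders.ActorLearnerBuilder):
  #     ...
  #
  #   builder = _ExampleBuilder(example_config)
  print(example_config.input_normalization)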
|
acme-master
|
acme/agents/jax/normalization.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiagent implementations."""
|
acme-master
|
acme/agents/jax/multiagent/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decentralized multiagent config."""
import dataclasses
from typing import Dict
from acme.multiagent import types
@dataclasses.dataclass
class DecentralizedMultiagentConfig:
"""Configuration options for decentralized multiagent."""
sub_agent_configs: Dict[types.AgentID, types.AgentConfig]
batch_size: int = 256
prefetch_size: int = 2
|
acme-master
|
acme/agents/jax/multiagent/decentralized/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decentralized multiagent configuration."""
from acme.agents.jax.multiagent.decentralized.builder import DecentralizedMultiAgentBuilder
from acme.agents.jax.multiagent.decentralized.config import DecentralizedMultiagentConfig
from acme.agents.jax.multiagent.decentralized.factories import builder_factory
from acme.agents.jax.multiagent.decentralized.factories import default_config_factory
from acme.agents.jax.multiagent.decentralized.factories import DefaultSupportedAgent
from acme.agents.jax.multiagent.decentralized.factories import network_factory
from acme.agents.jax.multiagent.decentralized.factories import policy_network_factory
|
acme-master
|
acme/agents/jax/multiagent/decentralized/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decentralized multiagent factories.
Used to unify agent initialization for both local and distributed layouts.
"""
import enum
from typing import Any, Dict, Optional
from acme import specs
from acme.adders import reverb as adders_reverb
from acme.agents.jax import builders as jax_builders
from acme.agents.jax import ppo
from acme.agents.jax import sac
from acme.agents.jax import td3
from acme.multiagent import types as ma_types
from acme.multiagent import utils as ma_utils
class DefaultSupportedAgent(enum.Enum):
"""Agents which have default initializers supported below."""
TD3 = 'TD3'
SAC = 'SAC'
PPO = 'PPO'
def init_default_network(
agent_type: DefaultSupportedAgent,
agent_spec: specs.EnvironmentSpec) -> ma_types.Networks:
"""Returns default networks for a single agent."""
if agent_type == DefaultSupportedAgent.TD3:
return td3.make_networks(agent_spec)
elif agent_type == DefaultSupportedAgent.SAC:
return sac.make_networks(agent_spec)
elif agent_type == DefaultSupportedAgent.PPO:
return ppo.make_networks(agent_spec)
else:
raise ValueError(f'Unsupported agent type: {agent_type}.')
def init_default_policy_network(
agent_type: DefaultSupportedAgent,
network: ma_types.Networks,
agent_spec: specs.EnvironmentSpec,
config: ma_types.AgentConfig,
eval_mode: ma_types.EvalMode = False) -> ma_types.PolicyNetwork:
"""Returns default policy network for a single agent."""
if agent_type == DefaultSupportedAgent.TD3:
sigma = 0. if eval_mode else config.sigma
return td3.get_default_behavior_policy(
network, agent_spec.actions, sigma=sigma)
elif agent_type == DefaultSupportedAgent.SAC:
return sac.apply_policy_and_sample(network, eval_mode=eval_mode)
elif agent_type == DefaultSupportedAgent.PPO:
return ppo.make_inference_fn(network, evaluation=eval_mode)
else:
raise ValueError(f'Unsupported agent type: {agent_type}.')
def init_default_builder(
agent_type: DefaultSupportedAgent,
agent_config: ma_types.AgentConfig,
) -> jax_builders.GenericActorLearnerBuilder:
"""Returns default builder for a single agent."""
if agent_type == DefaultSupportedAgent.TD3:
assert isinstance(agent_config, td3.TD3Config)
return td3.TD3Builder(agent_config)
elif agent_type == DefaultSupportedAgent.SAC:
assert isinstance(agent_config, sac.SACConfig)
return sac.SACBuilder(agent_config)
elif agent_type == DefaultSupportedAgent.PPO:
assert isinstance(agent_config, ppo.PPOConfig)
return ppo.PPOBuilder(agent_config)
else:
raise ValueError(f'Unsupported agent type: {agent_type}.')
def init_default_config(
agent_type: DefaultSupportedAgent,
config_overrides: Dict[str, Any]) -> ma_types.AgentConfig:
"""Returns default config for a single agent."""
if agent_type == DefaultSupportedAgent.TD3:
return td3.TD3Config(**config_overrides)
elif agent_type == DefaultSupportedAgent.SAC:
return sac.SACConfig(**config_overrides)
elif agent_type == DefaultSupportedAgent.PPO:
return ppo.PPOConfig(**config_overrides)
else:
raise ValueError(f'Unsupported agent type: {agent_type}.')
def default_config_factory(
agent_types: Dict[ma_types.AgentID, DefaultSupportedAgent],
batch_size: int,
config_overrides: Optional[Dict[ma_types.AgentID, Dict[str, Any]]] = None
) -> Dict[ma_types.AgentID, ma_types.AgentConfig]:
"""Returns default configs for all agents.
Args:
agent_types: dict mapping agent IDs to their type.
batch_size: shared batch size for all agents.
config_overrides: dict mapping (potentially a subset of) agent IDs to their
config overrides. This should include any mandatory config parameters for
the agents that do not have default values.
"""
configs = {}
for agent_id, agent_type in agent_types.items():
agent_config_overrides = dict(
# batch_size is required by LocalLayout, which is shared amongst
# the agents. Hence, we enforce a shared batch_size in builders.
batch_size=batch_size,
# Unique replay_table_name per agent.
replay_table_name=f'{adders_reverb.DEFAULT_PRIORITY_TABLE}_agent{agent_id}'
)
if config_overrides is not None and agent_id in config_overrides:
agent_config_overrides = {
**config_overrides[agent_id],
**agent_config_overrides # Comes second to ensure batch_size override
}
configs[agent_id] = init_default_config(agent_type, agent_config_overrides)
return configs
def network_factory(
environment_spec: specs.EnvironmentSpec,
agent_types: Dict[ma_types.AgentID, ma_types.GenericAgent],
init_network_fn: Optional[ma_types.InitNetworkFn] = None
) -> ma_types.MultiAgentNetworks:
"""Returns networks for all agents.
Args:
environment_spec: environment spec.
agent_types: dict mapping agent IDs to their type.
init_network_fn: optional callable that handles the network initialization
for all sub-agents. If this is not supplied, a default network initializer
is used (if it is supported for the designated agent type).
"""
init_fn = init_network_fn or init_default_network
networks = {}
for agent_id, agent_type in agent_types.items():
single_agent_spec = ma_utils.get_agent_spec(environment_spec, agent_id)
networks[agent_id] = init_fn(agent_type, single_agent_spec)
return networks
def policy_network_factory(
networks: ma_types.MultiAgentNetworks,
environment_spec: specs.EnvironmentSpec,
agent_types: Dict[ma_types.AgentID, ma_types.GenericAgent],
agent_configs: Dict[ma_types.AgentID, ma_types.AgentConfig],
eval_mode: ma_types.EvalMode,
init_policy_network_fn: Optional[ma_types.InitPolicyNetworkFn] = None
) -> ma_types.MultiAgentPolicyNetworks:
"""Returns default policy networks for all agents.
Args:
networks: dict mapping agent IDs to their networks.
environment_spec: environment spec.
agent_types: dict mapping agent IDs to their type.
agent_configs: dict mapping agent IDs to their config.
eval_mode: whether the policy should be initialized in evaluation mode (only
used if an init_policy_network_fn is not explicitly supplied).
init_policy_network_fn: optional callable that handles the policy network
initialization for all sub-agents. If this is not supplied, a default
policy network initializer is used (if it is supported for the designated
agent type).
"""
init_fn = init_policy_network_fn or init_default_policy_network
policy_networks = {}
for agent_id, agent_type in agent_types.items():
single_agent_spec = ma_utils.get_agent_spec(environment_spec, agent_id)
policy_networks[agent_id] = init_fn(agent_type, networks[agent_id],
single_agent_spec,
agent_configs[agent_id], eval_mode)
return policy_networks
def builder_factory(
agent_types: Dict[ma_types.AgentID, ma_types.GenericAgent],
agent_configs: Dict[ma_types.AgentID, ma_types.AgentConfig],
init_builder_fn: Optional[ma_types.InitBuilderFn] = None
) -> Dict[ma_types.AgentID, jax_builders.GenericActorLearnerBuilder]:
"""Returns default policy networks for all agents."""
init_fn = init_builder_fn or init_default_builder
builders = {}
for agent_id, agent_type in agent_types.items():
builders[agent_id] = init_fn(agent_type, agent_configs[agent_id])
return builders
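# Usage sketch (illustration only): per-agent configs and builders for two SAC
# sub-agents sharing a batch size. Agent IDs are arbitrary strings; a real
# setup would additionally call network_factory and policy_network_factory
# with a multiagent environment spec.
if __name__ == '__main__':
  example_agent_types = {
      '0': DefaultSupportedAgent.SAC,
      '1': DefaultSupportedAgent.SAC,
  }
  example_configs = default_config_factory(example_agent_types, batch_size=64)
  example_builders = builder_factory(example_agent_types, example_configs)
  print({agent: type(b).__name__ for agent, b in example_builders.items()})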
|
acme-master
|
acme/agents/jax/multiagent/decentralized/factories.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX multiagent builders."""
from typing import Dict, Iterator, List, Mapping, Optional, Sequence
from acme import adders
from acme import core
from acme import specs
from acme import types
from acme.agents.jax import builders as acme_builders
from acme.agents.jax.multiagent.decentralized import actor
from acme.agents.jax.multiagent.decentralized import factories as decentralized_factories
from acme.agents.jax.multiagent.decentralized import learner_set
from acme.jax import networks as networks_lib
from acme.multiagent import types as ma_types
from acme.multiagent import utils as ma_utils
from acme.utils import counting
from acme.utils import iterator_utils
from acme.utils import loggers
import jax
import reverb
VARIABLE_SEPARATOR = '-'
class PrefixedVariableSource(core.VariableSource):
"""Wraps a variable source to add a pre-defined prefix to all names."""
def __init__(self, source: core.VariableSource, prefix: str):
self._source = source
self._prefix = prefix
def get_variables(self, names: Sequence[str]) -> List[types.NestedArray]:
return self._source.get_variables([self._prefix + name for name in names])
class DecentralizedMultiAgentBuilder(
acme_builders.GenericActorLearnerBuilder[
ma_types.MultiAgentNetworks,
ma_types.MultiAgentPolicyNetworks,
ma_types.MultiAgentSample]):
"""Builder for decentralized multiagent setup."""
def __init__(
self,
agent_types: Dict[ma_types.AgentID, ma_types.GenericAgent],
agent_configs: Dict[ma_types.AgentID, ma_types.AgentConfig],
init_policy_network_fn: Optional[ma_types.InitPolicyNetworkFn] = None):
"""Initializer.
Args:
agent_types: Dict mapping agent IDs to their types.
agent_configs: Dict mapping agent IDs to their configs.
init_policy_network_fn: Optional custom policy network initializer
function.
"""
self._agent_types = agent_types
self._agent_configs = agent_configs
self._builders = decentralized_factories.builder_factory(
agent_types, agent_configs)
self._num_agents = len(self._builders)
self._init_policy_network_fn = init_policy_network_fn
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: ma_types.MultiAgentPolicyNetworks,
) -> List[reverb.Table]:
"""Returns replay tables for all agents.
Args:
environment_spec: the (multiagent) environment spec, which will be
factorized into single-agent specs for replay table initialization.
policy: the (multiagent) mapping from agent ID to the corresponding
agent's policy, used to get the correct extras_spec.
"""
replay_tables = []
for agent_id, builder in self._builders.items():
single_agent_spec = ma_utils.get_agent_spec(environment_spec, agent_id)
replay_tables += builder.make_replay_tables(single_agent_spec,
policy[agent_id])
return replay_tables
def make_dataset_iterator(
self,
replay_client: reverb.Client) -> Iterator[ma_types.MultiAgentSample]:
# Zipping stores sub-iterators in the order dictated by
# self._builders.values(), which are insertion-ordered in Python3.7+.
# Hence, later unzipping (in make_learner()) and accessing the iterators
# via the same self._builders.items() dict ordering should be safe.
return zip(*[
b.make_dataset_iterator(replay_client) for b in self._builders.values()
])
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: ma_types.MultiAgentNetworks,
dataset: Iterator[ma_types.MultiAgentSample],
logger_fn: loggers.LoggerFactory,
environment_spec: Optional[specs.EnvironmentSpec] = None,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None
) -> learner_set.SynchronousDecentralizedLearnerSet:
"""Returns multiagent learner set.
Args:
random_key: random key.
networks: dict of networks, one per learner. Networks can be heterogeneous
(i.e., distinct in architecture) across learners.
dataset: list of iterators over samples from replay, one per learner.
logger_fn: factory providing loggers used for logging progress.
environment_spec: the (multiagent) environment spec, which will be
factorized into single-agent specs for replay table initialization.
replay_client: replay client that is shared amongst the sub-learners.
counter: a Counter which allows for recording of counts (learner steps,
actor steps, etc.) distributed throughout the agent.
"""
parent_counter = counter or counting.Counter()
sub_learners = {}
unzipped_dataset = iterator_utils.unzip_iterators(
dataset, num_sub_iterators=self._num_agents)
def make_logger_fn(agent_id: str) -> loggers.LoggerFactory:
"""Returns a logger factory for the subagent with the given id."""
def logger_factory(
label: loggers.LoggerLabel,
steps_key: Optional[loggers.LoggerStepsKey] = None,
instance: Optional[loggers.TaskInstance] = None) -> loggers.Logger:
return logger_fn(f'{label}{agent_id}', steps_key, instance)
return logger_factory
for i_dataset, (agent_id, builder) in enumerate(self._builders.items()):
counter = counting.Counter(parent_counter, prefix=f'{agent_id}')
single_agent_spec = ma_utils.get_agent_spec(environment_spec, agent_id)
random_key, learner_key = jax.random.split(random_key)
sub_learners[agent_id] = builder.make_learner(
learner_key,
networks[agent_id],
unzipped_dataset[i_dataset],
logger_fn=make_logger_fn(agent_id),
environment_spec=single_agent_spec,
replay_client=replay_client,
counter=counter)
return learner_set.SynchronousDecentralizedLearnerSet(
sub_learners, separator=VARIABLE_SEPARATOR)
def make_adder( # Internal pytype check.
self,
replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec] = None,
policy: Optional[ma_types.MultiAgentPolicyNetworks] = None,
) -> Mapping[ma_types.AgentID, Optional[adders.Adder]]:
del environment_spec, policy # Unused.
return {
agent_id:
b.make_adder(replay_client, environment_spec=None, policy=None)
for agent_id, b in self._builders.items()
}
def make_actor( # Internal pytype check.
self,
random_key: networks_lib.PRNGKey,
policy: ma_types.MultiAgentPolicyNetworks,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[Mapping[ma_types.AgentID, adders.Adder]] = None,
) -> core.Actor:
"""Returns simultaneous-acting multiagent actor instance.
Args:
random_key: random key.
policy: dict of policies, one for each actor. Policies can
be heterogeneous (i.e., distinct in architecture) across actors.
environment_spec: the (multiagent) environment spec, which will be
factorized into single-agent specs for replay table initialization.
variable_source: an optional LearnerSet. Each sub_actor pulls its local
variables from variable_source.
adder: how data is recorded (e.g., added to replay) for each actor.
"""
if adder is None:
adder = {agent_id: None for agent_id in policy.keys()}
sub_actors = {}
for agent_id, builder in self._builders.items():
single_agent_spec = ma_utils.get_agent_spec(environment_spec, agent_id)
random_key, actor_key = jax.random.split(random_key)
# Adds a prefix to each sub-actor's variable names to ensure the correct
# sub-learner is queried for variables.
sub_variable_source = PrefixedVariableSource(
variable_source, f'{agent_id}{VARIABLE_SEPARATOR}')
sub_actors[agent_id] = builder.make_actor(actor_key, policy[agent_id],
single_agent_spec,
sub_variable_source,
adder[agent_id])
return actor.SimultaneousActingMultiAgentActor(sub_actors)
def make_policy(
self,
networks: ma_types.MultiAgentNetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> ma_types.MultiAgentPolicyNetworks:
return decentralized_factories.policy_network_factory(
networks,
environment_spec,
self._agent_types,
self._agent_configs,
eval_mode=evaluation,
init_policy_network_fn=self._init_policy_network_fn)
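# Usage sketch (illustration only): constructing the multiagent builder only
# requires per-agent types and configs; environment specs, networks and replay
# clients are supplied later through the standard ActorLearnerBuilder methods.
if __name__ == '__main__':
  example_agent_types = {
      '0': decentralized_factories.DefaultSupportedAgent.SAC,
      '1': decentralized_factories.DefaultSupportedAgent.SAC,
  }
  example_configs = decentralized_factories.default_config_factory(
      example_agent_types, batch_size=64)
  example_builder = DecentralizedMultiAgentBuilder(example_agent_types,
                                                   example_configs)
  print(type(example_builder).__name__)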
|
acme-master
|
acme/agents/jax/multiagent/decentralized/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decentralized multiagent learnerset."""
import dataclasses
from typing import Any, Dict, List
from acme import core
from acme import types
from acme.multiagent import types as ma_types
LearnerState = Any
@dataclasses.dataclass
class SynchronousDecentralizedLearnerSetState:
"""State of a SynchronousDecentralizedLearnerSet."""
# States of the learners keyed by their names.
learner_states: Dict[ma_types.AgentID, LearnerState]
class SynchronousDecentralizedLearnerSet(core.Learner):
"""Creates a composed learner which wraps a set of local agent learners."""
def __init__(self,
learners: Dict[ma_types.AgentID, core.Learner],
separator: str = '-'):
"""Initializer.
Args:
learners: a dict specifying the learners for all sub-agents.
separator: separator character used to disambiguate sub-learner variables.
"""
self._learners = learners
self._separator = separator
def step(self):
for learner in self._learners.values():
learner.step()
def get_variables(self, names: List[str]) -> List[types.NestedArray]:
"""Return the named variables as a collection of (nested) numpy arrays.
    The variable names should be prefixed with the name of the child learners
    using the separator specified in the constructor, e.g. learner1-var when
    the separator is '-'.
Args:
names: args where each name is a string identifying a predefined subset of
the variables. The variables names should be prefixed with the name of
the learners using the separator specified in the constructor, e.g.
learner-var if the separator is -.
Returns:
A list of (nested) numpy arrays `variables` such that `variables[i]`
corresponds to the collection named by `names[i]`.
"""
variables = []
for name in names:
      # Note: if the separator is missing, learner_id == name, which is OK.
learner_id, _, variable_name = name.partition(self._separator)
learner = self._learners[learner_id]
variables.extend(learner.get_variables([variable_name]))
return variables
def save(self) -> SynchronousDecentralizedLearnerSetState:
return SynchronousDecentralizedLearnerSetState(learner_states={
name: learner.save() for name, learner in self._learners.items()
})
def restore(self, state: SynchronousDecentralizedLearnerSetState):
for name, learner in self._learners.items():
learner.restore(state.learner_states[name])
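# Usage sketch (illustration only): variable names are routed to sub-learners
# by splitting on the separator. `_StubLearner` is a hypothetical stand-in for
# a real per-agent learner and only implements the methods exercised here.
if __name__ == '__main__':
  class _StubLearner:
    def __init__(self, value):
      self._value = value
    def step(self):
      pass
    def get_variables(self, names):
      return [self._value for _ in names]
    def save(self):
      return self._value
    def restore(self, state):
      self._value = state
  example_set = SynchronousDecentralizedLearnerSet(
      {'agent0': _StubLearner(0), 'agent1': _StubLearner(1)}, separator='-')
  # 'agent0-policy' is served by the learner registered under 'agent0'.
  print(example_set.get_variables(['agent0-policy', 'agent1-policy']))  # [0, 1]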
|
acme-master
|
acme/agents/jax/multiagent/decentralized/learner_set.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decentralized multiagent actor."""
from typing import Dict
from acme import core
from acme.jax import networks
from acme.multiagent import types as ma_types
from acme.multiagent import utils as ma_utils
import dm_env
class SimultaneousActingMultiAgentActor(core.Actor):
"""Simultaneous-move actor (see README.md for expected environment interface)."""
def __init__(self, actors: Dict[ma_types.AgentID, core.Actor]):
"""Initializer.
Args:
actors: a dict specifying sub-actors.
"""
self._actors = actors
def select_action(
self, observation: Dict[ma_types.AgentID, networks.Observation]
) -> Dict[ma_types.AgentID, networks.Action]:
return {
actor_id: actor.select_action(observation[actor_id])
for actor_id, actor in self._actors.items()
}
def observe_first(self, timestep: dm_env.TimeStep):
for actor_id, actor in self._actors.items():
sub_timestep = ma_utils.get_agent_timestep(timestep, actor_id)
actor.observe_first(sub_timestep)
def observe(self, actions: Dict[ma_types.AgentID, networks.Action],
next_timestep: dm_env.TimeStep):
for actor_id, actor in self._actors.items():
sub_next_timestep = ma_utils.get_agent_timestep(next_timestep, actor_id)
actor.observe(actions[actor_id], sub_next_timestep)
def update(self, wait: bool = False):
for actor in self._actors.values():
actor.update(wait=wait)
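# Usage sketch (illustration only): observations and actions are dicts keyed by
# agent ID, each entry being routed to the corresponding sub-actor. `_EchoActor`
# is a hypothetical stand-in that simply returns its observation as the action.
if __name__ == '__main__':
  class _EchoActor(core.Actor):
    def select_action(self, observation):
      return observation
    def observe_first(self, timestep: dm_env.TimeStep):
      pass
    def observe(self, action, next_timestep: dm_env.TimeStep):
      pass
    def update(self, wait: bool = False):
      pass
  example_actor = SimultaneousActingMultiAgentActor(
      {'0': _EchoActor(), '1': _EchoActor()})
  print(example_actor.select_action({'0': 1.0, '1': 2.0}))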
|
acme-master
|
acme/agents/jax/multiagent/decentralized/actor.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TD3 agent learning from demonstrations."""
import dataclasses
from typing import Callable, Iterator
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import td3
from acme.agents.jax.lfd import builder
from acme.agents.jax.lfd import config
import reverb
@dataclasses.dataclass
class TD3fDConfig:
"""Configuration options specific to TD3 with demonstrations.
Attributes:
lfd_config: LfD config.
td3_config: TD3 config.
"""
lfd_config: config.LfdConfig
td3_config: td3.TD3Config
class TD3fDBuilder(builder.LfdBuilder[td3.TD3Networks,
actor_core_lib.FeedForwardPolicy,
reverb.ReplaySample]):
"""Builder for TD3 agent learning from demonstrations."""
def __init__(self, td3_fd_config: TD3fDConfig,
lfd_iterator_fn: Callable[[], Iterator[builder.LfdStep]]):
td3_builder = td3.TD3Builder(td3_fd_config.td3_config)
super().__init__(td3_builder, lfd_iterator_fn, td3_fd_config.lfd_config)
|
acme-master
|
acme/agents/jax/lfd/td3fd.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An adder useful in the context of Learning From Demonstrations.
This adder mixes the collected episodes with demonstrations coming from an
offline dataset.
TODO(damienv): Mixing demonstrations and collected episodes could also be
done when reading from the replay buffer. In that case, all the processing
applied by reverb should also be applied on the demonstrations.
Design-wise, both solutions make equal sense. The alternative solution
could be implemented later as well.
"""
from typing import Any, Iterator, Tuple
from acme import adders
from acme import types
import dm_env
class LfdAdder(adders.Adder):
"""Adder which adds from time to time some demonstrations.
Lfd stands for Learning From Demonstrations and is the same technique
as the one used in R2D3.
"""
def __init__(self,
adder: adders.Adder,
demonstrations: Iterator[Tuple[Any, dm_env.TimeStep]],
initial_insert_count: int,
demonstration_ratio: float):
"""LfdAdder constructor.
Args:
adder: The underlying adder used to add mixed episodes.
      demonstrations: An iterator over an infinite stream of
        (action, next_timestep) pairs. Episode boundaries are defined by
        TimeStep.FIRST and TimeStep.LAST markers. Note that the first action
        of an episode is
ignored. Note also that proper uniform sampling of demonstrations is the
responsibility of the iterator.
initial_insert_count: Number of steps of demonstrations to add before
adding any step of the collected episodes. Note that since only full
episodes can be added, this number of steps is only a target.
demonstration_ratio: Ratio of demonstration steps to add to the underlying
adder. ratio = num_demonstration_steps_added / total_num_steps_added
and must be in [0, 1).
        Note that this ratio is the desired ratio in steady state
and does not account for the initial inserts of demonstrations.
Note also that this ratio is only a target ratio since the granularity
is the episode.
"""
self._adder = adder
self._demonstrations = demonstrations
self._demonstration_ratio = demonstration_ratio
if demonstration_ratio < 0 or demonstration_ratio >= 1.:
raise ValueError('Invalid demonstration ratio.')
# Number of demonstration steps that should have been added to the replay
# buffer to meet the target demonstration ratio minus what has been really
# added.
# As a consequence:
# - when this delta is zero, the effective ratio exactly matches the desired
# ratio
# - when it is positive, more demonstrations need to be added to
# reestablish the balance
# The initial value is set so that after exactly initial_insert_count
# inserts of demonstration steps, _delta_demonstration_step_count will be
# zero.
self._delta_demonstration_step_count = (
(1. - self._demonstration_ratio) * initial_insert_count)
def reset(self):
self._adder.reset()
def _add_demonstration_episode(self):
_, timestep = next(self._demonstrations)
if not timestep.first():
raise ValueError('Expecting the start of an episode.')
self._adder.add_first(timestep)
self._delta_demonstration_step_count -= (1. - self._demonstration_ratio)
while not timestep.last():
action, timestep = next(self._demonstrations)
self._adder.add(action, timestep)
self._delta_demonstration_step_count -= (1. - self._demonstration_ratio)
# Reset is being called periodically to reset the connection to reverb.
# TODO(damienv, bshahr): Make the reset an internal detail of the reverb
# adder and remove it from the adder API.
self._adder.reset()
def add_first(self, timestep: dm_env.TimeStep):
while self._delta_demonstration_step_count > 0.:
self._add_demonstration_episode()
self._adder.add_first(timestep)
self._delta_demonstration_step_count += self._demonstration_ratio
def add(self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
extras: types.NestedArray = ()):
self._adder.add(action, next_timestep)
self._delta_demonstration_step_count += self._demonstration_ratio
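# Worked example (illustration only) of the ratio bookkeeping above, at step
# granularity and without initial inserts. With demonstration_ratio=0.2, every
# demonstration step decreases the counter by 0.8 and every collected step
# increases it by 0.2, so roughly one demonstration step is inserted for every
# four collected steps. The real adder applies the same bookkeeping at the
# granularity of whole episodes, hence the ratio is only a target.
if __name__ == '__main__':
  ratio = 0.2
  delta = 0.
  demo_steps = collected_steps = 0
  for _ in range(1000):
    if delta > 0.:
      demo_steps += 1
      delta -= 1. - ratio
    else:
      collected_steps += 1
      delta += ratio
  print(demo_steps / (demo_steps + collected_steps))  # Approximately 0.2.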
|
acme-master
|
acme/agents/jax/lfd/lfd_adder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LfD config."""
import dataclasses
@dataclasses.dataclass
class LfdConfig:
"""Configuration options for LfD.
Attributes:
initial_insert_count: Number of steps of demonstrations to add to the replay
buffer before adding any step of the collected episodes. Note that since
only full episodes can be added, this number of steps is only a target.
demonstration_ratio: Ratio of demonstration steps to add to the replay
buffer. ratio = num_demonstration_steps_added / total_num_steps_added.
The ratio must be in [0, 1).
      Note that this ratio is the desired ratio in steady state and does
      not account for the initial demonstration inserts.
Note also that this ratio is only a target ratio since the granularity
is the episode.
"""
initial_insert_count: int = 0
demonstration_ratio: float = 0.01
|
acme-master
|
acme/agents/jax/lfd/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lfd agents."""
from acme.agents.jax.lfd.builder import LfdBuilder
from acme.agents.jax.lfd.builder import LfdStep
from acme.agents.jax.lfd.config import LfdConfig
from acme.agents.jax.lfd.sacfd import SACfDBuilder
from acme.agents.jax.lfd.sacfd import SACfDConfig
from acme.agents.jax.lfd.td3fd import TD3fDBuilder
from acme.agents.jax.lfd.td3fd import TD3fDConfig
|
acme-master
|
acme/agents/jax/lfd/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builder enabling off-policy algorithms to learn from demonstrations."""
from typing import Any, Callable, Generic, Iterator, Tuple
from acme.agents.jax import builders
from acme.agents.jax.lfd import config as lfd_config
from acme.agents.jax.lfd import lfd_adder
import dm_env
LfdStep = Tuple[Any, dm_env.TimeStep]
class LfdBuilder(builders.ActorLearnerBuilder[builders.Networks,
builders.Policy,
builders.Sample,],
Generic[builders.Networks, builders.Policy, builders.Sample]):
"""Builder that enables Learning From demonstrations.
  This builder is not self-contained and requires an underlying builder
implementing an off-policy algorithm.
"""
def __init__(self, builder: builders.ActorLearnerBuilder[builders.Networks,
builders.Policy,
builders.Sample],
demonstrations_factory: Callable[[], Iterator[LfdStep]],
config: lfd_config.LfdConfig):
"""LfdBuilder constructor.
Args:
builder: The underlying builder implementing the off-policy algorithm.
demonstrations_factory: Factory returning an infinite stream (as an
iterator) of (action, next_timesteps). Episode boundaries in this stream
are given by timestep.first() and timestep.last(). Note that in the
distributed version of this algorithm, each actor is mixing the same
demonstrations with its online experience. This effectively results in
the demonstrations being replicated in the replay buffer as many times
as the number of actors being used.
config: LfD configuration.
"""
self._builder = builder
self._demonstrations_factory = demonstrations_factory
self._config = config
def make_replay_tables(self, *args, **kwargs):
return self._builder.make_replay_tables(*args, **kwargs)
def make_dataset_iterator(self, *args, **kwargs):
return self._builder.make_dataset_iterator(*args, **kwargs)
def make_adder(self, *args, **kwargs):
demonstrations = self._demonstrations_factory()
return lfd_adder.LfdAdder(self._builder.make_adder(*args, **kwargs),
demonstrations,
self._config.initial_insert_count,
self._config.demonstration_ratio)
def make_actor(self, *args, **kwargs):
return self._builder.make_actor(*args, **kwargs)
def make_learner(self, *args, **kwargs):
return self._builder.make_learner(*args, **kwargs)
def make_policy(self, *args, **kwargs):
return self._builder.make_policy(*args, **kwargs)
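# Usage sketch (illustration only): wrap an off-policy builder (SAC here,
# imported just for this sketch) so that its adder mixes demonstrations into
# the online experience. `_make_demonstrations` is a hypothetical placeholder;
# a real factory must yield an infinite stream of (action, next_timestep).
if __name__ == '__main__':
  from acme.agents.jax import sac  # Sketch-only import.
  def _make_demonstrations() -> Iterator[LfdStep]:
    raise NotImplementedError('Supply a real demonstration stream here.')
  example_builder = LfdBuilder(
      builder=sac.SACBuilder(sac.SACConfig()),
      demonstrations_factory=_make_demonstrations,
      config=lfd_config.LfdConfig(
          initial_insert_count=100, demonstration_ratio=0.05))
  print(type(example_builder).__name__)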
|
acme-master
|
acme/agents/jax/lfd/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests of the LfD adder."""
import collections
from acme import adders
from acme import types
from acme.agents.jax.lfd import lfd_adder
import dm_env
import numpy as np
from absl.testing import absltest
class TestStatisticsAdder(adders.Adder):
def __init__(self):
self.counts = collections.defaultdict(int)
def reset(self):
pass
def add_first(self, timestep: dm_env.TimeStep):
self.counts[int(timestep.observation[0])] += 1
def add(self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
extras: types.NestedArray = ()):
del action
del extras
self.counts[int(next_timestep.observation[0])] += 1
class LfdAdderTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._demonstration_episode_type = 1
self._demonstration_episode_length = 10
self._collected_episode_type = 2
self._collected_episode_length = 5
def generate_episode(self, episode_type, episode_index, length):
episode = []
action_dim = 8
obs_dim = 16
for k in range(length):
if k == 0:
action = None
else:
action = np.concatenate([
np.asarray([episode_type, episode_index], dtype=np.float32),
np.random.uniform(0., 1., (action_dim - 2,))])
observation = np.concatenate([
np.asarray([episode_type, episode_index], dtype=np.float32),
np.random.uniform(0., 1., (obs_dim - 2,))])
if k == 0:
timestep = dm_env.restart(observation)
elif k == length - 1:
timestep = dm_env.termination(0., observation)
else:
timestep = dm_env.transition(0., observation, 1.)
episode.append((action, timestep))
return episode
def generate_demonstration(self):
episode_index = 0
while True:
episode = self.generate_episode(self._demonstration_episode_type,
episode_index,
self._demonstration_episode_length)
for x in episode:
yield x
episode_index += 1
def test_adder(self):
stats_adder = TestStatisticsAdder()
demonstration_ratio = 0.2
initial_insert_count = 50
adder = lfd_adder.LfdAdder(
stats_adder,
self.generate_demonstration(),
initial_insert_count=initial_insert_count,
demonstration_ratio=demonstration_ratio)
num_episodes = 100
for episode_index in range(num_episodes):
episode = self.generate_episode(self._collected_episode_type,
episode_index,
self._collected_episode_length)
for k, (action, timestep) in enumerate(episode):
if k == 0:
adder.add_first(timestep)
if episode_index == 0:
self.assertGreaterEqual(
stats_adder.counts[self._demonstration_episode_type],
initial_insert_count - self._demonstration_episode_length)
self.assertLessEqual(
stats_adder.counts[self._demonstration_episode_type],
initial_insert_count + self._demonstration_episode_length)
else:
adder.add(action, timestep)
# Only 2 types of episodes.
self.assertLen(stats_adder.counts, 2)
total_count = (stats_adder.counts[self._demonstration_episode_type] +
stats_adder.counts[self._collected_episode_type])
    # The demonstration ratio does not account for the initial demonstration
    # insertion, so compute a ratio that takes it into account.
    target_ratio = (
        demonstration_ratio * float(total_count - initial_insert_count) +
        initial_insert_count) / float(total_count)
# Effective ratio of demonstrations.
effective_ratio = (
float(stats_adder.counts[self._demonstration_episode_type]) /
float(total_count))
# Only full episodes can be fed to the adder so the effective ratio
# might be slightly different from the requested demonstration ratio.
min_ratio = (target_ratio -
self._demonstration_episode_length / float(total_count))
max_ratio = (target_ratio +
self._demonstration_episode_length / float(total_count))
self.assertGreaterEqual(effective_ratio, min_ratio)
self.assertLessEqual(effective_ratio, max_ratio)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/jax/lfd/lfd_adder_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAC agent learning from demonstrations."""
import dataclasses
from typing import Callable, Iterator
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import sac
from acme.agents.jax.lfd import builder
from acme.agents.jax.lfd import config
import reverb
@dataclasses.dataclass
class SACfDConfig:
"""Configuration options specific to SAC with demonstrations.
Attributes:
lfd_config: LfD config.
sac_config: SAC config.
"""
lfd_config: config.LfdConfig
sac_config: sac.SACConfig
class SACfDBuilder(builder.LfdBuilder[sac.SACNetworks,
actor_core_lib.FeedForwardPolicy,
reverb.ReplaySample]):
"""Builder for SAC agent learning from demonstrations."""
def __init__(self, sac_fd_config: SACfDConfig,
lfd_iterator_fn: Callable[[], Iterator[builder.LfdStep]]):
sac_builder = sac.SACBuilder(sac_fd_config.sac_config)
super().__init__(sac_builder, lfd_iterator_fn, sac_fd_config.lfd_config)
|
acme-master
|
acme/agents/jax/lfd/sacfd.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQIL agent."""
from acme.agents.jax.sqil.builder import SQILBuilder
|
acme-master
|
acme/agents/jax/sqil/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQIL Builder (https://arxiv.org/pdf/1905.11108.pdf)."""
from typing import Callable, Generic, Iterator, List, Optional
from acme import adders
from acme import core
from acme import specs
from acme import types
from acme.agents.jax import builders
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.jax.imitation_learning_types import DirectPolicyNetwork, DirectRLNetworks # pylint: disable=g-multiple-import
from acme.utils import counting
from acme.utils import loggers
import jax
import numpy as np
import reverb
import tree
def _generate_sqil_samples(
demonstration_iterator: Iterator[types.Transition],
replay_iterator: Iterator[reverb.ReplaySample]
) -> Iterator[reverb.ReplaySample]:
"""Generator which creates the sample iterator for SQIL.
Args:
demonstration_iterator: Iterator of demonstrations.
replay_iterator: Replay buffer sample iterator.
Yields:
Samples having a mix of demonstrations with reward 1 and replay samples with
reward 0.
"""
for demonstrations, replay_sample in zip(demonstration_iterator,
replay_iterator):
demonstrations = demonstrations._replace(
reward=np.ones_like(demonstrations.reward))
replay_transitions = replay_sample.data
replay_transitions = replay_transitions._replace(
reward=np.zeros_like(replay_transitions.reward))
double_batch = tree.map_structure(lambda x, y: np.concatenate([x, y]),
demonstrations, replay_transitions)
# Split the double batch in an interleaving fashion.
    # e.g. [1, 2, 3, 4, 5, 6] -> [1, 3, 5] and [2, 4, 6]
yield reverb.ReplaySample(
info=replay_sample.info,
data=tree.map_structure(lambda x: x[0::2], double_batch))
yield reverb.ReplaySample(
info=replay_sample.info,
data=tree.map_structure(lambda x: x[1::2], double_batch))
class SQILBuilder(Generic[DirectRLNetworks, DirectPolicyNetwork],
builders.ActorLearnerBuilder[DirectRLNetworks,
DirectPolicyNetwork,
reverb.ReplaySample]):
"""SQIL Builder (https://openreview.net/pdf?id=S1xKd24twB)."""
def __init__(self,
rl_agent: builders.ActorLearnerBuilder[DirectRLNetworks,
DirectPolicyNetwork,
reverb.ReplaySample],
rl_agent_batch_size: int,
make_demonstrations: Callable[[int],
Iterator[types.Transition]]):
"""Builds a SQIL agent.
Args:
      rl_agent: An off-policy direct RL agent.
rl_agent_batch_size: The batch size of the above algorithm.
make_demonstrations: A function that returns an infinite iterator with
demonstrations.
"""
self._rl_agent = rl_agent
self._rl_agent_batch_size = rl_agent_batch_size
self._make_demonstrations = make_demonstrations
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: DirectRLNetworks,
dataset: Iterator[reverb.ReplaySample],
logger_fn: loggers.LoggerFactory,
environment_spec: Optional[specs.EnvironmentSpec] = None,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
"""Creates the learner."""
counter = counter or counting.Counter()
direct_rl_counter = counting.Counter(counter, 'direct_rl')
return self._rl_agent.make_learner(
random_key,
networks,
dataset=dataset,
logger_fn=logger_fn,
environment_spec=environment_spec,
replay_client=replay_client,
counter=direct_rl_counter)
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: DirectPolicyNetwork,
) -> List[reverb.Table]:
return self._rl_agent.make_replay_tables(environment_spec, policy)
def make_dataset_iterator( # pytype: disable=signature-mismatch # overriding-return-type-checks
self,
replay_client: reverb.Client) -> Optional[Iterator[reverb.ReplaySample]]:
"""The returned iterator returns batches with both expert and policy data.
Batch items will alternate between expert data and policy data.
Args:
replay_client: Reverb client.
Returns:
The Replay sample iterator.
"""
# TODO(eorsini): Make sure we have the exact same format as the rl_agent's
# adder writes in.
demonstration_iterator = self._make_demonstrations(
self._rl_agent_batch_size)
rb_iterator = self._rl_agent.make_dataset_iterator(replay_client)
return utils.device_put(
_generate_sqil_samples(demonstration_iterator, rb_iterator),
jax.devices()[0])
def make_adder(
self, replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[DirectPolicyNetwork]) -> Optional[adders.Adder]:
return self._rl_agent.make_adder(replay_client, environment_spec, policy)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: DirectPolicyNetwork,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> core.Actor:
return self._rl_agent.make_actor(random_key, policy, environment_spec,
variable_source, adder)
def make_policy(self,
networks: DirectRLNetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> DirectPolicyNetwork:
return self._rl_agent.make_policy(networks, environment_spec, evaluation)
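# Usage sketch (illustration only): SQIL wraps a direct off-policy RL builder
# (SAC here, imported just for this sketch). The resulting dataset iterator
# alternates between demonstration batches relabelled with reward 1 and replay
# batches relabelled with reward 0. `_make_demonstrations` is a hypothetical
# placeholder for a real batched transition iterator factory.
if __name__ == '__main__':
  from acme.agents.jax import sac  # Sketch-only import.
  def _make_demonstrations(batch_size: int) -> Iterator[types.Transition]:
    raise NotImplementedError('Supply batched expert transitions here.')
  example_builder = SQILBuilder(
      rl_agent=sac.SACBuilder(sac.SACConfig()),
      rl_agent_batch_size=256,
      make_demonstrations=_make_demonstrations)
  print(type(example_builder).__name__)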
|
acme-master
|
acme/agents/jax/sqil/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the SQIL iterator."""
from acme import types
from acme.agents.jax.sqil import builder
import numpy as np
import reverb
from absl.testing import absltest
class BuilderTest(absltest.TestCase):
def test_sqil_iterator(self):
demonstrations = [
types.Transition(np.array([[1], [2], [3]]), (), (), (), ())
]
replay = [
reverb.ReplaySample(
info=(),
data=types.Transition(np.array([[4], [5], [6]]), (), (), (), ()))
]
sqil_it = builder._generate_sqil_samples(iter(demonstrations), iter(replay))
np.testing.assert_array_equal(
next(sqil_it).data.observation, np.array([[1], [3], [5]]))
np.testing.assert_array_equal(
next(sqil_it).data.observation, np.array([[2], [4], [6]]))
self.assertRaises(StopIteration, lambda: next(sqil_it))
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/jax/sqil/builder_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAC config."""
import dataclasses
from typing import Any, Optional
from acme import specs
from acme.adders import reverb as adders_reverb
from acme.agents.jax import normalization
import numpy as onp
@dataclasses.dataclass
class SACConfig(normalization.InputNormalizerConfig):
"""Configuration options for SAC."""
# Loss options
batch_size: int = 256
learning_rate: float = 3e-4
reward_scale: float = 1
discount: float = 0.99
n_step: int = 1
  # Coefficient applied to the entropy bonus. If None, an adaptive
  # coefficient will be used.
entropy_coefficient: Optional[float] = None
target_entropy: float = 0.0
# Target smoothing coefficient.
tau: float = 0.005
# Replay options
min_replay_size: int = 10000
max_replay_size: int = 1000000
replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
prefetch_size: int = 4
samples_per_insert: float = 256
  # Rate to be used for the SampleToInsertRatio rate limiter tolerance.
  # See the formula in make_replay_tables for more details.
samples_per_insert_tolerance_rate: float = 0.1
# How many gradient updates to perform per step.
num_sgd_steps_per_step: int = 1
input_normalization: Optional[normalization.NormalizationConfig] = None
def target_entropy_from_env_spec(
spec: specs.EnvironmentSpec,
target_entropy_per_dimension: Optional[float] = None,
) -> float:
"""A heuristic to determine a target entropy.
If target_entropy_per_dimension is not specified, the target entropy is
computed as "-num_actions", otherwise it is
"target_entropy_per_dimension * num_actions".
Args:
spec: environment spec
target_entropy_per_dimension: None or target entropy per action dimension
Returns:
target entropy
"""
def get_num_actions(action_spec: Any) -> float:
"""Returns a number of actions in the spec."""
if isinstance(action_spec, specs.BoundedArray):
return onp.prod(action_spec.shape, dtype=int)
elif isinstance(action_spec, tuple):
return sum(get_num_actions(subspace) for subspace in action_spec)
else:
raise ValueError('Unknown action space type.')
num_actions = get_num_actions(spec.actions)
if target_entropy_per_dimension is None:
if not isinstance(spec.actions, specs.BoundedArray) or isinstance(
spec.actions, specs.DiscreteArray):
raise ValueError('Only accept BoundedArrays for automatic '
f'target_entropy, got: {spec.actions}')
if not onp.all(spec.actions.minimum == -1.):
raise ValueError(
f'Minimum expected to be -1, got: {spec.actions.minimum}')
if not onp.all(spec.actions.maximum == 1.):
raise ValueError(
f'Maximum expected to be 1, got: {spec.actions.maximum}')
return -num_actions
else:
return target_entropy_per_dimension * num_actions
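# Usage sketch (illustrative only): the heuristic above applied to a
# hypothetical 6-dimensional bounded action space in [-1, 1]. The default
# yields -num_actions = -6; target_entropy_per_dimension=-0.5 yields -3.0.
if __name__ == '__main__':
  _example_spec = specs.EnvironmentSpec(
      observations=specs.Array((4,), onp.float32),
      actions=specs.BoundedArray((6,), onp.float32, minimum=-1., maximum=1.),
      rewards=specs.Array((), onp.float32),
      discounts=specs.BoundedArray((), onp.float32, minimum=0., maximum=1.))
  assert target_entropy_from_env_spec(_example_spec) == -6
  assert target_entropy_from_env_spec(
      _example_spec, target_entropy_per_dimension=-0.5) == -3.0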
|
acme-master
|
acme/agents/jax/sac/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAC agent."""
from acme.agents.jax.sac.builder import SACBuilder
from acme.agents.jax.sac.config import SACConfig
from acme.agents.jax.sac.config import target_entropy_from_env_spec
from acme.agents.jax.sac.learning import SACLearner
from acme.agents.jax.sac.networks import apply_policy_and_sample
from acme.agents.jax.sac.networks import default_models_to_snapshot
from acme.agents.jax.sac.networks import make_networks
from acme.agents.jax.sac.networks import SACNetworks
|
acme-master
|
acme/agents/jax/sac/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAC Builder."""
from typing import Iterator, List, Optional
import acme
from acme import adders
from acme import core
from acme import specs
from acme.adders import reverb as adders_reverb
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.agents.jax import normalization
from acme.agents.jax.sac import config as sac_config
from acme.agents.jax.sac import learning
from acme.agents.jax.sac import networks as sac_networks
from acme.datasets import reverb as datasets
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import jax
import optax
import reverb
from reverb import rate_limiters
@normalization.input_normalization_builder
class SACBuilder(builders.ActorLearnerBuilder[sac_networks.SACNetworks,
actor_core_lib.FeedForwardPolicy,
reverb.ReplaySample]):
"""SAC Builder."""
def __init__(
self,
config: sac_config.SACConfig,
):
"""Creates a SAC learner, a behavior policy and an eval actor.
Args:
      config: a config with SAC hyperparameters.
"""
self._config = config
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: sac_networks.SACNetworks,
dataset: Iterator[reverb.ReplaySample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
del environment_spec, replay_client
# Create optimizers
policy_optimizer = optax.adam(learning_rate=self._config.learning_rate)
q_optimizer = optax.adam(learning_rate=self._config.learning_rate)
return learning.SACLearner(
networks=networks,
tau=self._config.tau,
discount=self._config.discount,
entropy_coefficient=self._config.entropy_coefficient,
target_entropy=self._config.target_entropy,
rng=random_key,
reward_scale=self._config.reward_scale,
num_sgd_steps_per_step=self._config.num_sgd_steps_per_step,
policy_optimizer=policy_optimizer,
q_optimizer=q_optimizer,
iterator=dataset,
logger=logger_fn('learner'),
counter=counter)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: actor_core_lib.FeedForwardPolicy,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> acme.Actor:
del environment_spec
assert variable_source is not None
actor_core = actor_core_lib.batched_feed_forward_to_actor_core(policy)
variable_client = variable_utils.VariableClient(
variable_source, 'policy', device='cpu')
return actors.GenericActor(
actor_core, random_key, variable_client, adder, backend='cpu')
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: actor_core_lib.FeedForwardPolicy,
) -> List[reverb.Table]:
"""Create tables to insert data into."""
del policy
samples_per_insert_tolerance = (
self._config.samples_per_insert_tolerance_rate *
self._config.samples_per_insert)
error_buffer = self._config.min_replay_size * samples_per_insert_tolerance
limiter = rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._config.min_replay_size,
samples_per_insert=self._config.samples_per_insert,
error_buffer=error_buffer)
return [
reverb.Table(
name=self._config.replay_table_name,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._config.max_replay_size,
rate_limiter=limiter,
signature=adders_reverb.NStepTransitionAdder.signature(
environment_spec))
]
def make_dataset_iterator(
self, replay_client: reverb.Client) -> Iterator[reverb.ReplaySample]:
"""Create a dataset iterator to use for learning/updating the agent."""
dataset = datasets.make_reverb_dataset(
table=self._config.replay_table_name,
server_address=replay_client.server_address,
batch_size=(self._config.batch_size *
self._config.num_sgd_steps_per_step),
prefetch_size=self._config.prefetch_size)
return utils.device_put(dataset.as_numpy_iterator(), jax.devices()[0])
def make_adder(
self, replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[actor_core_lib.FeedForwardPolicy]
) -> Optional[adders.Adder]:
"""Create an adder which records data generated by the actor/environment."""
del environment_spec, policy
return adders_reverb.NStepTransitionAdder(
priority_fns={self._config.replay_table_name: None},
client=replay_client,
n_step=self._config.n_step,
discount=self._config.discount)
def make_policy(self,
networks: sac_networks.SACNetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> actor_core_lib.FeedForwardPolicy:
"""Construct the policy."""
del environment_spec
return sac_networks.apply_policy_and_sample(networks, eval_mode=evaluation)
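# Usage sketch (illustrative only): wiring the builder together with the SAC
# config and networks on a toy spec. It assumes `acme.testing.fakes` is
# available (as in the agent tests) and only constructs objects; no training
# is run.
if __name__ == '__main__':
  from acme.testing import fakes as _fakes
  _environment = _fakes.ContinuousEnvironment(
      episode_length=10, bounded=True, action_dim=6)
  _spec = specs.make_environment_spec(_environment)
  _builder = SACBuilder(sac_config.SACConfig())
  _networks = sac_networks.make_networks(_spec, hidden_layer_sizes=(32, 32))
  _policy = _builder.make_policy(_networks, _spec, evaluation=False)
  _tables = _builder.make_replay_tables(_spec, _policy)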
|
acme-master
|
acme/agents/jax/sac/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAC networks definition."""
import dataclasses
from typing import Optional, Tuple
from acme import core
from acme import specs
from acme.agents.jax import actor_core as actor_core_lib
from acme.jax import networks as networks_lib
from acme.jax import types
from acme.jax import utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
@dataclasses.dataclass
class SACNetworks:
"""Network and pure functions for the SAC agent.."""
policy_network: networks_lib.FeedForwardNetwork
q_network: networks_lib.FeedForwardNetwork
log_prob: networks_lib.LogProbFn
sample: networks_lib.SampleFn
sample_eval: Optional[networks_lib.SampleFn] = None
def default_models_to_snapshot(
networks: SACNetworks,
spec: specs.EnvironmentSpec):
"""Defines default models to be snapshotted."""
dummy_obs = utils.zeros_like(spec.observations)
dummy_action = utils.zeros_like(spec.actions)
dummy_key = jax.random.PRNGKey(0)
def q_network(
source: core.VariableSource) -> types.ModelToSnapshot:
params = source.get_variables(['critic'])[0]
return types.ModelToSnapshot(
networks.q_network.apply, params,
{'obs': dummy_obs, 'action': dummy_action})
def default_training_actor(
source: core.VariableSource) -> types.ModelToSnapshot:
params = source.get_variables(['policy'])[0]
return types.ModelToSnapshot(apply_policy_and_sample(networks, False),
params,
{'key': dummy_key, 'obs': dummy_obs})
def default_eval_actor(
source: core.VariableSource) -> types.ModelToSnapshot:
params = source.get_variables(['policy'])[0]
return types.ModelToSnapshot(
apply_policy_and_sample(networks, True), params,
{'key': dummy_key, 'obs': dummy_obs})
return {
'q_network': q_network,
'default_training_actor': default_training_actor,
'default_eval_actor': default_eval_actor,
}
def apply_policy_and_sample(
networks: SACNetworks,
eval_mode: bool = False) -> actor_core_lib.FeedForwardPolicy:
"""Returns a function that computes actions."""
sample_fn = networks.sample if not eval_mode else networks.sample_eval
if not sample_fn:
raise ValueError('sample function is not provided')
def apply_and_sample(params, key, obs):
return sample_fn(networks.policy_network.apply(params, obs), key)
return apply_and_sample
def make_networks(
spec: specs.EnvironmentSpec,
hidden_layer_sizes: Tuple[int, ...] = (256, 256)) -> SACNetworks:
"""Creates networks used by the agent."""
num_dimensions = np.prod(spec.actions.shape, dtype=int)
def _actor_fn(obs):
network = hk.Sequential([
hk.nets.MLP(
list(hidden_layer_sizes),
w_init=hk.initializers.VarianceScaling(1.0, 'fan_in', 'uniform'),
activation=jax.nn.relu,
activate_final=True),
networks_lib.NormalTanhDistribution(num_dimensions),
])
return network(obs)
def _critic_fn(obs, action):
network1 = hk.Sequential([
hk.nets.MLP(
list(hidden_layer_sizes) + [1],
w_init=hk.initializers.VarianceScaling(1.0, 'fan_in', 'uniform'),
activation=jax.nn.relu),
])
network2 = hk.Sequential([
hk.nets.MLP(
list(hidden_layer_sizes) + [1],
w_init=hk.initializers.VarianceScaling(1.0, 'fan_in', 'uniform'),
activation=jax.nn.relu),
])
input_ = jnp.concatenate([obs, action], axis=-1)
value1 = network1(input_)
value2 = network2(input_)
return jnp.concatenate([value1, value2], axis=-1)
policy = hk.without_apply_rng(hk.transform(_actor_fn))
critic = hk.without_apply_rng(hk.transform(_critic_fn))
# Create dummy observations and actions to create network parameters.
dummy_action = utils.zeros_like(spec.actions)
dummy_obs = utils.zeros_like(spec.observations)
dummy_action = utils.add_batch_dim(dummy_action)
dummy_obs = utils.add_batch_dim(dummy_obs)
return SACNetworks(
policy_network=networks_lib.FeedForwardNetwork(
lambda key: policy.init(key, dummy_obs), policy.apply),
q_network=networks_lib.FeedForwardNetwork(
lambda key: critic.init(key, dummy_obs, dummy_action), critic.apply),
log_prob=lambda params, actions: params.log_prob(actions),
sample=lambda params, key: params.sample(seed=key),
sample_eval=lambda params, key: params.mode())
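# Usage sketch (illustrative only): a minimal round trip through the networks
# above, assuming `acme.testing.fakes` is available as in the agent tests.
if __name__ == '__main__':
  from acme.testing import fakes as _fakes
  _environment = _fakes.ContinuousEnvironment(
      episode_length=10, bounded=True, action_dim=6)
  _spec = specs.make_environment_spec(_environment)
  _networks = make_networks(_spec, hidden_layer_sizes=(32, 32))
  _key = jax.random.PRNGKey(0)
  _params = _networks.policy_network.init(_key)
  _obs = utils.add_batch_dim(utils.zeros_like(_spec.observations))
  _dist = _networks.policy_network.apply(_params, _obs)
  _action = _networks.sample(_dist, _key)  # Tanh-squashed action, shape [1, 6].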
|
acme-master
|
acme/agents/jax/sac/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAC learner implementation."""
import time
from typing import Any, Dict, Iterator, List, NamedTuple, Optional, Tuple
import acme
from acme import types
from acme.agents.jax.sac import networks as sac_networks
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
import jax
import jax.numpy as jnp
import optax
import reverb
class TrainingState(NamedTuple):
"""Contains training state for the learner."""
policy_optimizer_state: optax.OptState
q_optimizer_state: optax.OptState
policy_params: networks_lib.Params
q_params: networks_lib.Params
target_q_params: networks_lib.Params
key: networks_lib.PRNGKey
alpha_optimizer_state: Optional[optax.OptState] = None
alpha_params: Optional[networks_lib.Params] = None
class SACLearner(acme.Learner):
"""SAC learner."""
_state: TrainingState
def __init__(
self,
networks: sac_networks.SACNetworks,
rng: jnp.ndarray,
iterator: Iterator[reverb.ReplaySample],
policy_optimizer: optax.GradientTransformation,
q_optimizer: optax.GradientTransformation,
tau: float = 0.005,
reward_scale: float = 1.0,
discount: float = 0.99,
entropy_coefficient: Optional[float] = None,
target_entropy: float = 0,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
num_sgd_steps_per_step: int = 1):
"""Initialize the SAC learner.
Args:
networks: SAC networks
rng: a key for random number generation.
iterator: an iterator over training data.
policy_optimizer: the policy optimizer.
q_optimizer: the Q-function optimizer.
tau: target smoothing coefficient.
reward_scale: reward scale.
discount: discount to use for TD updates.
entropy_coefficient: coefficient applied to the entropy bonus. If None, an
        adaptive coefficient will be used.
target_entropy: Used to normalize entropy. Only used when
entropy_coefficient is None.
counter: counter object used to keep track of steps.
logger: logger object to be used by learner.
num_sgd_steps_per_step: number of sgd steps to perform per learner 'step'.
"""
adaptive_entropy_coefficient = entropy_coefficient is None
if adaptive_entropy_coefficient:
# alpha is the temperature parameter that determines the relative
# importance of the entropy term versus the reward.
log_alpha = jnp.asarray(0., dtype=jnp.float32)
alpha_optimizer = optax.adam(learning_rate=3e-4)
alpha_optimizer_state = alpha_optimizer.init(log_alpha)
else:
if target_entropy:
raise ValueError('target_entropy should not be set when '
'entropy_coefficient is provided')
def alpha_loss(log_alpha: jnp.ndarray,
policy_params: networks_lib.Params,
transitions: types.Transition,
key: networks_lib.PRNGKey) -> jnp.ndarray:
"""Eq 18 from https://arxiv.org/pdf/1812.05905.pdf."""
dist_params = networks.policy_network.apply(
policy_params, transitions.observation)
action = networks.sample(dist_params, key)
log_prob = networks.log_prob(dist_params, action)
alpha = jnp.exp(log_alpha)
alpha_loss = alpha * jax.lax.stop_gradient(-log_prob - target_entropy)
return jnp.mean(alpha_loss)
def critic_loss(q_params: networks_lib.Params,
policy_params: networks_lib.Params,
target_q_params: networks_lib.Params,
alpha: jnp.ndarray,
transitions: types.Transition,
key: networks_lib.PRNGKey) -> jnp.ndarray:
q_old_action = networks.q_network.apply(
q_params, transitions.observation, transitions.action)
next_dist_params = networks.policy_network.apply(
policy_params, transitions.next_observation)
next_action = networks.sample(next_dist_params, key)
next_log_prob = networks.log_prob(next_dist_params, next_action)
next_q = networks.q_network.apply(
target_q_params, transitions.next_observation, next_action)
next_v = jnp.min(next_q, axis=-1) - alpha * next_log_prob
target_q = jax.lax.stop_gradient(transitions.reward * reward_scale +
transitions.discount * discount * next_v)
q_error = q_old_action - jnp.expand_dims(target_q, -1)
q_loss = 0.5 * jnp.mean(jnp.square(q_error))
return q_loss
def actor_loss(policy_params: networks_lib.Params,
q_params: networks_lib.Params,
alpha: jnp.ndarray,
transitions: types.Transition,
key: networks_lib.PRNGKey) -> jnp.ndarray:
dist_params = networks.policy_network.apply(
policy_params, transitions.observation)
action = networks.sample(dist_params, key)
log_prob = networks.log_prob(dist_params, action)
q_action = networks.q_network.apply(
q_params, transitions.observation, action)
min_q = jnp.min(q_action, axis=-1)
actor_loss = alpha * log_prob - min_q
return jnp.mean(actor_loss)
alpha_grad = jax.value_and_grad(alpha_loss)
critic_grad = jax.value_and_grad(critic_loss)
actor_grad = jax.value_and_grad(actor_loss)
def update_step(
state: TrainingState,
transitions: types.Transition,
) -> Tuple[TrainingState, Dict[str, jnp.ndarray]]:
key, key_alpha, key_critic, key_actor = jax.random.split(state.key, 4)
if adaptive_entropy_coefficient:
alpha_loss, alpha_grads = alpha_grad(state.alpha_params,
state.policy_params, transitions,
key_alpha)
alpha = jnp.exp(state.alpha_params)
else:
alpha = entropy_coefficient
critic_loss, critic_grads = critic_grad(state.q_params,
state.policy_params,
state.target_q_params, alpha,
transitions, key_critic)
actor_loss, actor_grads = actor_grad(state.policy_params, state.q_params,
alpha, transitions, key_actor)
# Apply policy gradients
actor_update, policy_optimizer_state = policy_optimizer.update(
actor_grads, state.policy_optimizer_state)
policy_params = optax.apply_updates(state.policy_params, actor_update)
# Apply critic gradients
critic_update, q_optimizer_state = q_optimizer.update(
critic_grads, state.q_optimizer_state)
q_params = optax.apply_updates(state.q_params, critic_update)
new_target_q_params = jax.tree_map(lambda x, y: x * (1 - tau) + y * tau,
state.target_q_params, q_params)
metrics = {
'critic_loss': critic_loss,
'actor_loss': actor_loss,
}
new_state = TrainingState(
policy_optimizer_state=policy_optimizer_state,
q_optimizer_state=q_optimizer_state,
policy_params=policy_params,
q_params=q_params,
target_q_params=new_target_q_params,
key=key,
)
if adaptive_entropy_coefficient:
# Apply alpha gradients
alpha_update, alpha_optimizer_state = alpha_optimizer.update(
alpha_grads, state.alpha_optimizer_state)
alpha_params = optax.apply_updates(state.alpha_params, alpha_update)
metrics.update({
'alpha_loss': alpha_loss,
'alpha': jnp.exp(alpha_params),
})
new_state = new_state._replace(
alpha_optimizer_state=alpha_optimizer_state,
alpha_params=alpha_params)
metrics['rewards_mean'] = jnp.mean(
jnp.abs(jnp.mean(transitions.reward, axis=0)))
metrics['rewards_std'] = jnp.std(transitions.reward, axis=0)
return new_state, metrics
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger(
'learner',
asynchronous=True,
serialize_fn=utils.fetch_devicearray,
steps_key=self._counter.get_steps_key())
# Iterator on demonstration transitions.
self._iterator = iterator
update_step = utils.process_multiple_batches(update_step,
num_sgd_steps_per_step)
# Use the JIT compiler.
self._update_step = jax.jit(update_step)
def make_initial_state(key: networks_lib.PRNGKey) -> TrainingState:
"""Initialises the training state (parameters and optimiser state)."""
key_policy, key_q, key = jax.random.split(key, 3)
policy_params = networks.policy_network.init(key_policy)
policy_optimizer_state = policy_optimizer.init(policy_params)
q_params = networks.q_network.init(key_q)
q_optimizer_state = q_optimizer.init(q_params)
state = TrainingState(
policy_optimizer_state=policy_optimizer_state,
q_optimizer_state=q_optimizer_state,
policy_params=policy_params,
q_params=q_params,
target_q_params=q_params,
key=key)
if adaptive_entropy_coefficient:
state = state._replace(alpha_optimizer_state=alpha_optimizer_state,
alpha_params=log_alpha)
return state
# Create initial state.
self._state = make_initial_state(rng)
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
def step(self):
sample = next(self._iterator)
transitions = types.Transition(*sample.data)
self._state, metrics = self._update_step(self._state, transitions)
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Increment counts and record the current time
counts = self._counter.increment(steps=1, walltime=elapsed_time)
# Attempts to write the logs.
self._logger.write({**metrics, **counts})
def get_variables(self, names: List[str]) -> List[Any]:
variables = {
'policy': self._state.policy_params,
'critic': self._state.q_params,
}
return [variables[name] for name in names]
def save(self) -> TrainingState:
return self._state
def restore(self, state: TrainingState):
self._state = state
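# Illustration (not a definitive implementation): the target critic above is
# updated by Polyak averaging, target <- (1 - tau) * target + tau * online,
# applied leaf-wise over the parameter pytrees. A tiny numeric example:
if __name__ == '__main__':
  _tau = 0.005
  _online = {'w': jnp.array([1.0, 2.0])}
  _target = {'w': jnp.array([0.0, 0.0])}
  _new_target = jax.tree_map(
      lambda x, y: x * (1 - _tau) + y * _tau, _target, _online)
  print(_new_target)  # {'w': [0.005, 0.01]} up to float precision.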
|
acme-master
|
acme/agents/jax/sac/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for bc_initialization."""
from acme import specs
from acme.agents.jax import bc
from acme.agents.jax import sac
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.testing import fakes
import haiku as hk
import jax
import numpy as np
from absl.testing import absltest
def make_networks(spec: specs.EnvironmentSpec) -> bc.BCNetworks:
"""Creates networks used by the agent."""
final_layer_size = np.prod(spec.actions.shape, dtype=int)
def _actor_fn(obs, is_training=False, key=None):
    # is_training and key allow defining train/test-dependent modules
    # like dropout.
del is_training
del key
network = networks_lib.LayerNormMLP([64, 64, final_layer_size],
activate_final=False)
return jax.nn.tanh(network(obs))
policy = hk.without_apply_rng(hk.transform(_actor_fn))
# Create dummy observations and actions to create network parameters.
dummy_obs = utils.zeros_like(spec.observations)
dummy_obs = utils.add_batch_dim(dummy_obs)
policy_network = bc.BCPolicyNetwork(lambda key: policy.init(key, dummy_obs),
policy.apply)
return bc.BCNetworks(policy_network)
class BcPretrainingTest(absltest.TestCase):
def test_bc_initialization(self):
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(
episode_length=10, bounded=True, action_dim=6)
spec = specs.make_environment_spec(environment)
# Construct the agent.
nets = make_networks(spec)
loss = bc.mse()
bc.pretraining.train_with_bc(
fakes.transition_iterator(environment), nets, loss, num_steps=100)
def test_sac_to_bc_networks(self):
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(
episode_length=10, bounded=True, action_dim=6)
spec = specs.make_environment_spec(environment)
sac_nets = sac.make_networks(spec, hidden_layer_sizes=(4, 4))
bc_nets = bc.convert_to_bc_network(sac_nets.policy_network)
rng = jax.random.PRNGKey(0)
dummy_obs = utils.zeros_like(spec.observations)
dummy_obs = utils.add_batch_dim(dummy_obs)
sac_params = sac_nets.policy_network.init(rng)
sac_output = sac_nets.policy_network.apply(sac_params, dummy_obs)
bc_params = bc_nets.init(rng)
bc_output = bc_nets.apply(bc_params, dummy_obs, is_training=False, key=None)
np.testing.assert_array_equal(sac_output.mode(), bc_output.mode())
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/jax/bc/pretraining_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools to train a policy network with BC."""
from typing import Callable, Iterator
from acme import types
from acme.agents.jax.bc import learning
from acme.agents.jax.bc import losses
from acme.agents.jax.bc import networks as bc_networks
from acme.jax import networks as networks_lib
from acme.jax import utils
import jax
import optax
def train_with_bc(make_demonstrations: Callable[[int],
Iterator[types.Transition]],
networks: bc_networks.BCNetworks,
loss: losses.BCLoss,
num_steps: int = 100000) -> networks_lib.Params:
"""Trains the given network with BC and returns the params.
Args:
make_demonstrations: A function (batch_size) -> iterator with demonstrations
to be imitated.
networks: Network taking (params, obs, is_training, key) as input
loss: BC loss to use.
num_steps: number of training steps
Returns:
The trained network params.
"""
demonstration_iterator = make_demonstrations(256)
prefetching_iterator = utils.sharded_prefetch(
demonstration_iterator,
buffer_size=2,
num_threads=jax.local_device_count())
learner = learning.BCLearner(
networks=networks,
random_key=jax.random.PRNGKey(0),
loss_fn=loss,
prefetching_iterator=prefetching_iterator,
optimizer=optax.adam(1e-4),
num_sgd_steps_per_step=1)
# Train the agent
for _ in range(num_steps):
learner.step()
return learner.get_variables(['policy'])[0]
|
acme-master
|
acme/agents/jax/bc/pretraining.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config classes for BC."""
import dataclasses
@dataclasses.dataclass
class BCConfig:
"""Configuration options for BC.
Attributes:
learning_rate: Learning rate.
num_sgd_steps_per_step: How many gradient updates to perform per step.
"""
learning_rate: float = 1e-4
num_sgd_steps_per_step: int = 1
|
acme-master
|
acme/agents/jax/bc/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of a behavior cloning (BC) agent."""
from acme.agents.jax.bc import pretraining
from acme.agents.jax.bc.builder import BCBuilder
from acme.agents.jax.bc.config import BCConfig
from acme.agents.jax.bc.learning import BCLearner
from acme.agents.jax.bc.losses import BCLoss
from acme.agents.jax.bc.losses import logp
from acme.agents.jax.bc.losses import mse
from acme.agents.jax.bc.losses import peerbc
from acme.agents.jax.bc.losses import rcal
from acme.agents.jax.bc.networks import BCNetworks
from acme.agents.jax.bc.networks import BCPolicyNetwork
from acme.agents.jax.bc.networks import convert_policy_value_to_bc_network
from acme.agents.jax.bc.networks import convert_to_bc_network
|
acme-master
|
acme/agents/jax/bc/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BC Builder."""
from typing import Iterator, Optional
from acme import core
from acme import specs
from acme import types
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.agents.jax.bc import config as bc_config
from acme.agents.jax.bc import learning
from acme.agents.jax.bc import losses
from acme.agents.jax.bc import networks as bc_networks
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import jax
import optax
class BCBuilder(builders.OfflineBuilder[bc_networks.BCNetworks,
actor_core_lib.FeedForwardPolicy,
types.Transition]):
"""BC Builder."""
def __init__(
self,
config: bc_config.BCConfig,
loss_fn: losses.BCLoss,
loss_has_aux: bool = False,
):
"""Creates a BC learner, an evaluation policy and an eval actor.
Args:
      config: a config with BC hyperparameters.
loss_fn: BC loss to use.
loss_has_aux: Whether the loss function returns auxiliary metrics as a
second argument.
"""
self._config = config
self._loss_fn = loss_fn
self._loss_has_aux = loss_has_aux
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: bc_networks.BCNetworks,
dataset: Iterator[types.Transition],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
*,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
del environment_spec
return learning.BCLearner(
networks=networks,
random_key=random_key,
loss_fn=self._loss_fn,
optimizer=optax.adam(learning_rate=self._config.learning_rate),
prefetching_iterator=utils.sharded_prefetch(dataset),
num_sgd_steps_per_step=self._config.num_sgd_steps_per_step,
loss_has_aux=self._loss_has_aux,
logger=logger_fn('learner'),
counter=counter)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: actor_core_lib.FeedForwardPolicy,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
) -> core.Actor:
del environment_spec
assert variable_source is not None
actor_core = actor_core_lib.batched_feed_forward_to_actor_core(policy)
variable_client = variable_utils.VariableClient(
variable_source, 'policy', device='cpu')
return actors.GenericActor(
actor_core, random_key, variable_client, backend='cpu')
def make_policy(self,
networks: bc_networks.BCNetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> actor_core_lib.FeedForwardPolicy:
"""Construct the policy."""
del environment_spec, evaluation
def evaluation_policy(
params: networks_lib.Params, key: networks_lib.PRNGKey,
observation: networks_lib.Observation) -> networks_lib.Action:
apply_key, sample_key = jax.random.split(key)
network_output = networks.policy_network.apply(
params, observation, is_training=False, key=apply_key)
return networks.sample_fn(network_output, sample_key)
return evaluation_policy
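# Usage sketch (illustrative only): assembling the builder for offline
# training on a toy environment, assuming `acme.testing.fakes` is available
# as in the agent tests. Only object construction is performed.
if __name__ == '__main__':
  from acme.testing import fakes as _fakes
  _environment = _fakes.ContinuousEnvironment(
      episode_length=10, bounded=True, action_dim=6)
  _spec = specs.make_environment_spec(_environment)
  _builder = BCBuilder(bc_config.BCConfig(), loss_fn=losses.mse())
  # A learner would then be created from a transitions iterator `dataset`,
  # BC networks and a logger factory (all assumed to come from the
  # surrounding experiment code):
  #   learner = _builder.make_learner(
  #       jax.random.PRNGKey(0), networks, dataset, logger_fn, _spec)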
|
acme-master
|
acme/agents/jax/bc/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network definitions for BC."""
import dataclasses
from typing import Callable, Optional
from acme.jax import networks as networks_lib
from acme.jax import types
from typing_extensions import Protocol
class ApplyFn(Protocol):
def __call__(self,
params: networks_lib.Params,
observation: networks_lib.Observation,
*args,
is_training: bool,
key: Optional[types.PRNGKey] = None,
**kwargs) -> networks_lib.NetworkOutput:
...
@dataclasses.dataclass
class BCPolicyNetwork:
"""Holds a pair of pure functions defining a policy network for BC.
This is a feed-forward network taking params, obs, is_training, key as input.
Attributes:
init: A pure function. Initializes and returns the networks parameters.
apply: A pure function. Computes and returns the outputs of a forward pass.
"""
init: Callable[[types.PRNGKey], networks_lib.Params]
apply: ApplyFn
def identity_sample(output: networks_lib.NetworkOutput,
key: types.PRNGKey) -> networks_lib.Action:
"""Placeholder sampling function for non-distributional networks."""
del key
return output
@dataclasses.dataclass
class BCNetworks:
"""The network and pure functions for the BC agent.
Attributes:
policy_network: The policy network.
sample_fn: A pure function. Samples an action based on the network output.
Must be set for distributional networks. Otherwise identity.
log_prob: A pure function. Computes log-probability for an action.
Must be set for distributional networks. Otherwise None.
"""
policy_network: BCPolicyNetwork
sample_fn: networks_lib.SampleFn = identity_sample
log_prob: Optional[networks_lib.LogProbFn] = None
def convert_to_bc_network(
policy_network: networks_lib.FeedForwardNetwork) -> BCPolicyNetwork:
"""Converts a policy network from SAC/TD3/D4PG/.. into a BC policy network.
Args:
policy_network: FeedForwardNetwork taking the observation as input and
returning action representation compatible with one of the BC losses.
Returns:
The BC policy network taking observation, is_training, key as input.
"""
def apply(params: networks_lib.Params,
observation: networks_lib.Observation,
*args,
is_training: bool = False,
key: Optional[types.PRNGKey] = None,
**kwargs) -> networks_lib.NetworkOutput:
del is_training, key
return policy_network.apply(params, observation, *args, **kwargs)
return BCPolicyNetwork(policy_network.init, apply)
def convert_policy_value_to_bc_network(
policy_value_network: networks_lib.FeedForwardNetwork) -> BCPolicyNetwork:
"""Converts a policy-value network (e.g. from PPO) into a BC policy network.
Args:
policy_value_network: FeedForwardNetwork taking the observation as input.
Returns:
The BC policy network taking observation, is_training, key as input.
"""
def apply(params: networks_lib.Params,
observation: networks_lib.Observation,
*args,
is_training: bool = False,
key: Optional[types.PRNGKey] = None,
**kwargs) -> networks_lib.NetworkOutput:
del is_training, key
actions, _ = policy_value_network.apply(params, observation, *args,
**kwargs)
return actions
return BCPolicyNetwork(policy_value_network.init, apply)
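# Usage sketch (illustrative only): wrapping a toy, non-distributional
# feed-forward network into a BCPolicyNetwork with convert_to_bc_network.
# The linear `_toy_*` objects below are hypothetical.
if __name__ == '__main__':
  import jax.numpy as jnp
  _toy_ffn = networks_lib.FeedForwardNetwork(
      init=lambda key: {'w': jnp.ones((3, 2))},
      apply=lambda params, obs: obs @ params['w'])
  _bc_policy = convert_to_bc_network(_toy_ffn)
  _bc_nets = BCNetworks(policy_network=_bc_policy)
  _out = _bc_nets.policy_network.apply(
      {'w': jnp.ones((3, 2))}, jnp.ones((1, 3)), is_training=False)
  # _out has shape [1, 2]; the default identity_sample passes it through.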
|
acme-master
|
acme/agents/jax/bc/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the BC agent."""
from acme import specs
from acme import types
from acme.agents.jax import bc
from acme.jax import networks as networks_lib
from acme.jax import types as jax_types
from acme.jax import utils
from acme.testing import fakes
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jax.scipy import special
import numpy as np
import optax
import rlax
from absl.testing import absltest
from absl.testing import parameterized
def make_networks(spec: specs.EnvironmentSpec,
discrete_actions: bool = False) -> bc.BCNetworks:
"""Creates networks used by the agent."""
if discrete_actions:
final_layer_size = spec.actions.num_values
else:
final_layer_size = np.prod(spec.actions.shape, dtype=int)
def _actor_fn(obs, is_training=False, key=None):
    # is_training and key allow defining train/test-dependent modules
    # like dropout.
del is_training
del key
if discrete_actions:
network = hk.nets.MLP([64, 64, final_layer_size])
else:
network = hk.Sequential([
networks_lib.LayerNormMLP([64, 64], activate_final=True),
networks_lib.NormalTanhDistribution(final_layer_size),
])
return network(obs)
policy = hk.without_apply_rng(hk.transform(_actor_fn))
# Create dummy observations and actions to create network parameters.
dummy_obs = utils.zeros_like(spec.observations)
dummy_obs = utils.add_batch_dim(dummy_obs)
policy_network = networks_lib.FeedForwardNetwork(
lambda key: policy.init(key, dummy_obs), policy.apply)
bc_policy_network = bc.convert_to_bc_network(policy_network)
if discrete_actions:
def sample_fn(logits: networks_lib.NetworkOutput,
key: jax_types.PRNGKey) -> networks_lib.Action:
return rlax.epsilon_greedy(epsilon=0.0).sample(key, logits)
def log_prob(logits: networks_lib.NetworkOutput,
actions: networks_lib.Action) -> networks_lib.LogProb:
max_logits = jnp.max(logits, axis=-1, keepdims=True)
logits = logits - max_logits
logits_actions = jnp.sum(
jax.nn.one_hot(actions, spec.actions.num_values) * logits, axis=-1)
log_prob = logits_actions - special.logsumexp(logits, axis=-1)
return log_prob
else:
def sample_fn(distribution: networks_lib.NetworkOutput,
key: jax_types.PRNGKey) -> networks_lib.Action:
return distribution.sample(seed=key)
    def log_prob(distribution: networks_lib.NetworkOutput,
                 actions: networks_lib.Action) -> networks_lib.LogProb:
      return distribution.log_prob(actions)
return bc.BCNetworks(bc_policy_network, sample_fn, log_prob)
class BCTest(parameterized.TestCase):
@parameterized.parameters(
('logp',),
('mse',),
('peerbc',)
)
def test_continuous_actions(self, loss_name):
with chex.fake_pmap_and_jit():
num_sgd_steps_per_step = 1
num_steps = 5
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(
episode_length=10, bounded=True, action_dim=6)
spec = specs.make_environment_spec(environment)
dataset_demonstration = fakes.transition_dataset(environment)
dataset_demonstration = dataset_demonstration.map(
lambda sample: types.Transition(*sample.data))
dataset_demonstration = dataset_demonstration.batch(8).as_numpy_iterator()
# Construct the agent.
networks = make_networks(spec)
if loss_name == 'logp':
loss_fn = bc.logp()
elif loss_name == 'mse':
loss_fn = bc.mse()
elif loss_name == 'peerbc':
loss_fn = bc.peerbc(bc.logp(), zeta=0.1)
else:
raise ValueError
learner = bc.BCLearner(
networks=networks,
random_key=jax.random.PRNGKey(0),
loss_fn=loss_fn,
optimizer=optax.adam(0.01),
prefetching_iterator=utils.sharded_prefetch(dataset_demonstration),
num_sgd_steps_per_step=num_sgd_steps_per_step)
# Train the agent
for _ in range(num_steps):
learner.step()
@parameterized.parameters(
('logp',),
('rcal',))
def test_discrete_actions(self, loss_name):
with chex.fake_pmap_and_jit():
num_sgd_steps_per_step = 1
num_steps = 5
# Create a fake environment to test with.
environment = fakes.DiscreteEnvironment(
num_actions=10, num_observations=100, obs_shape=(10,),
obs_dtype=np.float32)
spec = specs.make_environment_spec(environment)
dataset_demonstration = fakes.transition_dataset(environment)
dataset_demonstration = dataset_demonstration.map(
lambda sample: types.Transition(*sample.data))
dataset_demonstration = dataset_demonstration.batch(8).as_numpy_iterator()
# Construct the agent.
networks = make_networks(spec, discrete_actions=True)
if loss_name == 'logp':
loss_fn = bc.logp()
elif loss_name == 'rcal':
base_loss_fn = bc.logp()
loss_fn = bc.rcal(base_loss_fn, discount=0.99, alpha=0.1)
else:
raise ValueError
learner = bc.BCLearner(
networks=networks,
random_key=jax.random.PRNGKey(0),
loss_fn=loss_fn,
optimizer=optax.adam(0.01),
prefetching_iterator=utils.sharded_prefetch(dataset_demonstration),
num_sgd_steps_per_step=num_sgd_steps_per_step)
# Train the agent
for _ in range(num_steps):
learner.step()
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/jax/bc/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Offline losses used in variants of BC."""
from typing import Callable, Optional, Tuple, Union
from acme import types
from acme.agents.jax.bc import networks as bc_networks
from acme.jax import networks as networks_lib
from acme.jax import types as jax_types
from acme.utils import loggers
import jax
import jax.numpy as jnp
loss_args = [
bc_networks.BCNetworks, networks_lib.Params, networks_lib.PRNGKey,
types.Transition
]
BCLossWithoutAux = Callable[loss_args, jnp.ndarray]
BCLossWithAux = Callable[loss_args, Tuple[jnp.ndarray, loggers.LoggingData]]
BCLoss = Union[BCLossWithoutAux, BCLossWithAux]
def mse() -> BCLossWithoutAux:
"""Mean Squared Error loss."""
def loss(networks: bc_networks.BCNetworks, params: networks_lib.Params,
key: jax_types.PRNGKey,
transitions: types.Transition) -> jnp.ndarray:
key, key_dropout = jax.random.split(key)
dist_params = networks.policy_network.apply(
params, transitions.observation, is_training=True, key=key_dropout)
action = networks.sample_fn(dist_params, key)
return jnp.mean(jnp.square(action - transitions.action))
return loss
def logp() -> BCLossWithoutAux:
"""Log probability loss."""
def loss(networks: bc_networks.BCNetworks, params: networks_lib.Params,
key: jax_types.PRNGKey,
transitions: types.Transition) -> jnp.ndarray:
logits = networks.policy_network.apply(
params, transitions.observation, is_training=True, key=key)
logp_action = networks.log_prob(logits, transitions.action)
return -jnp.mean(logp_action)
return loss
def peerbc(base_loss_fn: BCLossWithoutAux, zeta: float) -> BCLossWithoutAux:
"""Peer-BC loss from https://arxiv.org/pdf/2010.01748.pdf.
Args:
    base_loss_fn: the base loss to add the Peer-BC regularization on top of.
zeta: the weight of the regularization.
Returns:
The loss.
"""
def loss(networks: bc_networks.BCNetworks, params: networks_lib.Params,
key: jax_types.PRNGKey,
transitions: types.Transition) -> jnp.ndarray:
key_perm, key_bc_loss, key_permuted_loss = jax.random.split(key, 3)
permutation_keys = jax.random.split(key_perm, transitions.action.shape[0])
permuted_actions = jax.vmap(
jax.random.permutation, in_axes=(0, 0))(permutation_keys,
transitions.action)
permuted_transition = transitions._replace(action=permuted_actions)
bc_loss = base_loss_fn(networks, params, key_bc_loss, transitions)
permuted_loss = base_loss_fn(networks, params, key_permuted_loss,
permuted_transition)
return bc_loss - zeta * permuted_loss
return loss
def rcal(base_loss_fn: BCLossWithoutAux,
discount: float,
alpha: float,
num_bins: Optional[int] = None) -> BCLossWithoutAux:
"""https://www.cristal.univ-lille.fr/~pietquin/pdf/AAMAS_2014_BPMGOP.pdf.
Args:
base_loss_fn: the base loss to add RCAL on top of.
discount: the gamma discount used in RCAL.
alpha: the regularization parameter.
    num_bins: how many bins were used for discretization. If None, the
      environment is assumed to be natively discrete.
Returns:
The loss function.
"""
def loss(networks: bc_networks.BCNetworks, params: networks_lib.Params,
key: jax_types.PRNGKey,
transitions: types.Transition) -> jnp.ndarray:
def logits_fn(key: jax_types.PRNGKey,
observations: networks_lib.Observation,
actions: Optional[networks_lib.Action] = None):
logits = networks.policy_network.apply(
params, observations, key=key, is_training=True)
if num_bins:
logits = jnp.reshape(logits, list(logits.shape[:-1]) + [-1, num_bins])
if actions is None:
actions = jnp.argmax(logits, axis=-1)
logits_actions = jnp.sum(
jax.nn.one_hot(actions, logits.shape[-1]) * logits, axis=-1)
return logits_actions
key, key1, key2 = jax.random.split(key, 3)
logits_a_tm1 = logits_fn(key1, transitions.observation, transitions.action)
logits_a_t = logits_fn(key2, transitions.next_observation)
# RCAL, by making a parallel between the logits of BC and Q-values,
# defines a regularization loss that encourages the implicit reward
    # (inferred by inverting the Bellman equation) to be sparse.
# NOTE: In case of discretized envs jnp.mean goes over batch and num_bins
# dimensions.
regularization_loss = jnp.mean(
jnp.abs(logits_a_tm1 - discount * logits_a_t)
)
loss = base_loss_fn(networks, params, key, transitions)
return loss + alpha * regularization_loss
return loss
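# Usage sketch (illustrative only): evaluating the mse() loss above on a toy
# linear policy and a small batch of transitions; `_toy_*` names are
# hypothetical.
if __name__ == '__main__':
  import numpy as np
  _toy_policy = bc_networks.BCPolicyNetwork(
      init=lambda key: {'w': jnp.ones((3, 2))},
      apply=lambda params, obs, *a, is_training=False, key=None, **kw:
          obs @ params['w'])
  _toy_networks = bc_networks.BCNetworks(policy_network=_toy_policy)
  _toy_transitions = types.Transition(
      observation=np.ones((4, 3), np.float32),
      action=np.zeros((4, 2), np.float32),
      reward=np.zeros((4,), np.float32),
      discount=np.ones((4,), np.float32),
      next_observation=np.ones((4, 3), np.float32))
  _loss = mse()(_toy_networks, {'w': jnp.ones((3, 2))},
                jax.random.PRNGKey(0), _toy_transitions)
  print(_loss)  # Each predicted action is [3., 3.] vs a zero target: 9.0.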
|
acme-master
|
acme/agents/jax/bc/losses.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BC learner implementation."""
import time
from typing import Dict, List, NamedTuple, Optional, Tuple, Union, Iterator
import acme
from acme import types
from acme.agents.jax.bc import losses
from acme.agents.jax.bc import networks as bc_networks
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
import jax
import jax.numpy as jnp
import optax
_PMAP_AXIS_NAME = 'data'
class TrainingState(NamedTuple):
"""Contains training state for the learner."""
optimizer_state: optax.OptState
policy_params: networks_lib.Params
key: networks_lib.PRNGKey
steps: int
def _create_loss_metrics(
loss_has_aux: bool,
loss_result: Union[jnp.ndarray, Tuple[jnp.ndarray, loggers.LoggingData]],
gradients: jnp.ndarray,
):
"""Creates loss metrics for logging."""
# Validate input.
if loss_has_aux and not (len(loss_result) == 2 and isinstance(
loss_result[0], jnp.ndarray) and isinstance(loss_result[1], dict)):
raise ValueError('Could not parse loss value and metrics from loss_fn\'s '
'output. Since loss_has_aux is enabled, loss_fn must '
'return loss_value and auxiliary metrics.')
if not loss_has_aux and not isinstance(loss_result, jnp.ndarray):
    raise ValueError(f'Loss returned {type(loss_result)}. However, it should '
                     'return a jnp.ndarray, given that loss_has_aux = False.')
# Maybe unpack loss result.
if loss_has_aux:
loss, metrics = loss_result
else:
loss = loss_result
metrics = {}
# Complete metrics dict and return it.
metrics['loss'] = loss
metrics['gradient_norm'] = optax.global_norm(gradients)
return metrics
class BCLearner(acme.Learner):
"""BC learner.
This is the learning component of a BC agent. It takes a Transitions iterator
as input and implements update functionality to learn from this iterator.
"""
_state: TrainingState
def __init__(self,
networks: bc_networks.BCNetworks,
random_key: networks_lib.PRNGKey,
loss_fn: losses.BCLoss,
optimizer: optax.GradientTransformation,
prefetching_iterator: Iterator[types.Transition],
num_sgd_steps_per_step: int,
loss_has_aux: bool = False,
logger: Optional[loggers.Logger] = None,
counter: Optional[counting.Counter] = None):
"""Behavior Cloning Learner.
Args:
networks: BC networks
random_key: RNG key.
loss_fn: BC loss to use.
optimizer: Optax optimizer.
      prefetching_iterator: A sharded prefetching iterator as returned by
`acme.jax.utils.sharded_prefetch`. Please see the documentation for
`sharded_prefetch` for more details.
num_sgd_steps_per_step: Number of gradient updates per step.
loss_has_aux: Whether the loss function returns auxiliary metrics as a
second argument.
logger: Logger.
counter: Counter.
"""
def sgd_step(
state: TrainingState,
transitions: types.Transition,
) -> Tuple[TrainingState, Dict[str, jnp.ndarray]]:
loss_and_grad = jax.value_and_grad(
loss_fn, argnums=1, has_aux=loss_has_aux)
# Compute losses and their gradients.
key, key_input = jax.random.split(state.key)
loss_result, gradients = loss_and_grad(networks, state.policy_params,
key_input, transitions)
# Combine the gradient across all devices (by taking their mean).
gradients = jax.lax.pmean(gradients, axis_name=_PMAP_AXIS_NAME)
# Compute and combine metrics across all devices.
metrics = _create_loss_metrics(loss_has_aux, loss_result, gradients)
metrics = jax.lax.pmean(metrics, axis_name=_PMAP_AXIS_NAME)
policy_update, optimizer_state = optimizer.update(gradients,
state.optimizer_state,
state.policy_params)
policy_params = optax.apply_updates(state.policy_params, policy_update)
new_state = TrainingState(
optimizer_state=optimizer_state,
policy_params=policy_params,
key=key,
steps=state.steps + 1,
)
return new_state, metrics
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter(prefix='learner')
self._logger = logger or loggers.make_default_logger(
'learner',
asynchronous=True,
serialize_fn=utils.fetch_devicearray,
steps_key=self._counter.get_steps_key())
# Split the input batch to `num_sgd_steps_per_step` minibatches in order
# to achieve better performance on accelerators.
sgd_step = utils.process_multiple_batches(sgd_step, num_sgd_steps_per_step)
self._sgd_step = jax.pmap(sgd_step, axis_name=_PMAP_AXIS_NAME)
random_key, init_key = jax.random.split(random_key)
policy_params = networks.policy_network.init(init_key)
optimizer_state = optimizer.init(policy_params)
# Create initial state.
state = TrainingState(
optimizer_state=optimizer_state,
policy_params=policy_params,
key=random_key,
steps=0,
)
self._state = utils.replicate_in_all_devices(state)
self._timestamp = None
self._prefetching_iterator = prefetching_iterator
def step(self):
# Get a batch of Transitions.
transitions = next(self._prefetching_iterator)
self._state, metrics = self._sgd_step(self._state, transitions)
metrics = utils.get_from_first_device(metrics)
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Increment counts and record the current time
counts = self._counter.increment(steps=1, walltime=elapsed_time)
# Attempts to write the logs.
self._logger.write({**metrics, **counts})
def get_variables(self, names: List[str]) -> List[networks_lib.Params]:
variables = {
'policy': utils.get_from_first_device(self._state.policy_params),
}
return [variables[name] for name in names]
def save(self) -> TrainingState:
# Serialize only the first replica of parameters and optimizer state.
return jax.tree_map(utils.get_from_first_device, self._state)
def restore(self, state: TrainingState):
self._state = utils.replicate_in_all_devices(state)
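# Sketch (illustrative only): when `loss_has_aux=True`, the loss passed to
# BCLearner must return a (loss, metrics) pair, as validated by
# _create_loss_metrics above. A minimal loss with that shape:
def _example_loss_with_aux(networks, params, key, transitions):
  """Illustrative only: zero loss plus one auxiliary metric."""
  del networks, params, key
  loss = jnp.zeros(())
  metrics = {'batch_size': jnp.asarray(transitions.observation.shape[0])}
  return loss, metrics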
|
acme-master
|
acme/agents/jax/bc/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the available MPO configuration options."""
import dataclasses
from typing import Callable, Optional, Union
from acme import types
from acme.agents.jax.mpo import types as mpo_types
import numpy as np
import rlax
@dataclasses.dataclass
class MPOConfig:
"""MPO agent configuration."""
batch_size: int = 256 # Total batch size across all learner devices.
discount: float = 0.99
discrete_policy: bool = False
# Specification of the type of experience the learner will consume.
experience_type: mpo_types.ExperienceType = dataclasses.field(
default_factory=lambda: mpo_types.FromTransitions(n_step=5)
)
num_stacked_observations: int = 1
# Optional data-augmentation transformation for observations.
observation_transform: Optional[Callable[[types.NestedTensor],
types.NestedTensor]] = None
# Specification of replay, e.g., min/max size, pure or mixed.
# NOTE: When replay_fraction = 1.0, this reverts to pure replay and the online
# queue is not created.
replay_fraction: float = 1.0 # Fraction of replay data (vs online) per batch.
samples_per_insert: Optional[float] = 32.0
min_replay_size: int = 1_000
max_replay_size: int = 1_000_000
online_queue_capacity: int = 0 # If not set, will use 4 * online_batch_size.
# Critic training configuration.
critic_type: mpo_types.CriticType = mpo_types.CriticType.MIXTURE_OF_GAUSSIANS
value_tx_pair: rlax.TxPair = rlax.IDENTITY_PAIR
use_retrace: bool = False
retrace_lambda: float = 0.95
reward_clip: float = np.float32('inf') # pytype: disable=annotation-type-mismatch # numpy-scalars
use_online_policy_to_bootstrap: bool = False
use_stale_state: bool = False
# Policy training configuration.
num_samples: int = 20 # Number of MPO action samples.
policy_loss_config: Optional[mpo_types.PolicyLossConfig] = None
policy_eval_stochastic: bool = True
policy_eval_num_val_samples: int = 128
# Optimizer configuration.
learning_rate: Union[float, Callable[[int], float]] = 1e-4
dual_learning_rate: Union[float, Callable[[int], float]] = 1e-2
grad_norm_clip: float = 40.
adam_b1: float = 0.9
adam_b2: float = 0.999
weight_decay: float = 0.0
use_cosine_lr_decay: bool = False
cosine_lr_decay_warmup_steps: int = 3000
# Set the target update period or rate depending on whether you want a
# periodic or incremental (exponential weighted average) target update.
# Exactly one must be specified (not None).
target_update_period: Optional[int] = 100
target_update_rate: Optional[float] = None
variable_update_period: int = 1000
# Configuring the mixture of policy and critic losses.
policy_loss_scale: float = 1.0
critic_loss_scale: float = 1.0
# Optional roll-out loss configuration (off by default).
model_rollout_length: int = 0
rollout_policy_loss_scale: float = 1.0
rollout_bc_policy_loss_scale: float = 1.0
rollout_critic_loss_scale: float = 1.0
rollout_reward_loss_scale: float = 1.0
jit_learner: bool = True
def __post_init__(self):
if ((self.target_update_period and self.target_update_rate) or
(self.target_update_period is None and
self.target_update_rate is None)):
raise ValueError(
'Exactly one of target_update_{period|rate} must be set.'
f' Received target_update_period={self.target_update_period} and'
f' target_update_rate={self.target_update_rate}.')
online_batch_size = int(self.batch_size * (1. - self.replay_fraction))
if not self.online_queue_capacity:
# Note: larger capacities mean the online data is more "stale". This seems
# a reasonable default for now.
self.online_queue_capacity = int(4 * online_batch_size)
self.online_queue_capacity = max(self.online_queue_capacity,
online_batch_size + 1)
if self.samples_per_insert is not None and self.replay_fraction < 1:
raise ValueError(
          'Cannot set samples_per_insert when using a mixed replay (i.e. when '
'0 < replay_fraction < 1). Received:\n'
f'\tsamples_per_insert={self.samples_per_insert} and\n'
f'\treplay_fraction={self.replay_fraction}.')
if (0 < self.replay_fraction < 1 and
self.min_replay_size > self.online_queue_capacity):
raise ValueError('When mixing replay with an online queue, min replay '
'size must not be larger than the queue capacity.')
if (isinstance(self.experience_type, mpo_types.FromTransitions) and
self.num_stacked_observations > 1):
raise ValueError(
'Agent-side frame-stacking is currently only supported when learning '
'from sequences. Consider environment-side frame-stacking instead.')
if self.critic_type == mpo_types.CriticType.CATEGORICAL:
if self.model_rollout_length > 0:
raise ValueError(
'Model rollouts are not supported for the Categorical critic')
if not isinstance(self.experience_type, mpo_types.FromTransitions):
raise ValueError(
'Categorical critic only supports experience_type=FromTransitions')
if self.use_retrace:
raise ValueError('retrace is not supported for the Categorical critic')
if self.model_rollout_length > 0 and not self.discrete_policy:
if (self.rollout_policy_loss_scale or self.rollout_bc_policy_loss_scale):
raise ValueError('Policy rollout losses are only supported in the '
'discrete policy case.')
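# A minimal usage sketch (illustrative only, not part of the original file):
#   config = MPOConfig(batch_size=256, target_update_period=100)   # periodic
#   config = MPOConfig(batch_size=256, target_update_period=None,
#                      target_update_rate=5e-3)                    # incremental
# Setting both (or neither) of target_update_{period|rate} raises a ValueError
# in __post_init__ above.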
def _compute_spi_from_replay_fraction(replay_fraction: float) -> float:
"""Computes an estimated samples_per_insert from a replay_fraction.
Assumes actors simultaneously add to both the queue and replay in a mixed
  replay setup. Since the online queue uses samples_per_insert = 1 and the
  online batch size is O = B * (1 - f), the total SPI can be calculated as:
    SPI = B / O = B / (B * (1 - f)) = 1 / (1 - f).
Key:
B: total batch size
O: online batch size
f: replay fraction.
Args:
replay_fraction: fraction of a batch size taken from replay (as opposed to
the queue of online experience) in a mixed replay setting.
Returns:
An estimate of the samples_per_insert value to produce comparable runs in
the pure replay setting.
"""
return 1 / (1 - replay_fraction)
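# Worked example (illustrative only): with replay_fraction f = 0.75, three
# quarters of every batch comes from replay, so the equivalent pure-replay
# setting is samples_per_insert = 1 / (1 - 0.75) = 4.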
def _compute_num_inserts_per_actor_step(samples_per_insert: float,
batch_size: int,
sequence_period: int = 1) -> float:
"""Estimate the number inserts per actor steps."""
return sequence_period * batch_size / samples_per_insert
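# Worked example (illustrative only): with samples_per_insert=32.0,
# batch_size=256 and sequence_period=1 this returns 1 * 256 / 32.0 = 8.0.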
|
acme-master
|
acme/agents/jax/mpo/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MPO learner implementation. With MoG/not and continuous/discrete policies."""
from typing import Tuple
from acme import types
from acme.adders import reverb as adders
from acme.agents.jax.mpo import categorical_mpo as discrete_losses
from acme.agents.jax.mpo import networks as mpo_networks
from acme.agents.jax.mpo import types as mpo_types
from acme.agents.jax.mpo import utils as mpo_utils
from acme.jax import networks as network_lib
import chex
import jax
import jax.numpy as jnp
import rlax
def softmax_cross_entropy(
logits: chex.Array, target_probs: chex.Array) -> chex.Array:
"""Compute cross entropy loss between logits and target probabilities."""
chex.assert_equal_shape([target_probs, logits])
return -jnp.sum(target_probs * jax.nn.log_softmax(logits), axis=-1)
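# Worked example (illustrative only): for logits = [0., 0.] and
# target_probs = [0.5, 0.5], log_softmax(logits) = [-log(2), -log(2)], so the
# returned loss is -sum([0.5, 0.5] * [-log(2), -log(2)]) = log(2) ~= 0.693.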
def top1_accuracy_tiebreak(
logits: chex.Array,
targets: chex.Array,
*,
rng: chex.PRNGKey,
eps: float = 1e-6) -> chex.Array:
"""Compute the top-1 accuracy with an argmax of targets (random tie-break)."""
noise = jax.random.uniform(rng, shape=targets.shape,
minval=-eps, maxval=eps)
acc = jnp.argmax(logits, axis=-1) == jnp.argmax(targets + noise, axis=-1)
return jnp.mean(acc)
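# Illustrative note (not part of the original file): the eps-scaled uniform
# noise only breaks exact ties in `targets`; e.g. for targets = [0.2, 0.2, 0.2]
# the argmax of (targets + noise) is uniform over the three entries, so a fixed
# logits argmax matches with expected accuracy 1/3, while a strict argmax in
# targets is never flipped by the noise.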
class RolloutLoss:
"""A MuZero/Muesli-style loss on the rollouts of the dynamics model."""
def __init__(
self,
dynamics_model: mpo_networks.UnrollableNetwork,
model_rollout_length: int,
loss_scales: mpo_types.LossScalesConfig,
distributional_loss_fn: mpo_types.DistributionalLossFn,
):
self._dynamics_model = dynamics_model
self._model_rollout_length = model_rollout_length
self._loss_scales = loss_scales
self._distributional_loss_fn = distributional_loss_fn
def _rolling_window(self, x: chex.Array, axis: int = 0) -> chex.Array:
"""A convenient tree-mapped and configured call to rolling window.
Stacks R = T - K + 1 action slices of length K = model_rollout_length from
tensor x: [..., 0:K; ...; T-K:T, ...].
Args:
x: The tensor to select rolling slices from (along specified axis), with
shape [..., T, ...] such that T = x.shape[axis].
axis: The axis to slice from (defaults to 0).
Returns:
A tensor containing the stacked slices [0:K, ... T-K:T] from an axis of x
with shape [..., K, R, ...] for input shape [..., T, ...].
"""
def rw(y):
return mpo_utils.rolling_window(
y, window=self._model_rollout_length, axis=axis, time_major=True)
return mpo_utils.tree_map_distribution(rw, x)
def _compute_model_rollout_predictions(
self, params: mpo_networks.MPONetworkParams,
state_embeddings: types.NestedArray,
action_sequence: types.NestedArray) -> mpo_types.ModelOutputs:
"""Roll out the dynamics model for each embedding state."""
assert self._model_rollout_length > 0
# Stack the R=T-K+1 action slices of length K: [0:K; ...; T-K:T]; [K, R].
rollout_actions = self._rolling_window(action_sequence)
    # Create batch of root states (embeddings) s_t for t \in {0, ..., R-1}.
num_rollouts = action_sequence.shape[0] - self._model_rollout_length + 1
root_state = self._dynamics_model.initial_state_fn(
params.dynamics_model_initial_state, state_embeddings[:num_rollouts])
# TODO(abef): randomly choose (fewer?) root unroll states, as in Muesli?
# Roll out K steps forward in time for each root embedding; [K, R, ...].
# For example, policy_rollout[k, t] is the step-k prediction starting from
# state s_t (and same for value_rollout and reward_rollout). Thus, for
# valid values of k, t, and i, policy_rollout[k, t] and
# policy_rollout[k-i, t+i] share the same target.
(policy_rollout, value_rollout, reward_rollout,
embedding_rollout), _ = self._dynamics_model.unroll(
params.dynamics_model, rollout_actions, root_state)
# TODO(abef): try using the same params for both the root & rollout heads.
chex.assert_shape([rollout_actions, embedding_rollout],
(self._model_rollout_length, num_rollouts, ...))
# Create the outputs but drop the rollout that uses action a_{T-1} (and
# thus contains state s_T) for the policy, value, and embedding because we
# don't have targets for s_T (but we do know them for the final reward).
# Also drop the rollout with s_{T-1} for the value because we don't have
# targets for that either.
return mpo_types.ModelOutputs(
policy=policy_rollout[:, :-1], # [K, R-1, ...]
value=value_rollout[:, :-2], # [K, R-2, ...]
reward=reward_rollout, # [K, R, ...]
embedding=embedding_rollout[:, :-1]) # [K, R-1, ...]
def __call__(
self,
params: mpo_networks.MPONetworkParams,
dual_params: mpo_types.DualParams,
sequence: adders.Step,
state_embeddings: types.NestedArray,
targets: mpo_types.LossTargets,
key: network_lib.PRNGKey,
) -> Tuple[jnp.ndarray, mpo_types.LogDict]:
num_rollouts = sequence.reward.shape[0] - self._model_rollout_length + 1
indices = jnp.arange(num_rollouts)
# Create rollout predictions.
rollout = self._compute_model_rollout_predictions(
params=params, state_embeddings=state_embeddings,
action_sequence=sequence.action)
# Create rollout target tensors. The rollouts will not contain the policy
# and value at t=0 because they start after taking the first action in
# the sequence, so drop those when creating the targets. They will contain
# the reward at t=0, however, because of how the sequences are stored.
# Rollout target shapes:
# - value: [N, Z, T-2] -> [N, Z, K, R-2],
# - reward: [T] -> [K, R].
value_targets = self._rolling_window(targets.value[..., 1:], axis=-1)
reward_targets = self._rolling_window(targets.reward)[None, None, ...]
# Define the value and reward rollout loss functions.
def value_loss_fn(root_idx) -> jnp.ndarray:
return self._distributional_loss_fn(
rollout.value[:, root_idx], # [K, R-2, ...]
value_targets[..., root_idx]) # [..., K, R-2]
def reward_loss_fn(root_idx) -> jnp.ndarray:
return self._distributional_loss_fn(
rollout.reward[:, root_idx], # [K, R, ...]
reward_targets[..., root_idx]) # [..., K, R]
# Reward and value losses.
critic_loss = jnp.mean(jax.vmap(value_loss_fn)(indices[:-2]))
reward_loss = jnp.mean(jax.vmap(reward_loss_fn)(indices))
# Define the MPO policy rollout loss.
mpo_policy_loss = 0
if self._loss_scales.rollout.policy:
# Rollout target shapes:
# - policy: [T-1, ...] -> [K, R-1, ...],
# - q_improvement: [N, T-1] -> [N, K, R-1].
policy_targets = self._rolling_window(targets.policy[1:])
q_improvement = self._rolling_window(targets.q_improvement[:, 1:], axis=1)
def policy_loss_fn(root_idx) -> jnp.ndarray:
chex.assert_shape((rollout.policy.logits, policy_targets.logits), # pytype: disable=attribute-error # numpy-scalars
(self._model_rollout_length, num_rollouts - 1, None))
chex.assert_shape(q_improvement,
(None, self._model_rollout_length, num_rollouts - 1))
# Compute MPO's E-step unnormalized logits.
temperature = discrete_losses.get_temperature_from_params(dual_params)
policy_target_probs = jax.nn.softmax(
jnp.transpose(q_improvement[..., root_idx]) / temperature +
jax.nn.log_softmax(policy_targets[:, root_idx].logits, axis=-1)) # pytype: disable=attribute-error # numpy-scalars
return softmax_cross_entropy(rollout.policy[:, root_idx].logits, # pytype: disable=bad-return-type # numpy-scalars
jax.lax.stop_gradient(policy_target_probs))
# Compute the MPO loss and add it to the overall rollout policy loss.
mpo_policy_loss = jax.vmap(policy_loss_fn)(indices[:-1])
mpo_policy_loss = jnp.mean(mpo_policy_loss)
# Define the BC policy rollout loss (only supported for discrete policies).
bc_policy_loss, bc_policy_acc = 0, 0
if self._loss_scales.rollout.bc_policy:
num_actions = rollout.policy.logits.shape[-1] # A
bc_targets = self._rolling_window( # [T-1, A] -> [K, R-1, A]
rlax.one_hot(sequence.action[1:], num_actions))
def bc_policy_loss_fn(root_idx) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Self-behavior-cloning loss (cross entropy on rollout actions)."""
chex.assert_shape(
(rollout.policy.logits, bc_targets),
(self._model_rollout_length, num_rollouts - 1, num_actions))
loss = softmax_cross_entropy(rollout.policy.logits[:, root_idx],
bc_targets[:, root_idx])
top1_accuracy = top1_accuracy_tiebreak(
rollout.policy.logits[:, root_idx],
bc_targets[:, root_idx],
rng=key)
return loss, top1_accuracy # pytype: disable=bad-return-type # numpy-scalars
# Compute each rollout loss by vmapping over the rollouts.
bc_policy_loss, bc_policy_acc = jax.vmap(bc_policy_loss_fn)(indices[:-1])
bc_policy_loss = jnp.mean(bc_policy_loss)
bc_policy_acc = jnp.mean(bc_policy_acc)
# Combine losses.
loss = (
self._loss_scales.rollout.policy * mpo_policy_loss +
self._loss_scales.rollout.bc_policy * bc_policy_loss +
self._loss_scales.critic * self._loss_scales.rollout.critic *
critic_loss + self._loss_scales.rollout.reward * reward_loss)
logging_dict = {
'rollout_critic_loss': critic_loss,
'rollout_reward_loss': reward_loss,
'rollout_policy_loss': mpo_policy_loss,
'rollout_bc_policy_loss': bc_policy_loss,
'rollout_bc_accuracy': bc_policy_acc,
'rollout_loss': loss,
}
return loss, logging_dict # pytype: disable=bad-return-type # jax-ndarray
|
acme-master
|
acme/agents/jax/mpo/rollout_loss.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MPO agent module."""
from acme.agents.jax.mpo.acting import ActorState
from acme.agents.jax.mpo.acting import make_actor_core
from acme.agents.jax.mpo.builder import MPOBuilder
from acme.agents.jax.mpo.config import MPOConfig
from acme.agents.jax.mpo.learning import MPOLearner
from acme.agents.jax.mpo.networks import make_control_networks
from acme.agents.jax.mpo.networks import MPONetworks
from acme.agents.jax.mpo.types import CategoricalPolicyLossConfig
from acme.agents.jax.mpo.types import CriticType
from acme.agents.jax.mpo.types import GaussianPolicyLossConfig
from acme.agents.jax.mpo.types import PolicyLossConfig
|
acme-master
|
acme/agents/jax/mpo/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some types/assumptions used in the MoG-MPO agent."""
import dataclasses
import enum
from typing import Callable, Mapping, Optional, Union
from acme import types
from acme.agents.jax.mpo import categorical_mpo as discrete_losses
import acme.jax.losses.mpo as continuous_losses
import distrax
import jax.numpy as jnp
# TODO(bshahr): consider upstreaming these to core types.
NestedArray = types.NestedArray
Observation = types.NestedArray
ObservationEmbedding = types.NestedArray
Action = jnp.ndarray
RNGKey = jnp.ndarray
Entropy = jnp.ndarray
LogProb = jnp.ndarray
ExperienceType = Union['FromTransitions', 'FromSequences']
DistributionLike = distrax.DistributionLike
DistributionOrArray = Union[DistributionLike, jnp.ndarray]
LogDict = Mapping[str, jnp.ndarray]
PolicyStats = Union[
discrete_losses.CategoricalMPOStats, continuous_losses.MPOStats]
DualParams = Union[continuous_losses.MPOParams,
discrete_losses.CategoricalMPOParams]
DistributionalLossFn = Callable[[DistributionLike, jnp.ndarray], jnp.ndarray]
@dataclasses.dataclass
class FromTransitions:
"""Configuration for learning from n-step transitions."""
n_step: int = 1
# TODO(bshahr): consider adding the discount here.
@dataclasses.dataclass
class FromSequences:
"""Configuration for learning from sequences."""
sequence_length: int = 2
sequence_period: int = 1
# Configuration of how to bootstrap from these sequences.
n_step: Optional[int] = 5
# Lambda used to discount future rewards as in TD(lambda), Retrace, etc.
td_lambda: Optional[float] = 1.0
class CriticType(enum.Enum):
"""Types of critic that are supported."""
NONDISTRIBUTIONAL = 'nondistributional'
MIXTURE_OF_GAUSSIANS = 'mixture_of_gaussians'
CATEGORICAL_2HOT = 'categorical_2hot'
CATEGORICAL = 'categorical'
class RnnCoreType(enum.Enum):
"""Types of core that are supported for rnn."""
IDENTITY = 'identity'
GRU = 'gru'
@dataclasses.dataclass
class GaussianPolicyLossConfig:
"""Configuration for the continuous (Gaussian) policy loss."""
epsilon: float = 0.1
epsilon_penalty: float = 0.001
epsilon_mean: float = 0.0025
epsilon_stddev: float = 1e-6
init_log_temperature: float = 10.
init_log_alpha_mean: float = 10.
init_log_alpha_stddev: float = 1000.
action_penalization: bool = True
per_dim_constraining: bool = True
@dataclasses.dataclass
class CategoricalPolicyLossConfig:
"""Configuration for the discrete (categorical) policy loss."""
epsilon: float = 0.1
epsilon_policy: float = 0.0025
init_log_temperature: float = 3.
init_log_alpha: float = 3.
PolicyLossConfig = Union[GaussianPolicyLossConfig, CategoricalPolicyLossConfig]
@dataclasses.dataclass(frozen=True)
class RolloutLossScalesConfig:
"""Configuration for scaling the rollout losses used in the learner."""
policy: float = 1.0
bc_policy: float = 1.0
critic: float = 1.0
reward: float = 1.0
@dataclasses.dataclass(frozen=True)
class LossScalesConfig:
"""Configuration for scaling the rollout losses used in the learner."""
policy: float = 1.0
critic: float = 1.0
rollout: Optional[RolloutLossScalesConfig] = None
@dataclasses.dataclass(frozen=True)
class ModelOutputs:
"""Container for the outputs of the model."""
policy: Optional[types.NestedArray] = None
q_value: Optional[types.NestedArray] = None
value: Optional[types.NestedArray] = None
reward: Optional[types.NestedArray] = None
embedding: Optional[types.NestedArray] = None
recurrent_state: Optional[types.NestedArray] = None
@dataclasses.dataclass(frozen=True)
class LossTargets:
"""Container for the targets used to compute the model loss."""
# Policy targets.
policy: Optional[types.NestedArray] = None
a_improvement: Optional[types.NestedArray] = None
q_improvement: Optional[types.NestedArray] = None
# Value targets.
q_value: Optional[types.NestedArray] = None
value: Optional[types.NestedArray] = None
reward: Optional[types.NestedArray] = None
embedding: Optional[types.NestedArray] = None
|
acme-master
|
acme/agents/jax/mpo/types.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the MPO agent builder, which holds factories for all components."""
import functools
from typing import Iterator, List, Optional
from absl import logging
from acme import core
from acme import specs
from acme.adders import base
from acme.adders import reverb as adders
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.agents.jax.mpo import acting
from acme.agents.jax.mpo import config as mpo_config
from acme.agents.jax.mpo import learning
from acme.agents.jax.mpo import networks as mpo_networks
from acme.agents.jax.mpo import types as mpo_types
from acme.datasets import image_augmentation as img_aug
from acme.datasets import reverb as datasets
from acme.jax import observation_stacking as obs_stacking
from acme.jax import types as jax_types
from acme.jax import utils
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import chex
import jax
import optax
import reverb
# Acme loves Reverb.
import tensorflow as tf
import tree
_POLICY_KEY = 'policy'
_QUEUE_TABLE_NAME = 'queue_table'
class MPOBuilder(builders.ActorLearnerBuilder):
"""Builder class for MPO agent components."""
def __init__(self,
config: mpo_config.MPOConfig,
*,
sgd_steps_per_learner_step: int = 8,
max_learner_steps: Optional[int] = None):
self.config = config
self.sgd_steps_per_learner_step = sgd_steps_per_learner_step
self._max_learner_steps = max_learner_steps
def make_policy(
self,
networks: mpo_networks.MPONetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False,
) -> actor_core_lib.ActorCore:
actor_core = acting.make_actor_core(
networks,
stochastic=not evaluation,
store_core_state=self.config.use_stale_state,
store_log_prob=self.config.use_retrace)
# Maybe wrap the actor core to perform actor-side observation stacking.
if self.config.num_stacked_observations > 1:
actor_core = obs_stacking.wrap_actor_core(
actor_core,
observation_spec=environment_spec.observations,
num_stacked_observations=self.config.num_stacked_observations)
return actor_core
def make_actor(
self,
random_key: jax_types.PRNGKey,
policy: actor_core_lib.ActorCore,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[base.Adder] = None,
) -> core.Actor:
del environment_spec # This actor doesn't need the spec beyond the policy.
variable_client = variable_utils.VariableClient(
client=variable_source,
key=_POLICY_KEY,
update_period=self.config.variable_update_period)
return actors.GenericActor(
actor=policy,
random_key=random_key,
variable_client=variable_client,
adder=adder,
backend='cpu')
def make_learner(self,
random_key: jax_types.PRNGKey,
networks: mpo_networks.MPONetworks,
dataset: Iterator[reverb.ReplaySample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None) -> core.Learner:
# Set defaults.
del replay_client # Unused as we do not update priorities.
learning_rate = self.config.learning_rate
# Make sure we can split the batches evenly across all accelerator devices.
num_learner_devices = jax.device_count()
if self.config.batch_size % num_learner_devices > 0:
raise ValueError(
'Batch size must divide evenly by the number of learner devices.'
f' Passed a batch size of {self.config.batch_size} and the number of'
f' available learner devices is {num_learner_devices}. Specifically,'
f' devices: {jax.devices()}.')
agent_environment_spec = environment_spec
if self.config.num_stacked_observations > 1:
# Adjust the observation spec for the agent-side frame-stacking.
# Note: this is only for the ActorCore's benefit, the adders want the true
# environment spec.
agent_environment_spec = obs_stacking.get_adjusted_environment_spec(
agent_environment_spec, self.config.num_stacked_observations)
if self.config.use_cosine_lr_decay:
learning_rate = optax.warmup_cosine_decay_schedule(
init_value=0.,
peak_value=self.config.learning_rate,
warmup_steps=self.config.cosine_lr_decay_warmup_steps,
decay_steps=self._max_learner_steps)
optimizer = optax.adamw(
learning_rate,
b1=self.config.adam_b1,
b2=self.config.adam_b2,
weight_decay=self.config.weight_decay)
# TODO(abef): move LR scheduling and optimizer creation into launcher.
loss_scales_config = mpo_types.LossScalesConfig(
policy=self.config.policy_loss_scale,
critic=self.config.critic_loss_scale,
rollout=mpo_types.RolloutLossScalesConfig(
policy=self.config.rollout_policy_loss_scale,
bc_policy=self.config.rollout_bc_policy_loss_scale,
critic=self.config.rollout_critic_loss_scale,
reward=self.config.rollout_reward_loss_scale,
))
logger = logger_fn(
'learner',
steps_key=counter.get_steps_key() if counter else 'learner_steps')
with chex.fake_pmap_and_jit(not self.config.jit_learner,
not self.config.jit_learner):
learner = learning.MPOLearner(
iterator=dataset,
networks=networks,
environment_spec=agent_environment_spec,
critic_type=self.config.critic_type,
discrete_policy=self.config.discrete_policy,
random_key=random_key,
discount=self.config.discount,
num_samples=self.config.num_samples,
policy_eval_stochastic=self.config.policy_eval_stochastic,
policy_eval_num_val_samples=self.config.policy_eval_num_val_samples,
policy_loss_config=self.config.policy_loss_config,
loss_scales=loss_scales_config,
target_update_period=self.config.target_update_period,
target_update_rate=self.config.target_update_rate,
experience_type=self.config.experience_type,
use_online_policy_to_bootstrap=(
self.config.use_online_policy_to_bootstrap),
use_stale_state=self.config.use_stale_state,
use_retrace=self.config.use_retrace,
retrace_lambda=self.config.retrace_lambda,
model_rollout_length=self.config.model_rollout_length,
sgd_steps_per_learner_step=self.sgd_steps_per_learner_step,
optimizer=optimizer,
dual_optimizer=optax.adam(self.config.dual_learning_rate),
grad_norm_clip=self.config.grad_norm_clip,
reward_clip=self.config.reward_clip,
value_tx_pair=self.config.value_tx_pair,
counter=counter,
logger=logger,
devices=jax.devices(),
)
return learner
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: actor_core_lib.ActorCore, # Used to get accurate extras_spec.
) -> List[reverb.Table]:
dummy_actor_state = policy.init(jax.random.PRNGKey(0))
extras_spec = policy.get_extras(dummy_actor_state)
if isinstance(self.config.experience_type, mpo_types.FromTransitions):
signature = adders.NStepTransitionAdder.signature(environment_spec,
extras_spec)
elif isinstance(self.config.experience_type, mpo_types.FromSequences):
sequence_length = (
self.config.experience_type.sequence_length +
self.config.num_stacked_observations - 1)
signature = adders.SequenceAdder.signature(
environment_spec, extras_spec, sequence_length=sequence_length)
# TODO(bshahr): This way of obtaining the signature is error-prone. Find a
# programmatic way via make_adder.
# Create the rate limiter.
if self.config.samples_per_insert:
# Create enough of an error buffer to give a 10% tolerance in rate.
samples_per_insert_tolerance = 0.1 * self.config.samples_per_insert
error_buffer = self.config.min_replay_size * samples_per_insert_tolerance
limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=self.config.min_replay_size,
samples_per_insert=self.config.samples_per_insert,
error_buffer=max(error_buffer, 2 * self.config.samples_per_insert))
else:
limiter = reverb.rate_limiters.MinSize(self.config.min_replay_size)
# Reverb loves Acme.
replay_extensions = []
queue_extensions = []
# Create replay tables.
tables = []
if self.config.replay_fraction > 0:
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self.config.max_replay_size,
rate_limiter=limiter,
extensions=replay_extensions,
signature=signature)
tables.append(replay_table)
logging.info(
'Creating off-policy replay buffer with replay fraction %g '
'of batch %d', self.config.replay_fraction, self.config.batch_size)
if self.config.replay_fraction < 1:
# Create a FIFO queue. This will provide the rate limitation if used.
queue = reverb.Table.queue(
name=_QUEUE_TABLE_NAME,
max_size=self.config.online_queue_capacity,
extensions=queue_extensions,
signature=signature)
tables.append(queue)
logging.info(
'Creating online replay queue with queue fraction %g '
'of batch %d', 1.0 - self.config.replay_fraction,
self.config.batch_size)
return tables
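  # Illustrative configuration sketch (not part of the original file): with
  # replay_fraction=0.5 this returns both the uniform replay table
  # (adders.DEFAULT_PRIORITY_TABLE) and the FIFO queue (_QUEUE_TABLE_NAME);
  # make_dataset_iterator below then mixes them 50/50 via the table weights it
  # passes to datasets.make_reverb_dataset. With the default
  # replay_fraction=1.0 only the replay table is created.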
def make_adder(
self,
replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[actor_core_lib.ActorCore],
) -> Optional[base.Adder]:
del environment_spec, policy
# Specify the tables to insert into but don't use prioritization.
priority_fns = {}
if self.config.replay_fraction > 0:
priority_fns[adders.DEFAULT_PRIORITY_TABLE] = None
if self.config.replay_fraction < 1:
priority_fns[_QUEUE_TABLE_NAME] = None
if isinstance(self.config.experience_type, mpo_types.FromTransitions):
return adders.NStepTransitionAdder(
client=replay_client,
n_step=self.config.experience_type.n_step,
discount=self.config.discount,
priority_fns=priority_fns)
elif isinstance(self.config.experience_type, mpo_types.FromSequences):
sequence_length = (
self.config.experience_type.sequence_length +
self.config.num_stacked_observations - 1)
return adders.SequenceAdder(
client=replay_client,
sequence_length=sequence_length,
period=self.config.experience_type.sequence_period,
end_of_episode_behavior=adders.EndBehavior.WRITE,
max_in_flight_items=1,
priority_fns=priority_fns)
def make_dataset_iterator(
self, replay_client: reverb.Client) -> Iterator[reverb.ReplaySample]:
if self.config.num_stacked_observations > 1:
maybe_stack_observations = functools.partial(
obs_stacking.stack_reverb_observation,
stack_size=self.config.num_stacked_observations)
else:
maybe_stack_observations = None
dataset = datasets.make_reverb_dataset(
server_address=replay_client.server_address,
batch_size=self.config.batch_size // jax.device_count(),
table={
adders.DEFAULT_PRIORITY_TABLE: self.config.replay_fraction,
_QUEUE_TABLE_NAME: 1. - self.config.replay_fraction,
},
num_parallel_calls=max(16, 4 * jax.local_device_count()),
max_in_flight_samples_per_worker=(2 * self.sgd_steps_per_learner_step *
self.config.batch_size //
jax.device_count()),
postprocess=maybe_stack_observations)
if self.config.observation_transform:
# Augment dataset with random translations, simulated by pad-and-crop.
transform = img_aug.make_transform(
observation_transform=self.config.observation_transform,
transform_next_observation=isinstance(self.config.experience_type,
mpo_types.FromTransitions))
dataset = dataset.map(
transform, num_parallel_calls=16, deterministic=False)
# Batch and then flatten to feed multiple SGD steps per learner step.
if self.sgd_steps_per_learner_step > 1:
dataset = dataset.batch(
self.sgd_steps_per_learner_step, drop_remainder=True)
batch_flatten = lambda t: tf.reshape(t, [-1] + t.shape[2:].as_list())
dataset = dataset.map(lambda x: tree.map_structure(batch_flatten, x))
return utils.multi_device_put(dataset.as_numpy_iterator(),
jax.local_devices())
|
acme-master
|
acme/agents/jax/mpo/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MoG-MPO network definitions."""
import dataclasses
from typing import Callable, NamedTuple, Optional, Sequence, Tuple, Union
from acme import specs
from acme.agents.jax.mpo import types
from acme.jax import networks as networks_lib
from acme.jax import utils
import chex
import haiku as hk
import haiku.initializers as hk_init
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow_probability.substrates.jax as tfp
tfd = tfp.distributions
DistributionOrArray = Union[tfd.Distribution, jnp.ndarray]
class MPONetworkParams(NamedTuple):
policy_head: Optional[hk.Params] = None
critic_head: Optional[hk.Params] = None
torso: Optional[hk.Params] = None
torso_initial_state: Optional[hk.Params] = None
dynamics_model: Union[hk.Params, Tuple[()]] = ()
dynamics_model_initial_state: Union[hk.Params, Tuple[()]] = ()
@dataclasses.dataclass
class UnrollableNetwork:
"""Network that can unroll over an input sequence."""
init: Callable[[networks_lib.PRNGKey, types.Observation, hk.LSTMState],
hk.Params]
apply: Callable[[hk.Params, types.Observation, hk.LSTMState],
Tuple[jnp.ndarray, hk.LSTMState]]
unroll: Callable[[hk.Params, types.Observation, hk.LSTMState],
Tuple[jnp.ndarray, hk.LSTMState]]
initial_state_fn_init: Callable[[networks_lib.PRNGKey, Optional[int]],
hk.Params]
initial_state_fn: Callable[[hk.Params, Optional[int]], hk.LSTMState]
@dataclasses.dataclass
class MPONetworks:
"""Network for the MPO agent."""
policy_head: Optional[hk.Transformed] = None
critic_head: Optional[hk.Transformed] = None
torso: Optional[UnrollableNetwork] = None
dynamics_model: Optional[UnrollableNetwork] = None
def policy_head_apply(self, params: MPONetworkParams,
obs_embedding: types.ObservationEmbedding):
return self.policy_head.apply(params.policy_head, obs_embedding)
def critic_head_apply(self, params: MPONetworkParams,
obs_embedding: types.ObservationEmbedding,
actions: types.Action):
return self.critic_head.apply(params.critic_head, obs_embedding, actions)
def torso_unroll(self, params: MPONetworkParams,
observations: types.Observation, state: hk.LSTMState):
return self.torso.unroll(params.torso, observations, state)
def dynamics_model_unroll(self, params: MPONetworkParams,
actions: types.Action, state: hk.LSTMState):
return self.dynamics_model.unroll(params.dynamics_model, actions, state)
def init_params(
networks: MPONetworks,
spec: specs.EnvironmentSpec,
random_key: types.RNGKey,
add_batch_dim: bool = False,
dynamics_rollout_length: int = 0,
) -> Tuple[MPONetworkParams, hk.LSTMState]:
"""Initialize the parameters of a MPO network."""
rng_keys = jax.random.split(random_key, 6)
# Create a dummy observation/action to initialize network parameters.
observations, actions = utils.zeros_like((spec.observations, spec.actions))
# Add batch dimensions if necessary by the scope that is calling this init.
if add_batch_dim:
observations, actions = utils.add_batch_dim((observations, actions))
# Initialize the state torso parameters and create a dummy core state.
batch_size = 1 if add_batch_dim else None
params_torso_initial_state = networks.torso.initial_state_fn_init(
rng_keys[0], batch_size)
state = networks.torso.initial_state_fn(
params_torso_initial_state, batch_size)
# Initialize the core and unroll one step to create a dummy core output.
# The input to the core is the current action and the next observation.
params_torso = networks.torso.init(rng_keys[1], observations, state)
embeddings, _ = networks.torso.apply(params_torso, observations, state)
# Initialize the policy and critic heads by passing in the dummy embedding.
params_policy_head, params_critic_head = {}, {} # Cannot be None for BIT.
if networks.policy_head:
params_policy_head = networks.policy_head.init(rng_keys[2], embeddings)
if networks.critic_head:
params_critic_head = networks.critic_head.init(rng_keys[3], embeddings,
actions)
# Initialize the recurrent dynamics model if it exists.
if networks.dynamics_model and dynamics_rollout_length > 0:
params_dynamics_initial_state = networks.dynamics_model.initial_state_fn_init(
rng_keys[4], embeddings)
dynamics_state = networks.dynamics_model.initial_state_fn(
params_dynamics_initial_state, embeddings)
params_dynamics = networks.dynamics_model.init(
rng_keys[5], actions, dynamics_state)
else:
params_dynamics_initial_state = ()
params_dynamics = ()
params = MPONetworkParams(
policy_head=params_policy_head,
critic_head=params_critic_head,
torso=params_torso,
torso_initial_state=params_torso_initial_state,
dynamics_model=params_dynamics,
dynamics_model_initial_state=params_dynamics_initial_state)
return params, state
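# A minimal usage sketch (illustrative only; `networks`, `spec` and `key` stand
# for an MPONetworks, an EnvironmentSpec and a PRNGKey respectively):
#   params, initial_core_state = init_params(
#       networks, spec, key, add_batch_dim=True, dynamics_rollout_length=0)
# With dynamics_rollout_length=0 the dynamics-model entries of `params` are
# left as empty tuples, since the dynamics model is only initialized for
# positive rollout lengths.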
def make_unrollable_network(
make_core_module: Callable[[], hk.RNNCore] = hk.IdentityCore,
make_feedforward_module: Optional[Callable[[], hk.SupportsCall]] = None,
make_initial_state_fn: Optional[Callable[[], hk.SupportsCall]] = None,
) -> UnrollableNetwork:
"""Produces an UnrollableNetwork and a state initializing hk.Transformed."""
def default_initial_state_fn(batch_size: Optional[int] = None) -> jnp.ndarray:
return make_core_module().initial_state(batch_size)
def _apply_core_fn(observation: types.Observation,
state: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
if make_feedforward_module:
observation = make_feedforward_module()(observation)
return make_core_module()(observation, state)
def _unroll_core_fn(observation: types.Observation,
state: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
if make_feedforward_module:
observation = make_feedforward_module()(observation)
return hk.dynamic_unroll(make_core_module(), observation, state)
if make_initial_state_fn:
initial_state_fn = make_initial_state_fn()
else:
initial_state_fn = default_initial_state_fn
# Transform module functions into pure functions.
hk_initial_state_fn = hk.without_apply_rng(hk.transform(initial_state_fn))
apply_core = hk.without_apply_rng(hk.transform(_apply_core_fn))
unroll_core = hk.without_apply_rng(hk.transform(_unroll_core_fn))
# Pack all core network pure functions into a single convenient container.
return UnrollableNetwork(
init=apply_core.init,
apply=apply_core.apply,
unroll=unroll_core.apply,
initial_state_fn_init=hk_initial_state_fn.init,
initial_state_fn=hk_initial_state_fn.apply)
def make_control_networks(
environment_spec: specs.EnvironmentSpec,
*,
with_recurrence: bool = False,
policy_layer_sizes: Sequence[int] = (256, 256, 256),
critic_layer_sizes: Sequence[int] = (512, 512, 256),
policy_init_scale: float = 0.7,
critic_type: types.CriticType = types.CriticType.MIXTURE_OF_GAUSSIANS,
mog_init_scale: float = 1e-3, # Used by MoG critic.
mog_num_components: int = 5, # Used by MoG critic.
categorical_num_bins: int = 51, # Used by CATEGORICAL* critics.
vmin: float = -150., # Used by CATEGORICAL* critics.
vmax: float = 150., # Used by CATEGORICAL* critics.
) -> MPONetworks:
"""Creates MPONetworks to be used DM Control suite tasks."""
# Unpack the environment spec to get appropriate shapes, dtypes, etc.
num_dimensions = np.prod(environment_spec.actions.shape, dtype=int)
# Factory to create the core hk.Module. Must be a factory as the module must
# be initialized within a hk.transform scope.
if with_recurrence:
make_core_module = lambda: GRUWithSkip(16)
else:
make_core_module = hk.IdentityCore
def policy_fn(observation: types.NestedArray) -> tfd.Distribution:
embedding = networks_lib.LayerNormMLP(
policy_layer_sizes, activate_final=True)(
observation)
return networks_lib.MultivariateNormalDiagHead(
num_dimensions, init_scale=policy_init_scale)(
embedding)
def critic_fn(observation: types.NestedArray,
action: types.NestedArray) -> DistributionOrArray:
# Action is clipped to avoid critic extrapolations outside the spec range.
clipped_action = networks_lib.ClipToSpec(environment_spec.actions)(action)
inputs = jnp.concatenate([observation, clipped_action], axis=-1)
embedding = networks_lib.LayerNormMLP(
critic_layer_sizes, activate_final=True)(
inputs)
if critic_type == types.CriticType.MIXTURE_OF_GAUSSIANS:
return networks_lib.GaussianMixture(
num_dimensions=1,
num_components=mog_num_components,
multivariate=False,
init_scale=mog_init_scale,
append_singleton_event_dim=False,
reinterpreted_batch_ndims=0)(
embedding)
elif critic_type in (types.CriticType.CATEGORICAL,
types.CriticType.CATEGORICAL_2HOT):
return networks_lib.CategoricalCriticHead(
num_bins=categorical_num_bins, vmin=vmin, vmax=vmax)(
embedding)
else:
return hk.Linear(
output_size=1, w_init=hk_init.TruncatedNormal(0.01))(
embedding)
# Create unrollable torso.
torso = make_unrollable_network(make_core_module=make_core_module)
# Create MPONetworks to add functionality required by the agent.
return MPONetworks(
policy_head=hk.without_apply_rng(hk.transform(policy_fn)),
critic_head=hk.without_apply_rng(hk.transform(critic_fn)),
torso=torso)
def add_batch(nest, batch_size: Optional[int]):
"""Adds a batch dimension at axis 0 to the leaves of a nested structure."""
broadcast = lambda x: jnp.broadcast_to(x, (batch_size,) + x.shape)
return jax.tree_map(broadcast, nest)
def w_init_identity(shape: Sequence[int], dtype) -> jnp.ndarray:
chex.assert_equal(len(shape), 2)
chex.assert_equal(shape[0], shape[1])
return jnp.eye(shape[0], dtype=dtype)
class IdentityRNN(hk.RNNCore):
r"""Basic fully-connected RNN core with identity initialization.
Given :math:`x_t` and the previous hidden state :math:`h_{t-1}` the
core computes
.. math::
h_t = \operatorname{ReLU}(w_i x_t + b_i + w_h h_{t-1} + b_h)
The output is equal to the new state, :math:`h_t`.
Initialized using the strategy described in:
https://arxiv.org/pdf/1504.00941.pdf
"""
def __init__(self,
hidden_size: int,
hidden_scale: float = 1e-2,
name: Optional[str] = None):
"""Constructs a vanilla RNN core.
Args:
hidden_size: Hidden layer size.
hidden_scale: Scalar multiplying the hidden-to-hidden matmul.
name: Name of the module.
"""
super().__init__(name=name)
self._initial_state = jnp.zeros([hidden_size])
self._hidden_scale = hidden_scale
self._input_to_hidden = hk.Linear(hidden_size)
self._hidden_to_hidden = hk.Linear(
hidden_size, with_bias=True, w_init=w_init_identity)
def __call__(self, inputs: jnp.ndarray, prev_state: jnp.ndarray):
out = jax.nn.relu(
self._input_to_hidden(inputs) +
self._hidden_scale * self._hidden_to_hidden(prev_state))
return out, out
def initial_state(self, batch_size: Optional[int]):
state = self._initial_state
if batch_size is not None:
state = add_batch(state, batch_size)
return state
class GRU(hk.GRU):
"""GRU with an identity initialization."""
def __init__(self, hidden_size: int, name: Optional[str] = None):
def b_init(unused_size: Sequence[int], dtype) -> jnp.ndarray:
"""Initializes the biases so the GRU ignores the state and acts as a tanh."""
return jnp.concatenate([
+2 * jnp.ones([hidden_size], dtype=dtype),
-2 * jnp.ones([hidden_size], dtype=dtype),
jnp.zeros([hidden_size], dtype=dtype)
])
super().__init__(hidden_size=hidden_size, b_init=b_init, name=name)
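# Rough intuition for b_init above (a sketch, assuming Haiku's GRU gate order
# of update, reset, candidate): a +2 bias gives an update gate of
# sigmoid(2) ~= 0.88 and a -2 bias gives a reset gate of sigmoid(-2) ~= 0.12,
# so at initialization the next state is dominated by the tanh candidate of
# the current input, i.e. the core ignores the state and acts like a tanh.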
class GRUWithSkip(hk.GRU):
"""GRU with a skip-connection from input to output."""
def __call__(self, inputs: jnp.ndarray, prev_state: jnp.ndarray):
outputs, state = super().__call__(inputs, prev_state)
outputs = jnp.concatenate([inputs, outputs], axis=-1)
return outputs, state
class Conv2DLSTMWithSkip(hk.Conv2DLSTM):
"""Conv2DLSTM with a skip-connection from input to output."""
def __call__(self, inputs: jnp.ndarray, state: jnp.ndarray):
outputs, state = super().__call__(inputs, state) # pytype: disable=wrong-arg-types # jax-ndarray
outputs = jnp.concatenate([inputs, outputs], axis=-1)
return outputs, state
|
acme-master
|
acme/agents/jax/mpo/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for MPO agent."""
from typing import Callable
from acme import types
from acme.adders import reverb as adders
from acme.agents.jax.mpo import types as mpo_types
import distrax
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow_probability.substrates.jax as tfp
tfd = tfp.distributions
def _fetch_devicearray(x):
if isinstance(x, jax.Array):
return np.asarray(x)
return x
def get_from_first_device(nest, as_numpy: bool = True):
"""Gets the first array of a nest of `jax.pxla.ShardedDeviceArray`s."""
# TODO(abef): remove this when fake_pmap is fixed or acme error is removed.
def _slice_and_maybe_to_numpy(x):
x = x[0]
return _fetch_devicearray(x) if as_numpy else x
return jax.tree_map(_slice_and_maybe_to_numpy, nest)
def rolling_window(x: jnp.ndarray,
window: int,
axis: int = 0,
time_major: bool = True):
"""Stack the N=T-W+1 length W slices [0:W, 1:W+1, ..., T-W:T] from a tensor.
Args:
x: The tensor to select rolling slices from (along specified axis), with
shape [..., T, ...]; i.e., T = x.shape[axis].
window: The length (W) of the slices to select.
axis: The axis to slice from (defaults to 0).
time_major: If true, output will have shape [..., W, N, ...], otherwise
it will have shape [..., N, W, ...], where x.shape is [..., T, ...].
Returns:
A tensor containing the stacked slices [0:W, ... T-W:T] from an axis of x.
"""
sequence_length = x.shape[axis]
starts = jnp.arange(sequence_length - window + 1)
ends = jnp.arange(window)
if time_major:
idx = starts[None, :] + ends[:, None] # Output will be [..., W, N, ...].
else:
idx = starts[:, None] + ends[None, :] # Output will be [..., N, W, ...].
out = jnp.take(x, idx, axis=axis)
return out
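# Worked example (illustrative only): for x = jnp.arange(5) and window=2,
#   rolling_window(x, window=2, time_major=True)   -> [[0, 1, 2, 3],
#                                                      [1, 2, 3, 4]]  # [W, N]
#   rolling_window(x, window=2, time_major=False)  -> [[0, 1],
#                                                      [1, 2],
#                                                      [2, 3],
#                                                      [3, 4]]        # [N, W]
# i.e. each length-2 slice x[t:t+2] appears as a column (time-major) or a row.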
def tree_map_distribution(
f: Callable[[mpo_types.DistributionOrArray], mpo_types.DistributionOrArray],
x: mpo_types.DistributionOrArray) -> mpo_types.DistributionOrArray:
"""Apply a jax function to a distribution by treating it as tree."""
if isinstance(x, distrax.Distribution):
safe_f = lambda y: f(y) if isinstance(y, jnp.ndarray) else y
nil, tree_data = x.tree_flatten()
new_tree_data = jax.tree_map(safe_f, tree_data)
new_x = x.tree_unflatten(new_tree_data, nil)
return new_x
elif isinstance(x, tfd.Distribution):
return jax.tree_map(f, x)
else:
return f(x)
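# Usage note (illustrative): RolloutLoss._rolling_window in rollout_loss.py
# uses this helper to apply rolling_window to every array inside a possibly
# distribution-valued prediction (e.g. the logits of a categorical policy),
# while non-array metadata inside a distrax distribution is left untouched by
# the isinstance check in safe_f.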
def make_sequences_from_transitions(
transitions: types.Transition,
num_batch_dims: int = 1) -> adders.Step:
"""Convert a batch of transitions into a batch of 1-step sequences."""
stack = lambda x, y: jnp.stack((x, y), axis=num_batch_dims)
duplicate = lambda x: stack(x, x)
observation = jax.tree_map(stack, transitions.observation,
transitions.next_observation)
reward = duplicate(transitions.reward)
return adders.Step(
observation=observation,
action=duplicate(transitions.action),
reward=reward,
discount=duplicate(transitions.discount),
start_of_episode=jnp.zeros_like(reward, dtype=jnp.bool_),
extras=jax.tree_map(duplicate, transitions.extras))
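# Shape sketch (illustrative only): given a types.Transition whose fields have
# a leading batch dimension [B, ...], the returned adders.Step stacks each
# (field, next_field) pair along a new axis 1, giving fields of shape
# [B, 2, ...]; start_of_episode is all False since both entries belong to the
# same transition.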
|
acme-master
|
acme/agents/jax/mpo/utils.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MPO learner implementation. With MoG/not and continuous/discrete policies."""
import dataclasses
import functools
import time
from typing import Any, Dict, Iterator, List, NamedTuple, Optional, Sequence, Tuple, Union
from absl import logging
import acme
from acme import specs
from acme import types
from acme.adders import reverb as adders
from acme.agents.jax.mpo import categorical_mpo as discrete_losses
from acme.agents.jax.mpo import networks as mpo_networks
from acme.agents.jax.mpo import rollout_loss
from acme.agents.jax.mpo import types as mpo_types
from acme.agents.jax.mpo import utils as mpo_utils
from acme.jax import networks as network_lib
from acme.jax import types as jax_types
from acme.jax import utils
import acme.jax.losses.mpo as continuous_losses
from acme.utils import counting
from acme.utils import loggers
import chex
import jax
import jax.numpy as jnp
import numpy as np
import optax
import reverb
import rlax
import tree
_PMAP_AXIS_NAME = 'data'
CriticType = mpo_types.CriticType
class TrainingState(NamedTuple):
"""Contains training state for the learner."""
params: mpo_networks.MPONetworkParams
target_params: mpo_networks.MPONetworkParams
dual_params: mpo_types.DualParams
opt_state: optax.OptState
dual_opt_state: optax.OptState
steps: int
random_key: jax_types.PRNGKey
def softmax_cross_entropy(
logits: chex.Array, target_probs: chex.Array) -> chex.Array:
"""Compute cross entropy loss between logits and target probabilities."""
chex.assert_equal_shape([target_probs, logits])
return -jnp.sum(target_probs * jax.nn.log_softmax(logits), axis=-1)
def top1_accuracy_tiebreak(logits: chex.Array,
targets: chex.Array,
*,
rng: jax_types.PRNGKey,
eps: float = 1e-6) -> chex.Array:
"""Compute the top-1 accuracy with an argmax of targets (random tie-break)."""
noise = jax.random.uniform(rng, shape=targets.shape,
minval=-eps, maxval=eps)
acc = jnp.argmax(logits, axis=-1) == jnp.argmax(targets + noise, axis=-1)
return jnp.mean(acc)
class MPOLearner(acme.Learner):
"""MPO learner (discrete or continuous, distributional or not)."""
_state: TrainingState
def __init__( # pytype: disable=annotation-type-mismatch # numpy-scalars
self,
critic_type: CriticType,
discrete_policy: bool,
environment_spec: specs.EnvironmentSpec,
networks: mpo_networks.MPONetworks,
random_key: jax_types.PRNGKey,
discount: float,
num_samples: int,
iterator: Iterator[reverb.ReplaySample],
experience_type: mpo_types.ExperienceType,
loss_scales: mpo_types.LossScalesConfig,
target_update_period: Optional[int] = 100,
target_update_rate: Optional[float] = None,
sgd_steps_per_learner_step: int = 20,
policy_eval_stochastic: bool = True,
policy_eval_num_val_samples: int = 128,
policy_loss_config: Optional[mpo_types.PolicyLossConfig] = None,
use_online_policy_to_bootstrap: bool = False,
use_stale_state: bool = False,
use_retrace: bool = False,
retrace_lambda: float = 0.95,
model_rollout_length: int = 0,
optimizer: Optional[optax.GradientTransformation] = None,
learning_rate: optax.ScalarOrSchedule = 1e-4,
dual_optimizer: Optional[optax.GradientTransformation] = None,
dual_learning_rate: optax.ScalarOrSchedule = 1e-2,
grad_norm_clip: float = 40.0,
reward_clip: float = np.float32('inf'),
value_tx_pair: rlax.TxPair = rlax.IDENTITY_PAIR,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
devices: Optional[Sequence[jax.Device]] = None,
):
self._critic_type = critic_type
self._discrete_policy = discrete_policy
process_id = jax.process_index()
local_devices = jax.local_devices()
self._devices = devices or local_devices
logging.info('Learner process id: %s. Devices passed: %s', process_id,
devices)
logging.info('Learner process id: %s. Local devices from JAX API: %s',
process_id, local_devices)
self._local_devices = [d for d in self._devices if d in local_devices]
# Store networks.
self._networks = networks
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger
# Other learner parameters.
self._discount = discount
self._num_samples = num_samples
self._sgd_steps_per_learner_step = sgd_steps_per_learner_step
self._policy_eval_stochastic = policy_eval_stochastic
self._policy_eval_num_val_samples = policy_eval_num_val_samples
self._reward_clip_range = sorted([-reward_clip, reward_clip])
self._tx_pair = value_tx_pair
self._loss_scales = loss_scales
self._use_online_policy_to_bootstrap = use_online_policy_to_bootstrap
self._model_rollout_length = model_rollout_length
self._use_retrace = use_retrace
self._retrace_lambda = retrace_lambda
if use_retrace and critic_type == CriticType.MIXTURE_OF_GAUSSIANS:
logging.warning(
'Warning! Retrace has not been tested with the MoG critic.')
self._use_stale_state = use_stale_state
self._experience_type = experience_type
if isinstance(self._experience_type, mpo_types.FromTransitions):
# Each n=5-step transition will be converted to a length 2 sequence before
# being passed to the loss, so we do n=1 step bootstrapping on the
# resulting sequence to get n=5-step bootstrapping as intended.
self._n_step_for_sequence_bootstrap = 1
self._td_lambda = 1.0
elif isinstance(self._experience_type, mpo_types.FromSequences):
self._n_step_for_sequence_bootstrap = self._experience_type.n_step
self._td_lambda = self._experience_type.td_lambda
# Necessary to track when to update target networks.
self._target_update_period = target_update_period
self._target_update_rate = target_update_rate
# Assert one and only one of target update period or rate is defined.
if ((target_update_period and target_update_rate) or
(target_update_period is None and target_update_rate is None)):
raise ValueError(
'Exactly one of target_update_{period|rate} must be set.'
f' Received target_update_period={target_update_period} and'
f' target_update_rate={target_update_rate}.')
# Create policy loss.
if self._discrete_policy:
policy_loss_config = (
policy_loss_config or mpo_types.CategoricalPolicyLossConfig())
self._policy_loss_module = discrete_losses.CategoricalMPO(
**dataclasses.asdict(policy_loss_config))
else:
policy_loss_config = (
policy_loss_config or mpo_types.GaussianPolicyLossConfig())
self._policy_loss_module = continuous_losses.MPO(
**dataclasses.asdict(policy_loss_config))
self._policy_loss_module.__call__ = jax.named_call(
self._policy_loss_module.__call__, name='policy_loss')
# Create the dynamics model rollout loss.
if model_rollout_length > 0:
if not discrete_policy and (self._loss_scales.rollout.policy or
self._loss_scales.rollout.bc_policy):
raise ValueError('Policy rollout losses are only supported in the '
'discrete policy case.')
self._model_rollout_loss_fn = rollout_loss.RolloutLoss(
dynamics_model=networks.dynamics_model,
model_rollout_length=model_rollout_length,
loss_scales=loss_scales,
distributional_loss_fn=self._distributional_loss)
# Create optimizers if they aren't given.
self._optimizer = optimizer or _get_default_optimizer(
learning_rate, grad_norm_clip
)
self._dual_optimizer = dual_optimizer or _get_default_optimizer(
dual_learning_rate, grad_norm_clip
)
self._action_spec = environment_spec.actions
# Initialize random key for the rest of training.
random_key, key = jax.random.split(random_key)
# Initialize network parameters, ignoring the dummy initial state.
network_params, _ = mpo_networks.init_params(
self._networks,
environment_spec,
key,
add_batch_dim=True,
dynamics_rollout_length=self._model_rollout_length)
# Get action dims (unused in the discrete case).
dummy_action = utils.zeros_like(environment_spec.actions)
dummy_action_concat = utils.batch_concat(dummy_action, num_batch_dims=0)
if isinstance(self._policy_loss_module, discrete_losses.CategoricalMPO):
self._dual_clip_fn = discrete_losses.clip_categorical_mpo_params
elif isinstance(self._policy_loss_module, continuous_losses.MPO):
is_constraining = self._policy_loss_module.per_dim_constraining
self._dual_clip_fn = lambda dp: continuous_losses.clip_mpo_params( # pylint: disable=g-long-lambda # pytype: disable=wrong-arg-types # numpy-scalars
dp,
per_dim_constraining=is_constraining)
# Create dual parameters. In the discrete case, the action dim is unused.
dual_params = self._policy_loss_module.init_params(
action_dim=dummy_action_concat.shape[-1], dtype=jnp.float32)
# Initialize optimizers.
opt_state = self._optimizer.init(network_params)
dual_opt_state = self._dual_optimizer.init(dual_params)
# Initialise training state (parameters and optimiser state).
state = TrainingState(
params=network_params,
target_params=network_params,
dual_params=dual_params,
opt_state=opt_state,
dual_opt_state=dual_opt_state,
steps=0,
random_key=random_key,
)
self._state = utils.replicate_in_all_devices(state, self._local_devices)
# Log how many parameters the network has.
sizes = tree.map_structure(jnp.size, network_params)._asdict()
num_params_by_component_str = ' | '.join(
[f'{key}: {sum(tree.flatten(size))}' for key, size in sizes.items()])
logging.info('Number of params by network component: %s',
num_params_by_component_str)
logging.info('Total number of params: %d',
sum(tree.flatten(sizes.values())))
# Combine multiple SGD steps and pmap across devices.
sgd_steps = utils.process_multiple_batches(self._sgd_step,
self._sgd_steps_per_learner_step)
self._sgd_steps = jax.pmap(
sgd_steps, axis_name=_PMAP_AXIS_NAME, devices=self._devices)
self._iterator = iterator
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
self._current_step = 0
def _distributional_loss(self, prediction: mpo_types.DistributionLike,
target: chex.Array):
"""Compute the critic loss given the prediction and target."""
# TODO(abef): break this function into separate functions for each critic.
    # Expected shape [N, Z, T]; for the Categorical critic it is [1, T, L].
    chex.assert_rank(target, 3)
if self._critic_type == CriticType.MIXTURE_OF_GAUSSIANS:
# Sample-based cross-entropy loss.
loss = -prediction.log_prob(target[..., jnp.newaxis])
loss = jnp.mean(loss, axis=[0, 1]) # [T]
elif self._critic_type == CriticType.NONDISTRIBUTIONAL:
# TD error.
prediction = prediction.squeeze(axis=-1) # [T]
loss = 0.5 * jnp.square(target - prediction)
chex.assert_equal_shape([target, loss]) # Check broadcasting.
elif self._critic_type == mpo_types.CriticType.CATEGORICAL_2HOT:
# Cross-entropy loss (two-hot categorical).
target = jnp.mean(target, axis=(0, 1)) # [N, Z, T] -> [T]
# TODO(abef): Compute target differently? (e.g., do mean cross ent.).
target_probs = rlax.transform_to_2hot( # [T, L]
target,
min_value=prediction.values.min(),
max_value=prediction.values.max(),
num_bins=prediction.logits.shape[-1])
logits = jnp.squeeze(prediction.logits, axis=1) # [T, L]
chex.assert_equal_shape([target_probs, logits])
loss = jax.vmap(rlax.categorical_cross_entropy)(target_probs, logits)
elif self._critic_type == mpo_types.CriticType.CATEGORICAL:
loss = jax.vmap(rlax.categorical_cross_entropy)(jnp.squeeze(
target, axis=0), jnp.squeeze(prediction.logits, axis=1))
return jnp.mean(loss) # [T] -> []
def _compute_predictions(self, params: mpo_networks.MPONetworkParams,
sequence: adders.Step) -> mpo_types.ModelOutputs:
"""Compute model predictions at observed and rolled out states."""
# Initialize the core states, possibly to the recorded stale state.
if self._use_stale_state:
initial_state = utils.maybe_recover_lstm_type(
sequence.extras['core_state'])
initial_state = tree.map_structure(lambda x: x[0], initial_state)
else:
initial_state = self._networks.torso.initial_state_fn(
params.torso_initial_state, None)
# Unroll the online core network. Note that this may pass the embeddings
# unchanged if, say, the core is an hk.IdentityCore.
state_embedding, _ = self._networks.torso_unroll( # [T, ...]
params, sequence.observation, initial_state)
# Compute the root policy and critic outputs; [T, ...] and [T-1, ...].
policy = self._networks.policy_head_apply(params, state_embedding)
q_value = self._networks.critic_head_apply(
params, state_embedding[:-1], sequence.action[:-1])
return mpo_types.ModelOutputs(
policy=policy, # [T, ...]
q_value=q_value, # [T-1, ...]
reward=None,
embedding=state_embedding) # [T, ...]
def _compute_targets(
self,
target_params: mpo_networks.MPONetworkParams,
dual_params: mpo_types.DualParams,
sequence: adders.Step,
online_policy: types.NestedArray, # TODO(abef): remove this.
key: jax_types.PRNGKey) -> mpo_types.LossTargets:
"""Compute the targets needed to train the agent."""
# Initialize the core states, possibly to the recorded stale state.
if self._use_stale_state:
initial_state = utils.maybe_recover_lstm_type(
sequence.extras['core_state'])
initial_state = tree.map_structure(lambda x: x[0], initial_state)
else:
initial_state = self._networks.torso.initial_state_fn(
target_params.torso_initial_state, None)
# Unroll the target core network. Note that this may pass the embeddings
# unchanged if, say, the core is an hk.IdentityCore.
target_state_embedding, _ = self._networks.torso_unroll(
target_params, sequence.observation, initial_state) # [T, ...]
# Compute the action distribution from target policy network.
target_policy = self._networks.policy_head_apply(
target_params, target_state_embedding) # [T, ...]
# Maybe reward clip.
clipped_reward = jnp.clip(sequence.reward, *self._reward_clip_range) # [T]
# TODO(abef): when to clip rewards, if at all, if learning dynamics model?
@jax.named_call
@jax.vmap
def critic_mean_fn(action_: jnp.ndarray) -> jnp.ndarray:
"""Compute mean of target critic distribution."""
critic_output = self._networks.critic_head_apply(
target_params, target_state_embedding, action_)
if self._critic_type != CriticType.NONDISTRIBUTIONAL:
critic_output = critic_output.mean()
return critic_output
@jax.named_call
@jax.vmap
def critic_sample_fn(action_: jnp.ndarray,
seed_: jnp.ndarray) -> jnp.ndarray:
"""Sample from the target critic distribution."""
z_distribution = self._networks.critic_head_apply(
target_params, target_state_embedding, action_)
z_samples = z_distribution.sample(
self._policy_eval_num_val_samples, seed=seed_)
return z_samples # [Z, T, 1]
if self._discrete_policy:
# Use all actions to improve policy (no sampling); N = num_actions.
a_improvement = jnp.arange(self._action_spec.num_values) # [N]
seq_len = target_state_embedding.shape[0] # T
a_improvement = jnp.tile(a_improvement[..., None], [1, seq_len]) # [N, T]
else:
# Sample actions to improve policy; [N=num_samples, T].
a_improvement = target_policy.sample(self._num_samples, seed=key)
# TODO(abef): use model to get q_improvement = r + gamma*V?
# Compute the mean Q-values used in policy improvement; [N, T].
q_improvement = critic_mean_fn(a_improvement).squeeze(axis=-1)
# Policy to use for policy evaluation and bootstrapping.
if self._use_online_policy_to_bootstrap:
policy_to_evaluate = online_policy
chex.assert_equal(online_policy.batch_shape, target_policy.batch_shape)
else:
policy_to_evaluate = target_policy
# Action(s) to use for policy evaluation; shape [N, T].
if self._policy_eval_stochastic:
a_evaluation = policy_to_evaluate.sample(self._num_samples, seed=key)
else:
a_evaluation = policy_to_evaluate.mode()
a_evaluation = jnp.expand_dims(a_evaluation, axis=0) # [N=1, T]
# TODO(abef): policy_eval_stochastic=False makes our targets more "greedy"
# Add a stopgrad in case we use the online policy for evaluation.
a_evaluation = jax.lax.stop_gradient(a_evaluation)
if self._critic_type == CriticType.MIXTURE_OF_GAUSSIANS:
# Produce Z return samples for every N action sample; [N, Z, T, 1].
seeds = jax.random.split(key, num=a_evaluation.shape[0])
z_samples = critic_sample_fn(a_evaluation, seeds)
else:
normalized_weights = 1. / a_evaluation.shape[0]
z_samples = critic_mean_fn(a_evaluation) # [N, T, 1]
# When policy_eval_stochastic == True, this corresponds to expected SARSA.
# Otherwise, normalized_weights = 1.0 and N = 1 so the sum is a no-op.
z_samples = jnp.sum(normalized_weights * z_samples, axis=0, keepdims=True)
z_samples = jnp.expand_dims(z_samples, axis=1) # [N, Z=1, T, 1]
# Slice to t = 1...T and transform into raw reward space; [N, Z, T].
z_samples_itx = self._tx_pair.apply_inv(z_samples.squeeze(axis=-1))
# Compute the value estimate by averaging the sampled returns in the raw
# reward space; shape [N=1, Z=1, T].
value_target_itx = jnp.mean(z_samples_itx, axis=(0, 1), keepdims=True)
if self._use_retrace:
# Warning! Retrace has not been tested with the MoG critic.
log_rhos = (
target_policy.log_prob(sequence.action) - sequence.extras['log_prob'])
# Compute Q-values; expand and squeeze because critic_mean_fn is vmapped.
q_t = critic_mean_fn(jnp.expand_dims(sequence.action, axis=0)).squeeze(0)
q_t = q_t.squeeze(-1) # Also squeeze trailing scalar dimension; [T].
# Compute retrace targets.
# These targets use the rewards and discounts as in normal TD-learning but
# they use a mix of bootstrapped values V(s') and Q(s', a'), weighing the
# latter based on how likely a' is under the current policy (s' and a' are
# samples from replay).
# See [Munos et al., 2016](https://arxiv.org/abs/1606.02647) for more.
q_value_target_itx = rlax.general_off_policy_returns_from_q_and_v(
q_t=self._tx_pair.apply_inv(q_t[1:-1]),
v_t=jnp.squeeze(value_target_itx, axis=(0, 1))[1:],
r_t=clipped_reward[:-1],
discount_t=self._discount * sequence.discount[:-1],
c_t=self._retrace_lambda * jnp.minimum(1.0, jnp.exp(log_rhos[1:-1])))
# Expand dims to the expected [N=1, Z=1, T-1].
q_value_target_itx = jnp.expand_dims(q_value_target_itx, axis=(0, 1))
else:
# Compute bootstrap target from sequences. vmap return computation across
# N action and Z return samples; shape [N, Z, T-1].
n_step_return_fn = functools.partial(
rlax.n_step_bootstrapped_returns,
r_t=clipped_reward[:-1],
discount_t=self._discount * sequence.discount[:-1],
n=self._n_step_for_sequence_bootstrap,
lambda_t=self._td_lambda)
n_step_return_vfn = jax.vmap(jax.vmap(n_step_return_fn))
q_value_target_itx = n_step_return_vfn(v_t=z_samples_itx[..., 1:])
# Transform back to the canonical space and stop gradients.
q_value_target = jax.lax.stop_gradient(
self._tx_pair.apply(q_value_target_itx))
reward_target = jax.lax.stop_gradient(self._tx_pair.apply(clipped_reward))
value_target = jax.lax.stop_gradient(self._tx_pair.apply(value_target_itx))
if self._critic_type == mpo_types.CriticType.CATEGORICAL:
@jax.vmap
def get_logits_and_values(
action: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
critic_output = self._networks.critic_head_apply(
target_params, target_state_embedding[1:], action)
return critic_output.logits, critic_output.values
z_t_logits, z_t_values = get_logits_and_values(a_evaluation[:, 1:])
z_t_logits = jnp.squeeze(z_t_logits, axis=2) # [N, T-1, L]
z_t_values = z_t_values[0] # Values are identical at each N; [L].
gamma = self._discount * sequence.discount[:-1, None] # [T-1, 1]
r_t = clipped_reward[:-1, None] # [T-1, 1]
atoms_itx = self._tx_pair.apply_inv(z_t_values)[None, ...] # [1, L]
z_target_atoms = self._tx_pair.apply(r_t + gamma * atoms_itx) # [T-1, L]
# Note: this is n=1-step TD unless using experience=FromTransitions(n>1).
z_target_probs = jax.nn.softmax(z_t_logits) # [N, T-1, L]
z_target_atoms = jax.lax.broadcast(
z_target_atoms, z_target_probs.shape[:1]) # [N, T-1, L]
project_fn = functools.partial(
rlax.categorical_l2_project, z_q=z_t_values)
z_target = jax.vmap(jax.vmap(project_fn))(z_target_atoms, z_target_probs)
z_target = jnp.mean(z_target, axis=0)
q_value_target = jax.lax.stop_gradient(z_target[None, ...]) # [1, T-1, L]
# TODO(abef): make q_v_target shape align with expected [N, Z, T-1] shape?
targets = mpo_types.LossTargets(
policy=target_policy, # [T, ...]
a_improvement=a_improvement, # [N, T]
q_improvement=q_improvement, # [N, T]
q_value=q_value_target, # [N, Z, T-1] ([1, T-1, L] for CATEGORICAL)
value=value_target[..., :-1], # [N=1, Z=1, T-1]
reward=reward_target, # [T]
embedding=target_state_embedding) # [T, ...]
return targets
def _loss_fn(
self,
params: mpo_networks.MPONetworkParams,
dual_params: mpo_types.DualParams,
# TODO(bshahr): clean up types: Step is not a great type for sequences.
sequence: adders.Step,
target_params: mpo_networks.MPONetworkParams,
key: jax_types.PRNGKey) -> Tuple[jnp.ndarray, mpo_types.LogDict]:
# Compute the model predictions at the root and for the rollouts.
predictions = self._compute_predictions(params=params, sequence=sequence)
# Compute the targets to use for the losses.
targets = self._compute_targets(
target_params=target_params,
dual_params=dual_params,
sequence=sequence,
online_policy=predictions.policy,
key=key)
# TODO(abef): mask policy loss at terminal states or use uniform targets
# is_terminal = sequence.discount == 0.
# Compute MPO policy loss on each state in the sequence.
policy_loss, policy_stats = self._policy_loss_module(
params=dual_params,
online_action_distribution=predictions.policy, # [T, ...].
target_action_distribution=targets.policy, # [T, ...].
actions=targets.a_improvement, # Unused in discrete case; [N, T].
q_values=targets.q_improvement) # [N, T]
# Compute the critic loss on the states in the sequence.
critic_loss = self._distributional_loss(
prediction=predictions.q_value, # [T-1, 1, ...]
target=targets.q_value) # [N, Z, T-1]
loss = (self._loss_scales.policy * policy_loss +
self._loss_scales.critic * critic_loss)
loss_logging_dict = {
'loss': loss,
'root_policy_loss': policy_loss,
'root_critic_loss': critic_loss,
'policy_loss': policy_loss,
'critic_loss': critic_loss,
}
# Append MPO statistics.
loss_logging_dict.update(
{f'policy/root/{k}': v for k, v in policy_stats._asdict().items()})
# Compute rollout losses.
if self._model_rollout_length > 0:
model_rollout_loss, rollout_logs = self._model_rollout_loss_fn(
params, dual_params, sequence, predictions.embedding, targets, key)
loss += model_rollout_loss
loss_logging_dict.update(rollout_logs)
loss_logging_dict.update({
'policy_loss': policy_loss + rollout_logs['rollout_policy_loss'],
'critic_loss': critic_loss + rollout_logs['rollout_critic_loss'],
'loss': loss})
return loss, loss_logging_dict
def _sgd_step(
self,
state: TrainingState,
transitions: Union[types.Transition, adders.Step],
) -> Tuple[TrainingState, Dict[str, Any]]:
"""Perform one parameter update step."""
if isinstance(transitions, types.Transition):
sequences = mpo_utils.make_sequences_from_transitions(transitions)
if self._model_rollout_length > 0:
raise ValueError('model rollouts not yet supported from transitions')
else:
sequences = transitions
# Get next random_key and `batch_size` keys.
batch_size = sequences.reward.shape[0]
keys = jax.random.split(state.random_key, num=batch_size+1)
random_key, keys = keys[0], keys[1:]
# Vmap over the batch dimension when learning from sequences.
loss_vfn = jax.vmap(self._loss_fn, in_axes=(None, None, 0, None, 0))
safe_mean = lambda x: jnp.mean(x) if x is not None else x
# TODO(bshahr): Consider cleaning this up via acme.tree_utils.tree_map.
loss_fn = lambda *a, **k: tree.map_structure(safe_mean, loss_vfn(*a, **k))
loss_and_grad = jax.value_and_grad(loss_fn, argnums=(0, 1), has_aux=True)
# Compute the loss and gradient.
(_, loss_log_dict), all_gradients = loss_and_grad(
state.params, state.dual_params, sequences, state.target_params, keys)
# Average gradients across replicas.
gradients, dual_gradients = jax.lax.pmean(all_gradients, _PMAP_AXIS_NAME)
# Compute gradient norms before clipping.
gradients_norm = optax.global_norm(gradients)
dual_gradients_norm = optax.global_norm(dual_gradients)
# Get optimizer updates and state.
updates, opt_state = self._optimizer.update(
gradients, state.opt_state, state.params)
dual_updates, dual_opt_state = self._dual_optimizer.update(
dual_gradients, state.dual_opt_state, state.dual_params)
# Apply optimizer updates to parameters.
params = optax.apply_updates(state.params, updates)
dual_params = optax.apply_updates(state.dual_params, dual_updates)
# Clip dual params at some minimum value.
dual_params = self._dual_clip_fn(dual_params)
steps = state.steps + 1
# Periodically update target networks.
if self._target_update_period:
target_params = optax.periodic_update(params, state.target_params, steps, # pytype: disable=wrong-arg-types # numpy-scalars
self._target_update_period)
elif self._target_update_rate:
target_params = optax.incremental_update(params, state.target_params,
self._target_update_rate)
new_state = TrainingState( # pytype: disable=wrong-arg-types # numpy-scalars
params=params,
target_params=target_params,
dual_params=dual_params,
opt_state=opt_state,
dual_opt_state=dual_opt_state,
steps=steps,
random_key=random_key,
)
# Log the metrics from this learner step.
metrics = {f'loss/{k}': v for k, v in loss_log_dict.items()}
metrics.update({
'opt/grad_norm': gradients_norm,
'opt/param_norm': optax.global_norm(params)})
dual_metrics = {
'opt/dual_grad_norm': dual_gradients_norm,
'opt/dual_param_norm': optax.global_norm(dual_params),
'params/dual/log_temperature_avg': dual_params.log_temperature}
if isinstance(dual_params, continuous_losses.MPOParams):
dual_metrics.update({
'params/dual/log_alpha_mean_avg': dual_params.log_alpha_mean,
'params/dual/log_alpha_stddev_avg': dual_params.log_alpha_stddev})
if dual_params.log_penalty_temperature is not None:
dual_metrics['params/dual/log_penalty_temp_mean'] = (
dual_params.log_penalty_temperature)
elif isinstance(dual_params, discrete_losses.CategoricalMPOParams):
dual_metrics['params/dual/log_alpha_avg'] = dual_params.log_alpha
metrics.update(jax.tree_map(jnp.mean, dual_metrics))
return new_state, metrics
def step(self):
"""Perform one learner step, which in general does multiple SGD steps."""
with jax.profiler.StepTraceAnnotation('step', step_num=self._current_step):
# Get data from replay (dropping extras if any). Note there is no
# extra data here because we do not insert any into Reverb.
sample = next(self._iterator)
if isinstance(self._experience_type, mpo_types.FromTransitions):
minibatch = types.Transition(*sample.data)
elif isinstance(self._experience_type, mpo_types.FromSequences):
minibatch = adders.Step(*sample.data)
self._state, metrics = self._sgd_steps(self._state, minibatch)
self._current_step, metrics = mpo_utils.get_from_first_device(
(self._state.steps, metrics))
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Increment counts and record the current time
counts = self._counter.increment(
steps=self._sgd_steps_per_learner_step, walltime=elapsed_time)
if elapsed_time > 0:
metrics['steps_per_second'] = (
self._sgd_steps_per_learner_step / elapsed_time)
else:
metrics['steps_per_second'] = 0.
# Attempts to write the logs.
if self._logger:
self._logger.write({**metrics, **counts})
def get_variables(self, names: List[str]) -> network_lib.Params:
params = mpo_utils.get_from_first_device(self._state.target_params)
variables = {
'policy_head': params.policy_head,
'critic_head': params.critic_head,
'torso': params.torso,
'network': params,
'policy': params._replace(critic_head={}),
'critic': params._replace(policy_head={}),
}
return [variables[name] for name in names]
def save(self) -> TrainingState:
return jax.tree_map(mpo_utils.get_from_first_device, self._state)
def restore(self, state: TrainingState):
self._state = utils.replicate_in_all_devices(state, self._local_devices)
def _get_default_optimizer(
learning_rate: optax.ScalarOrSchedule, max_grad_norm: Optional[float] = None
) -> optax.GradientTransformation:
optimizer = optax.adam(learning_rate)
if max_grad_norm and max_grad_norm > 0:
optimizer = optax.chain(optax.clip_by_global_norm(max_grad_norm), optimizer)
return optimizer
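# A minimal usage sketch of the helper above, assuming a toy parameter tree;
# the learning rate and clipping threshold are arbitrary illustrative values.
if __name__ == '__main__':
  demo_optimizer = _get_default_optimizer(learning_rate=1e-4, max_grad_norm=40.)
  demo_params = {'w': jnp.zeros((3,))}
  demo_opt_state = demo_optimizer.init(demo_params)
  demo_grads = {'w': jnp.ones((3,))}  # Stand-in for real loss gradients.
  demo_updates, demo_opt_state = demo_optimizer.update(demo_grads,
                                                       demo_opt_state)
  demo_params = optax.apply_updates(demo_params, demo_updates)
  logging.info('Updated demo params: %s', demo_params)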
|
acme-master
|
acme/agents/jax/mpo/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Acting logic for the MPO agent."""
from typing import Mapping, NamedTuple, Tuple, Union
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax.mpo import networks
from acme.agents.jax.mpo import types
from acme.jax import types as jax_types
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class ActorState(NamedTuple):
key: jax_types.PRNGKey
core_state: hk.LSTMState
prev_core_state: hk.LSTMState
log_prob: Union[jnp.ndarray, Tuple[()]] = ()
def make_actor_core(mpo_networks: networks.MPONetworks,
stochastic: bool = True,
store_core_state: bool = False,
store_log_prob: bool = True) -> actor_core_lib.ActorCore:
"""Returns a MPO ActorCore from the MPONetworks."""
def init(key: jax_types.PRNGKey) -> ActorState:
next_key, key = jax.random.split(key, 2)
batch_size = None
params_initial_state = mpo_networks.torso.initial_state_fn_init(
key, batch_size)
core_state = mpo_networks.torso.initial_state_fn(params_initial_state,
batch_size)
return ActorState(
key=next_key,
core_state=core_state,
prev_core_state=core_state,
log_prob=np.zeros(shape=(), dtype=np.float32) if store_log_prob else ())
def select_action(params: networks.MPONetworkParams,
observations: types.Observation,
state: ActorState) -> Tuple[types.Action, ActorState]:
next_key, key = jax.random.split(state.key, 2)
# Embed observations and apply stateful core (e.g. recurrent, transformer).
embeddings, core_state = mpo_networks.torso.apply(params.torso,
observations,
state.core_state)
# Get the action distribution for these observations.
policy = mpo_networks.policy_head_apply(params, embeddings)
actions = policy.sample(seed=key) if stochastic else policy.mode()
return actions, ActorState(
key=next_key,
core_state=core_state,
prev_core_state=state.core_state,
# Compute log-probabilities for use in off-policy correction schemes.
log_prob=policy.log_prob(actions) if store_log_prob else ())
def get_extras(state: ActorState) -> Mapping[str, jnp.ndarray]:
extras = {}
if store_core_state:
extras['core_state'] = state.prev_core_state
if store_log_prob:
extras['log_prob'] = state.log_prob
return extras # pytype: disable=bad-return-type # jax-ndarray
return actor_core_lib.ActorCore(
init=init, select_action=select_action, get_extras=get_extras)
|
acme-master
|
acme/agents/jax/mpo/acting.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the MPO loss for a discrete (categorical) policy.
The MPO loss uses CategoricalMPOParams, which can be initialized using
init_params, to track the temperature and the dual variables.
Tensor shapes are annotated, where helpful, as follows:
B: batch size,
D: dimensionality of the action space.
"""
from typing import NamedTuple, Tuple
import distrax
import jax
import jax.numpy as jnp
_MPO_FLOAT_EPSILON = 1e-8
_MIN_LOG_TEMPERATURE = -18.0
_MIN_LOG_ALPHA = -18.0
DType = type(jnp.float32) # _ScalarMeta, a private type.
class CategoricalMPOParams(NamedTuple):
"""NamedTuple to store trainable loss parameters."""
log_temperature: jnp.ndarray
log_alpha: jnp.ndarray
class CategoricalMPOStats(NamedTuple):
"""NamedTuple to store loss statistics."""
dual_alpha: float
dual_temperature: float
loss_e_step: float
loss_m_step: float
loss_dual: float
loss_policy: float
loss_alpha: float
loss_temperature: float
kl_q_rel: float
kl_mean_rel: float
q_min: float
q_max: float
entropy_online: float
entropy_target: float
class CategoricalMPO:
"""MPO loss for a categorical policy (Abdolmaleki et al., 2018).
(Abdolmaleki et al., 2018): https://arxiv.org/pdf/1812.02256.pdf
"""
def __init__(self,
epsilon: float,
epsilon_policy: float,
init_log_temperature: float,
init_log_alpha: float):
"""Initializes the MPO loss for discrete (categorical) policies.
Args:
epsilon: KL constraint on the non-parametric auxiliary policy, the one
associated with the dual variable called temperature.
epsilon_policy: KL constraint on the categorical policy, the one
associated with the dual variable called alpha.
      init_log_temperature: initial value for the temperature in log-space. Note
        that a softplus (rather than an exp) will be used to transform this.
init_log_alpha: initial value for alpha in log-space. Note that a softplus
(rather than an exp) will be used to transform this.
"""
# MPO constraint thresholds.
self._epsilon = epsilon
self._epsilon_policy = epsilon_policy
# Initial values for the constraints' dual variables.
self._init_log_temperature = init_log_temperature
self._init_log_alpha = init_log_alpha
def init_params(self, action_dim: int, dtype: DType = jnp.float32):
"""Creates an initial set of parameters."""
del action_dim # Unused.
return CategoricalMPOParams(
log_temperature=jnp.full([1], self._init_log_temperature, dtype=dtype),
log_alpha=jnp.full([1], self._init_log_alpha, dtype=dtype))
def __call__(
self,
params: CategoricalMPOParams,
online_action_distribution: distrax.Categorical,
target_action_distribution: distrax.Categorical,
actions: jnp.ndarray, # Unused.
q_values: jnp.ndarray, # Shape [D, B].
) -> Tuple[jnp.ndarray, CategoricalMPOStats]:
"""Computes the MPO loss for a categorical policy.
Args:
params: parameters tracking the temperature and the dual variables.
online_action_distribution: online distribution returned by the online
policy network; expects batch_dims of [B] and event_dims of [D].
target_action_distribution: target distribution returned by the target
policy network; expects same shapes as online distribution.
actions: Unused.
q_values: Q-values associated with every action; expects shape [D, B].
Returns:
Loss, combining the policy loss, KL penalty, and dual losses required to
adapt the dual variables.
Stats, for diagnostics and tracking performance.
"""
q_values = jnp.transpose(q_values) # [D, B] --> [B, D].
# Transform dual variables from log-space.
# Note: using softplus instead of exponential for numerical stability.
temperature = get_temperature_from_params(params)
alpha = jax.nn.softplus(params.log_alpha) + _MPO_FLOAT_EPSILON
# Compute the E-step logits and the temperature loss, used to adapt the
# tempering of Q-values.
logits_e_step, loss_temperature = compute_weights_and_temperature_loss( # pytype: disable=wrong-arg-types # jax-ndarray
q_values=q_values, logits=target_action_distribution.logits,
epsilon=self._epsilon, temperature=temperature)
action_distribution_e_step = distrax.Categorical(logits=logits_e_step)
# Only needed for diagnostics: Compute estimated actualized KL between the
# non-parametric and current target policies.
kl_nonparametric = action_distribution_e_step.kl_divergence(
target_action_distribution)
# Compute the policy loss.
loss_policy = action_distribution_e_step.cross_entropy(
online_action_distribution)
loss_policy = jnp.mean(loss_policy)
# Compute the regularization.
kl = target_action_distribution.kl_divergence(online_action_distribution)
mean_kl = jnp.mean(kl, axis=0)
loss_kl = jax.lax.stop_gradient(alpha) * mean_kl
# Compute the dual loss.
loss_alpha = alpha * (self._epsilon_policy - jax.lax.stop_gradient(mean_kl))
# Combine losses.
loss_dual = loss_alpha + loss_temperature
loss = loss_policy + loss_kl + loss_dual
# Create statistics.
stats = CategoricalMPOStats(
# Dual Variables.
dual_alpha=jnp.mean(alpha),
dual_temperature=jnp.mean(temperature),
# Losses.
loss_e_step=loss_policy,
loss_m_step=loss_kl,
loss_dual=loss_dual,
loss_policy=jnp.mean(loss),
loss_alpha=jnp.mean(loss_alpha),
loss_temperature=jnp.mean(loss_temperature),
# KL measurements.
kl_q_rel=jnp.mean(kl_nonparametric) / self._epsilon,
kl_mean_rel=mean_kl / self._epsilon_policy,
# Q measurements.
q_min=jnp.mean(jnp.min(q_values, axis=0)),
q_max=jnp.mean(jnp.max(q_values, axis=0)),
entropy_online=jnp.mean(online_action_distribution.entropy()),
entropy_target=jnp.mean(target_action_distribution.entropy())
)
return loss, stats
def compute_weights_and_temperature_loss(
q_values: jnp.ndarray,
logits: jnp.ndarray,
epsilon: float,
temperature: jnp.ndarray,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Computes normalized importance weights for the policy optimization.
Args:
q_values: Q-values associated with the actions sampled from the target
policy; expected shape [B, D].
logits: Parameters to the categorical distribution with respect to which the
expectations are going to be computed.
epsilon: Desired constraint on the KL between the target and non-parametric
policies.
temperature: Scalar used to temper the Q-values before computing normalized
importance weights from them. This is really the Lagrange dual variable in
the constrained optimization problem, the solution of which is the
non-parametric policy targeted by the policy loss.
Returns:
Normalized importance weights, used for policy optimization.
Temperature loss, used to adapt the temperature.
"""
# Temper the given Q-values using the current temperature.
tempered_q_values = jax.lax.stop_gradient(q_values) / temperature
# Compute the E-step normalized logits.
unnormalized_logits = tempered_q_values + jax.nn.log_softmax(logits, axis=-1)
logits_e_step = jax.nn.log_softmax(unnormalized_logits, axis=-1)
# Compute the temperature loss (dual of the E-step optimization problem).
# Note that the log normalizer will be the same for all actions, so we choose
# only the first one.
log_normalizer = unnormalized_logits[:, 0] - logits_e_step[:, 0]
loss_temperature = temperature * (epsilon + jnp.mean(log_normalizer))
return logits_e_step, loss_temperature
def clip_categorical_mpo_params(
params: CategoricalMPOParams) -> CategoricalMPOParams:
return params._replace(
log_temperature=jnp.maximum(_MIN_LOG_TEMPERATURE, params.log_temperature),
log_alpha=jnp.maximum(_MIN_LOG_ALPHA, params.log_alpha))
def get_temperature_from_params(params: CategoricalMPOParams) -> float:
return jax.nn.softplus(params.log_temperature) + _MPO_FLOAT_EPSILON
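# A minimal usage sketch of the loss above with random logits and Q-values;
# the shapes and epsilon values are arbitrary illustrative assumptions, not
# recommended hyperparameters.
if __name__ == '__main__':
  batch_size, num_actions = 4, 3
  key_online, key_target, key_q = jax.random.split(jax.random.PRNGKey(0), 3)
  online_dist = distrax.Categorical(
      logits=jax.random.normal(key_online, (batch_size, num_actions)))
  target_dist = distrax.Categorical(
      logits=jax.random.normal(key_target, (batch_size, num_actions)))
  q_values = jax.random.normal(key_q, (num_actions, batch_size))  # [D, B].
  loss_module = CategoricalMPO(
      epsilon=0.1, epsilon_policy=1e-3,
      init_log_temperature=3., init_log_alpha=3.)
  dual_params = loss_module.init_params(action_dim=num_actions)
  loss, stats = loss_module(
      dual_params, online_dist, target_dist,
      actions=(),  # Unused by the categorical loss.
      q_values=q_values)
  print('MPO loss:', loss, 'dual temperature:', stats.dual_temperature)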
|
acme-master
|
acme/agents/jax/mpo/categorical_mpo.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ARS config."""
import dataclasses
from acme.adders import reverb as adders_reverb
@dataclasses.dataclass
class ARSConfig:
"""Configuration options for ARS."""
num_steps: int = 1000000
normalize_observations: bool = True
step_size: float = 0.015
num_directions: int = 60
exploration_noise_std: float = 0.025
top_directions: int = 20
reward_shift: float = 1.0
replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
|
acme-master
|
acme/agents/jax/ars/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ARS agent."""
from acme.agents.jax.ars.builder import ARSBuilder
from acme.agents.jax.ars.config import ARSConfig
from acme.agents.jax.ars.networks import make_networks
from acme.agents.jax.ars.networks import make_policy_network
|
acme-master
|
acme/agents/jax/ars/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ARS Builder."""
from typing import Dict, Iterator, List, Optional, Tuple
import acme
from acme import adders
from acme import core
from acme import specs
from acme.adders import reverb as adders_reverb
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.agents.jax.ars import config as ars_config
from acme.agents.jax.ars import learning
from acme.jax import networks as networks_lib
from acme.jax import running_statistics
from acme.jax import utils
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import jax
import jax.numpy as jnp
import numpy as np
import reverb
def get_policy(policy_network: networks_lib.FeedForwardNetwork,
normalization_apply_fn) -> actor_core_lib.FeedForwardPolicy:
"""Returns a function that computes actions."""
def apply(
params: networks_lib.Params, key: networks_lib.PRNGKey,
obs: networks_lib.Observation
) -> Tuple[networks_lib.Action, Dict[str, jnp.ndarray]]:
del key
params_key, policy_params, normalization_params = params
normalized_obs = normalization_apply_fn(obs, normalization_params)
action = policy_network.apply(policy_params, normalized_obs)
return action, {
'params_key':
jax.tree_map(lambda x: jnp.expand_dims(x, axis=0), params_key)
}
return apply
class ARSBuilder(
builders.ActorLearnerBuilder[networks_lib.FeedForwardNetwork,
Tuple[str, networks_lib.FeedForwardNetwork],
reverb.ReplaySample]):
"""ARS Builder."""
def __init__(
self,
config: ars_config.ARSConfig,
spec: specs.EnvironmentSpec,
):
self._config = config
self._spec = spec
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: networks_lib.FeedForwardNetwork,
dataset: Iterator[reverb.ReplaySample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
del environment_spec, replay_client
return learning.ARSLearner(self._spec, networks, random_key, self._config,
dataset, counter, logger_fn('learner'))
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: Tuple[str, networks_lib.FeedForwardNetwork],
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> acme.Actor:
del environment_spec
assert variable_source is not None
kname, policy = policy
normalization_apply_fn = (
running_statistics.normalize if self._config.normalize_observations else
(lambda a, b: a))
policy_to_run = get_policy(policy, normalization_apply_fn)
actor_core = actor_core_lib.batched_feed_forward_with_extras_to_actor_core(
policy_to_run)
variable_client = variable_utils.VariableClient(variable_source, kname,
device='cpu')
return actors.GenericActor(
actor_core,
random_key,
variable_client,
adder,
backend='cpu',
per_episode_update=True)
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: Tuple[str, networks_lib.FeedForwardNetwork],
) -> List[reverb.Table]:
"""Create tables to insert data into."""
del policy
extra_spec = {
'params_key': (np.zeros(shape=(), dtype=np.int32),
np.zeros(shape=(), dtype=np.int32),
np.zeros(shape=(), dtype=np.bool_)),
}
signature = adders_reverb.EpisodeAdder.signature(
environment_spec, sequence_length=None, extras_spec=extra_spec)
return [
reverb.Table.queue(
name=self._config.replay_table_name,
            max_size=10000,  # Large enough to be effectively unbounded here.
signature=signature)
]
def make_dataset_iterator(
self, replay_client: reverb.Client) -> Iterator[reverb.ReplaySample]:
"""Create a dataset iterator to use for learning/updating the agent."""
dataset = reverb.TrajectoryDataset.from_table_signature(
server_address=replay_client.server_address,
table=self._config.replay_table_name,
max_in_flight_samples_per_worker=1)
return utils.device_put(dataset.as_numpy_iterator(), jax.devices()[0])
def make_adder(
self, replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[Tuple[str, networks_lib.FeedForwardNetwork]]
) -> Optional[adders.Adder]:
"""Create an adder which records data generated by the actor/environment."""
del environment_spec, policy
return adders_reverb.EpisodeAdder(
priority_fns={self._config.replay_table_name: None},
client=replay_client,
max_sequence_length=2000,
)
|
acme-master
|
acme/agents/jax/ars/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ARS networks definition."""
from typing import Tuple
from acme import specs
from acme.jax import networks as networks_lib
import jax.numpy as jnp
BEHAVIOR_PARAMS_NAME = 'policy'
EVAL_PARAMS_NAME = 'eval'
def make_networks(
spec: specs.EnvironmentSpec) -> networks_lib.FeedForwardNetwork:
"""Creates networks used by the agent.
The model used by the ARS paper is a simple clipped linear model.
Args:
spec: an environment spec
Returns:
A FeedForwardNetwork network.
"""
obs_size = spec.observations.shape[0]
act_size = spec.actions.shape[0]
return networks_lib.FeedForwardNetwork(
init=lambda _: jnp.zeros((obs_size, act_size)),
apply=lambda matrix, obs: jnp.clip(jnp.matmul(obs, matrix), -1, 1))
def make_policy_network(
network: networks_lib.FeedForwardNetwork,
eval_mode: bool = True) -> Tuple[str, networks_lib.FeedForwardNetwork]:
params_name = EVAL_PARAMS_NAME if eval_mode else BEHAVIOR_PARAMS_NAME
return (params_name, network)
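# A minimal usage sketch of the clipped linear policy, assuming a dummy
# 3-dimensional observation and 2-dimensional action spec.
if __name__ == '__main__':
  dummy_spec = specs.EnvironmentSpec(
      observations=specs.Array(shape=(3,), dtype=jnp.float32),
      actions=specs.BoundedArray(
          shape=(2,), dtype=jnp.float32, minimum=-1., maximum=1.),
      rewards=specs.Array(shape=(), dtype=jnp.float32),
      discounts=specs.BoundedArray(
          shape=(), dtype=jnp.float32, minimum=0., maximum=1.))
  network = make_networks(dummy_spec)
  matrix = network.init(None)  # The init function ignores its argument.
  action = network.apply(matrix, jnp.ones((3,)))
  print('Clipped linear action:', action)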
|
acme-master
|
acme/agents/jax/ars/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ARS learner implementation."""
import collections
import threading
import time
from typing import Any, Deque, Dict, Iterator, List, NamedTuple, Optional
import acme
from acme import specs
from acme.adders import reverb as acme_reverb
from acme.agents.jax.ars import config as ars_config
from acme.agents.jax.ars import networks as ars_networks
from acme.jax import networks as networks_lib
from acme.jax import running_statistics
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
import jax
import numpy as np
import reverb
class PerturbationKey(NamedTuple):
training_iteration: int
perturbation_id: int
is_opposite: bool
class EvaluationResult(NamedTuple):
total_reward: float
observation: networks_lib.Observation
class EvaluationRequest(NamedTuple):
key: PerturbationKey
policy_params: networks_lib.Params
normalization_params: networks_lib.Params
class TrainingState(NamedTuple):
"""Contains training state for the learner."""
key: networks_lib.PRNGKey
normalizer_params: networks_lib.Params
policy_params: networks_lib.Params
training_iteration: int
class EvaluationState(NamedTuple):
"""Contains training state for the learner."""
key: networks_lib.PRNGKey
evaluation_queue: Deque[EvaluationRequest]
received_results: Dict[PerturbationKey, EvaluationResult]
noises: List[networks_lib.Params]
class ARSLearner(acme.Learner):
"""ARS learner."""
_state: TrainingState
def __init__(
self,
spec: specs.EnvironmentSpec,
networks: networks_lib.FeedForwardNetwork,
rng: networks_lib.PRNGKey,
config: ars_config.ARSConfig,
iterator: Iterator[reverb.ReplaySample],
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None):
self._config = config
self._lock = threading.Lock()
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger(
'learner',
asynchronous=True,
serialize_fn=utils.fetch_devicearray,
steps_key=self._counter.get_steps_key())
# Iterator on demonstration transitions.
self._iterator = iterator
if self._config.normalize_observations:
normalizer_params = running_statistics.init_state(spec.observations)
self._normalizer_update_fn = running_statistics.update
else:
normalizer_params = ()
self._normalizer_update_fn = lambda a, b: a
rng1, rng2, tmp = jax.random.split(rng, 3)
# Create initial state.
self._training_state = TrainingState(
key=rng1,
policy_params=networks.init(tmp),
normalizer_params=normalizer_params,
training_iteration=0)
self._evaluation_state = EvaluationState(
key=rng2,
evaluation_queue=collections.deque(),
received_results={},
noises=[])
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
def _generate_perturbations(self):
with self._lock:
rng, noise_key = jax.random.split(self._evaluation_state.key)
self._evaluation_state = EvaluationState(
key=rng,
evaluation_queue=collections.deque(),
received_results={},
noises=[])
all_noise = jax.random.normal(
noise_key,
shape=(self._config.num_directions,) +
self._training_state.policy_params.shape,
dtype=self._training_state.policy_params.dtype)
for i in range(self._config.num_directions):
noise = all_noise[i]
self._evaluation_state.noises.append(noise)
for direction in (-1, 1):
self._evaluation_state.evaluation_queue.append(
EvaluationRequest(
PerturbationKey(self._training_state.training_iteration, i,
direction == -1),
self._training_state.policy_params +
direction * noise * self._config.exploration_noise_std,
self._training_state.normalizer_params))
def _read_results(self):
while len(self._evaluation_state.received_results
) != self._config.num_directions * 2:
data = next(self._iterator).data
data = acme_reverb.Step(*data)
      # Validate that this episode comes from the current training iteration
      # and a single perturbation.
params_key = data.extras['params_key']
training_step, perturbation_id, is_opposite = params_key
# If the incoming data does not correspond to the current iteration,
# we simply ignore it.
if not np.all(
training_step[:-1] == self._training_state.training_iteration):
continue
# The whole episode should be run with the same policy, so let's check
# for that.
assert np.all(perturbation_id[:-1] == perturbation_id[0])
assert np.all(is_opposite[:-1] == is_opposite[0])
perturbation_id = perturbation_id[0].item()
is_opposite = is_opposite[0].item()
total_reward = np.sum(data.reward - self._config.reward_shift)
k = PerturbationKey(self._training_state.training_iteration,
perturbation_id, is_opposite)
if k in self._evaluation_state.received_results:
continue
self._evaluation_state.received_results[k] = EvaluationResult(
total_reward, data.observation)
def _update_model(self) -> int:
# Update normalization params.
real_actor_steps = 0
normalizer_params = self._training_state.normalizer_params
for _, value in self._evaluation_state.received_results.items():
real_actor_steps += value.observation.shape[0] - 1
normalizer_params = self._normalizer_update_fn(normalizer_params,
value.observation)
# Keep only top directions.
top_directions = []
for i in range(self._config.num_directions):
reward_forward = self._evaluation_state.received_results[PerturbationKey(
self._training_state.training_iteration, i, False)].total_reward
reward_reverse = self._evaluation_state.received_results[PerturbationKey(
self._training_state.training_iteration, i, True)].total_reward
top_directions.append((max(reward_forward, reward_reverse), i))
top_directions.sort()
top_directions = top_directions[-self._config.top_directions:]
# Compute reward_std.
reward = []
for _, i in top_directions:
reward.append(self._evaluation_state.received_results[PerturbationKey(
self._training_state.training_iteration, i, False)].total_reward)
reward.append(self._evaluation_state.received_results[PerturbationKey(
self._training_state.training_iteration, i, True)].total_reward)
reward_std = np.std(reward)
# Compute new policy params.
policy_params = self._training_state.policy_params
curr_sum = np.zeros_like(policy_params)
for _, i in top_directions:
reward_forward = self._evaluation_state.received_results[PerturbationKey(
self._training_state.training_iteration, i, False)].total_reward
reward_reverse = self._evaluation_state.received_results[PerturbationKey(
self._training_state.training_iteration, i, True)].total_reward
curr_sum += self._evaluation_state.noises[i] * (
reward_forward - reward_reverse)
policy_params = policy_params + self._config.step_size / (
self._config.top_directions * reward_std) * curr_sum
self._training_state = TrainingState(
key=self._training_state.key,
normalizer_params=normalizer_params,
policy_params=policy_params,
training_iteration=self._training_state.training_iteration)
return real_actor_steps
def step(self):
self._training_state = self._training_state._replace(
training_iteration=self._training_state.training_iteration + 1)
self._generate_perturbations()
self._read_results()
real_actor_steps = self._update_model()
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Increment counts and record the current time
counts = self._counter.increment(
steps=1,
real_actor_steps=real_actor_steps,
learner_episodes=2 * self._config.num_directions,
walltime=elapsed_time)
# Attempts to write the logs.
self._logger.write(counts)
def get_variables(self, names: List[str]) -> List[Any]:
assert (names == [ars_networks.BEHAVIOR_PARAMS_NAME] or
names == [ars_networks.EVAL_PARAMS_NAME])
if names == [ars_networks.EVAL_PARAMS_NAME]:
return [PerturbationKey(-1, -1, False),
self._training_state.policy_params,
self._training_state.normalizer_params]
should_sleep = False
while True:
if should_sleep:
time.sleep(0.1)
should_sleep = False
with self._lock:
if not self._evaluation_state.evaluation_queue:
should_sleep = True
continue
data = self._evaluation_state.evaluation_queue.pop()
# If this perturbation was already evaluated, we simply skip it.
if data.key in self._evaluation_state.received_results:
continue
        # If an actor fails, we still need to re-evaluate the same
        # perturbation, so we add it back to the end of the queue.
self._evaluation_state.evaluation_queue.append(data)
return [data]
def save(self) -> TrainingState:
return self._training_state
def restore(self, state: TrainingState):
self._training_state = state
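# A minimal numerical sketch of the core ARS update performed in _update_model
# above, using two toy directions (both kept) and made-up episode returns.
if __name__ == '__main__':
  demo_params = np.zeros((3, 2))
  demo_noises = [np.ones((3, 2)), -np.ones((3, 2))]
  rewards_forward, rewards_reverse = [1.0, 0.5], [0.2, 0.8]
  demo_reward_std = np.std(rewards_forward + rewards_reverse)
  demo_step_size, demo_top_directions = 0.015, 2
  demo_sum = sum(
      noise * (r_fwd - r_rev) for noise, r_fwd, r_rev in zip(
          demo_noises, rewards_forward, rewards_reverse))
  demo_params = demo_params + demo_step_size / (
      demo_top_directions * demo_reward_std) * demo_sum
  print('Updated demo policy params:', demo_params)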
|
acme-master
|
acme/agents/jax/ars/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config classes for CQL."""
import dataclasses
from typing import Optional
@dataclasses.dataclass
class CQLConfig:
"""Configuration options for CQL.
Attributes:
batch_size: batch size.
policy_learning_rate: learning rate for the policy optimizer.
critic_learning_rate: learning rate for the Q-function optimizer.
tau: Target smoothing coefficient.
fixed_cql_coefficient: the value for cql coefficient. If None an adaptive
coefficient will be used.
cql_lagrange_threshold: a threshold that controls the adaptive loss for the
cql coefficient.
cql_num_samples: number of samples used to compute logsumexp(Q) via
importance sampling.
    num_sgd_steps_per_step: how many gradient updates to perform per batch.
      The batch is split into this many smaller mini-batches, so batch_size
      should be a multiple of num_sgd_steps_per_step.
reward_scale: reward scale.
discount: discount to use for TD updates.
    fixed_entropy_coefficient: coefficient applied to the entropy bonus. If None
      an adaptive coefficient will be used.
    target_entropy: target entropy when using an adaptive entropy bonus.
num_bc_iters: number of BC steps for actor initialization.
"""
batch_size: int = 256
policy_learning_rate: float = 3e-5
critic_learning_rate: float = 3e-4
  tau: float = 0.005
  fixed_cql_coefficient: Optional[float] = 5.
cql_lagrange_threshold: Optional[float] = None
cql_num_samples: int = 10
num_sgd_steps_per_step: int = 1
reward_scale: float = 1.0
discount: float = 0.99
fixed_entropy_coefficient: Optional[float] = 0.
target_entropy: Optional[float] = 0
num_bc_iters: int = 50_000
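# A minimal usage sketch: constructing the config with the adaptive CQL and
# entropy coefficients enabled; the override values are illustrative only.
if __name__ == '__main__':
  demo_config = CQLConfig(
      fixed_cql_coefficient=None,  # None enables the adaptive coefficient.
      cql_lagrange_threshold=5.,
      fixed_entropy_coefficient=None,  # None enables the adaptive bonus.
      target_entropy=-6.)
  print(dataclasses.asdict(demo_config))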
|
acme-master
|
acme/agents/jax/cql/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the CQL agent."""
from acme.agents.jax.cql.builder import CQLBuilder
from acme.agents.jax.cql.config import CQLConfig
from acme.agents.jax.cql.learning import CQLLearner
from acme.agents.jax.cql.networks import CQLNetworks
from acme.agents.jax.cql.networks import make_networks
|
acme-master
|
acme/agents/jax/cql/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CQL Builder."""
from typing import Iterator, Optional
from acme import core
from acme import specs
from acme import types
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.agents.jax.cql import config as cql_config
from acme.agents.jax.cql import learning
from acme.agents.jax.cql import networks as cql_networks
from acme.jax import networks as networks_lib
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import optax
class CQLBuilder(builders.OfflineBuilder[cql_networks.CQLNetworks,
actor_core_lib.FeedForwardPolicy,
types.Transition]):
"""CQL Builder."""
def __init__(
self,
config: cql_config.CQLConfig,
):
"""Creates a CQL learner, an evaluation policy and an eval actor.
Args:
config: a config with CQL hps.
"""
self._config = config
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: cql_networks.CQLNetworks,
dataset: Iterator[types.Transition],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
*,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
del environment_spec
return learning.CQLLearner(
batch_size=self._config.batch_size,
networks=networks,
random_key=random_key,
demonstrations=dataset,
policy_optimizer=optax.adam(self._config.policy_learning_rate),
critic_optimizer=optax.adam(self._config.critic_learning_rate),
tau=self._config.tau,
fixed_cql_coefficient=self._config.fixed_cql_coefficient,
cql_lagrange_threshold=self._config.cql_lagrange_threshold,
cql_num_samples=self._config.cql_num_samples,
num_sgd_steps_per_step=self._config.num_sgd_steps_per_step,
reward_scale=self._config.reward_scale,
discount=self._config.discount,
fixed_entropy_coefficient=self._config.fixed_entropy_coefficient,
target_entropy=self._config.target_entropy,
num_bc_iters=self._config.num_bc_iters,
logger=logger_fn('learner'),
counter=counter)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: actor_core_lib.FeedForwardPolicy,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
) -> core.Actor:
del environment_spec
assert variable_source is not None
actor_core = actor_core_lib.batched_feed_forward_to_actor_core(policy)
variable_client = variable_utils.VariableClient(
variable_source, 'policy', device='cpu')
return actors.GenericActor(
actor_core, random_key, variable_client, backend='cpu')
def make_policy(self, networks: cql_networks.CQLNetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool) -> actor_core_lib.FeedForwardPolicy:
"""Construct the policy."""
del environment_spec, evaluation
def evaluation_policy(
params: networks_lib.Params, key: networks_lib.PRNGKey,
observation: networks_lib.Observation) -> networks_lib.Action:
dist_params = networks.policy_network.apply(params, observation)
return networks.sample_eval(dist_params, key)
return evaluation_policy
|
acme-master
|
acme/agents/jax/cql/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks definitions for the CQL agent."""
import dataclasses
from typing import Optional, Tuple
from acme import specs
from acme.agents.jax import sac
from acme.jax import networks as networks_lib
import jax
import jax.numpy as jnp
@dataclasses.dataclass
class CQLNetworks:
"""Network and pure functions for the CQL agent."""
policy_network: networks_lib.FeedForwardNetwork
critic_network: networks_lib.FeedForwardNetwork
log_prob: networks_lib.LogProbFn
sample: Optional[networks_lib.SampleFn]
sample_eval: Optional[networks_lib.SampleFn]
environment_specs: specs.EnvironmentSpec
def apply_and_sample_n(key: networks_lib.PRNGKey,
networks: CQLNetworks,
params: networks_lib.Params, obs: jnp.ndarray,
num_samples: int) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Applies the policy and samples num_samples actions."""
dist_params = networks.policy_network.apply(params, obs)
sampled_actions = jnp.array([
networks.sample(dist_params, key_n)
for key_n in jax.random.split(key, num_samples)
])
sampled_log_probs = networks.log_prob(dist_params, sampled_actions)
return sampled_actions, sampled_log_probs
def make_networks(
spec: specs.EnvironmentSpec, **kwargs) -> CQLNetworks:
sac_networks = sac.make_networks(spec, **kwargs)
return CQLNetworks(
policy_network=sac_networks.policy_network,
critic_network=sac_networks.q_network,
log_prob=sac_networks.log_prob,
sample=sac_networks.sample,
sample_eval=sac_networks.sample_eval,
environment_specs=spec)
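# The usage sketch below is not part of the original file: it assumes the fake
# continuous-control environment from acme.testing.fakes and small hidden
# layers purely to illustrate make_networks and apply_and_sample_n.
if __name__ == '__main__':
  from acme.jax import utils
  from acme.testing import fakes
  fake_env = fakes.ContinuousEnvironment(bounded=True, action_dim=2)
  env_spec = specs.make_environment_spec(fake_env)
  cql_networks = make_networks(env_spec, hidden_layer_sizes=(8, 8))
  policy_params = cql_networks.policy_network.init(jax.random.PRNGKey(0))
  batched_obs = utils.add_batch_dim(utils.zeros_like(env_spec.observations))
  sampled_actions, sampled_log_probs = apply_and_sample_n(
      jax.random.PRNGKey(1), cql_networks, policy_params, batched_obs,
      num_samples=4)
  # sampled_actions: [num_samples, batch, action_dim];
  # sampled_log_probs: [num_samples, batch].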
|
acme-master
|
acme/agents/jax/cql/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the CQL agent."""
from acme import specs
from acme.agents.jax import cql
from acme.testing import fakes
import jax
import optax
from absl.testing import absltest
class CQLTest(absltest.TestCase):
def test_train(self):
seed = 0
num_iterations = 6
batch_size = 64
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(
episode_length=10, bounded=True, action_dim=6)
spec = specs.make_environment_spec(environment)
# Construct the agent.
networks = cql.make_networks(
spec, hidden_layer_sizes=(8, 8))
dataset = fakes.transition_iterator(environment)
key = jax.random.PRNGKey(seed)
learner = cql.CQLLearner(
batch_size,
networks,
key,
demonstrations=dataset(batch_size),
policy_optimizer=optax.adam(3e-5),
critic_optimizer=optax.adam(3e-4),
fixed_cql_coefficient=5.,
cql_lagrange_threshold=None,
target_entropy=0.1,
num_bc_iters=2,
num_sgd_steps_per_step=1)
# Train the agent
for _ in range(num_iterations):
learner.step()
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/jax/cql/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CQL learner implementation."""
import time
from typing import Any, Dict, Iterator, List, NamedTuple, Optional, Tuple
import acme
from acme import types
from acme.agents.jax.cql.networks import apply_and_sample_n
from acme.agents.jax.cql.networks import CQLNetworks
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
import jax
import jax.numpy as jnp
import optax
_CQL_COEFFICIENT_MAX_VALUE = 1E6
_CQL_GRAD_CLIPPING_VALUE = 40
class TrainingState(NamedTuple):
"""Contains training state for the learner."""
policy_optimizer_state: optax.OptState
critic_optimizer_state: optax.OptState
policy_params: networks_lib.Params
critic_params: networks_lib.Params
target_critic_params: networks_lib.Params
key: networks_lib.PRNGKey
# Optimizer and value of the alpha parameter from SAC (entropy temperature).
# These fields are only used with an adaptive coefficient (when
  # fixed_entropy_coefficient is None in the CQLLearner)
alpha_optimizer_state: Optional[optax.OptState] = None
log_sac_alpha: Optional[networks_lib.Params] = None
# Optimizer and value of the alpha parameter from CQL (regularization
# coefficient).
# These fields are only used with an adaptive coefficient (when
  # fixed_cql_coefficient is None in the CQLLearner)
cql_optimizer_state: Optional[optax.OptState] = None
log_cql_alpha: Optional[networks_lib.Params] = None
steps: int = 0
class CQLLearner(acme.Learner):
"""CQL learner.
Learning component of the Conservative Q-Learning algorithm from
[Kumar et al., 2020] https://arxiv.org/abs/2006.04779.
"""
_state: TrainingState
def __init__(self,
batch_size: int,
networks: CQLNetworks,
random_key: networks_lib.PRNGKey,
demonstrations: Iterator[types.Transition],
policy_optimizer: optax.GradientTransformation,
critic_optimizer: optax.GradientTransformation,
tau: float = 0.005,
fixed_cql_coefficient: Optional[float] = None,
cql_lagrange_threshold: Optional[float] = None,
cql_num_samples: int = 10,
num_sgd_steps_per_step: int = 1,
reward_scale: float = 1.0,
discount: float = 0.99,
fixed_entropy_coefficient: Optional[float] = None,
target_entropy: Optional[float] = 0,
num_bc_iters: int = 50_000,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None):
"""Initializes the CQL learner.
Args:
batch_size: batch size.
networks: CQL networks.
random_key: a key for random number generation.
demonstrations: an iterator over training data.
policy_optimizer: the policy optimizer.
critic_optimizer: the Q-function optimizer.
tau: target smoothing coefficient.
fixed_cql_coefficient: the value for cql coefficient. If None, an adaptive
coefficient will be used.
cql_lagrange_threshold: a threshold that controls the adaptive loss for
the cql coefficient.
cql_num_samples: number of samples used to compute logsumexp(Q) via
importance sampling.
      num_sgd_steps_per_step: how many gradient updates to perform per batch;
        the batch is split into this many smaller batches, so batch_size should
        be a multiple of num_sgd_steps_per_step.
reward_scale: reward scale.
discount: discount to use for TD updates.
fixed_entropy_coefficient: coefficient applied to the entropy bonus. If
        None, an adaptive coefficient will be used.
      target_entropy: Target entropy when using an adaptive entropy bonus.
num_bc_iters: Number of BC steps for actor initialization.
counter: counter object used to keep track of steps.
logger: logger object to be used by learner.
"""
self._num_bc_iters = num_bc_iters
adaptive_entropy_coefficient = fixed_entropy_coefficient is None
action_spec = networks.environment_specs.actions
if adaptive_entropy_coefficient:
# sac_alpha is the temperature parameter that determines the relative
# importance of the entropy term versus the reward.
log_sac_alpha = jnp.asarray(0., dtype=jnp.float32)
alpha_optimizer = optax.adam(learning_rate=3e-4)
alpha_optimizer_state = alpha_optimizer.init(log_sac_alpha)
else:
if target_entropy:
raise ValueError('target_entropy should not be set when '
'fixed_entropy_coefficient is provided')
adaptive_cql_coefficient = fixed_cql_coefficient is None
if adaptive_cql_coefficient:
log_cql_alpha = jnp.asarray(0., dtype=jnp.float32)
cql_optimizer = optax.adam(learning_rate=3e-4)
cql_optimizer_state = cql_optimizer.init(log_cql_alpha)
else:
if cql_lagrange_threshold:
raise ValueError('cql_lagrange_threshold should not be set when '
'fixed_cql_coefficient is provided')
def alpha_loss(log_sac_alpha: jnp.ndarray,
policy_params: networks_lib.Params,
transitions: types.Transition,
key: jnp.ndarray) -> jnp.ndarray:
"""Eq 18 from https://arxiv.org/pdf/1812.05905.pdf."""
dist_params = networks.policy_network.apply(policy_params,
transitions.observation)
action = networks.sample(dist_params, key)
log_prob = networks.log_prob(dist_params, action)
sac_alpha = jnp.exp(log_sac_alpha)
sac_alpha_loss = sac_alpha * jax.lax.stop_gradient(-log_prob -
target_entropy)
return jnp.mean(sac_alpha_loss)
def sac_critic_loss(q_old_action: jnp.ndarray,
policy_params: networks_lib.Params,
target_critic_params: networks_lib.Params,
transitions: types.Transition,
key: networks_lib.PRNGKey) -> jnp.ndarray:
"""Computes the SAC part of the loss."""
next_dist_params = networks.policy_network.apply(
policy_params, transitions.next_observation)
next_action = networks.sample(next_dist_params, key)
next_q = networks.critic_network.apply(target_critic_params,
transitions.next_observation,
next_action)
next_v = jnp.min(next_q, axis=-1)
target_q = jax.lax.stop_gradient(transitions.reward * reward_scale +
transitions.discount * discount * next_v)
return jnp.mean(jnp.square(q_old_action - jnp.expand_dims(target_q, -1)))
def batched_critic(actions: jnp.ndarray, critic_params: networks_lib.Params,
observation: jnp.ndarray) -> jnp.ndarray:
"""Applies the critic network to a batch of sampled actions."""
actions = jax.lax.stop_gradient(actions)
tiled_actions = jnp.reshape(actions, (batch_size * cql_num_samples, -1))
tiled_states = jnp.tile(observation, [cql_num_samples, 1])
tiled_q = networks.critic_network.apply(critic_params, tiled_states,
tiled_actions)
return jnp.reshape(tiled_q, (cql_num_samples, batch_size, -1))
def cql_critic_loss(q_old_action: jnp.ndarray,
critic_params: networks_lib.Params,
policy_params: networks_lib.Params,
transitions: types.Transition,
key: networks_lib.PRNGKey) -> jnp.ndarray:
"""Computes the CQL part of the loss."""
# The CQL part of the loss is
# logsumexp(Q(s,·)) - Q(s,a),
      # where s is the current state, and a the action in the dataset (so
      # Q(s,a) is simply q_old_action).
# We need to estimate logsumexp(Q). This is done with importance sampling
# (IS). This function implements the unlabeled equation page 29, Appx. F,
# in https://arxiv.org/abs/2006.04779.
      # Here, IS is done with the uniform distribution and with the policy at
      # the current state s. In their implementation, the authors also sample
      # from the policy at the next state s':
# https://github.com/aviralkumar2907/CQL/blob/master/d4rl/rlkit/torch/sac/cql.py,
# (l. 233-236).
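      # Concretely, with N = cql_num_samples actions drawn from each of the
      # three proposals q (uniform, policy at s, policy at s'), the estimate is
      #   logsumexp_a Q(s, a) ~= log(1/(3N) * sum_i exp(Q(s, a_i) - log q(a_i))),
      # which is what jax.nn.logsumexp(..., b=1/(3 * cql_num_samples)) computes
      # below once log q(a_i) has been subtracted from the sampled Q-values.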
key_policy, key_policy_next, key_uniform = jax.random.split(key, 3)
def sampled_q(obs, key):
actions, log_probs = apply_and_sample_n(
key, networks, policy_params, obs, cql_num_samples)
return batched_critic(actions, critic_params,
transitions.observation) - jax.lax.stop_gradient(
jnp.expand_dims(log_probs, -1))
# Sample wrt policy in s
sampled_q_from_policy = sampled_q(transitions.observation, key_policy)
# Sample wrt policy in s'
sampled_q_from_policy_next = sampled_q(transitions.next_observation,
key_policy_next)
# Sample wrt uniform
actions_uniform = jax.random.uniform(
key_uniform, (cql_num_samples, batch_size) + action_spec.shape,
minval=action_spec.minimum, maxval=action_spec.maximum)
log_prob_uniform = -jnp.sum(
jnp.log(action_spec.maximum - action_spec.minimum))
sampled_q_from_uniform = (
batched_critic(actions_uniform, critic_params,
transitions.observation) - log_prob_uniform)
# Combine the samplings
combined = jnp.concatenate(
(sampled_q_from_uniform, sampled_q_from_policy,
sampled_q_from_policy_next),
axis=0)
lse_q = jax.nn.logsumexp(combined, axis=0, b=1. / (3 * cql_num_samples))
return jnp.mean(lse_q - q_old_action)
def critic_loss(critic_params: networks_lib.Params,
policy_params: networks_lib.Params,
target_critic_params: networks_lib.Params,
cql_alpha: jnp.ndarray, transitions: types.Transition,
key: networks_lib.PRNGKey) -> jnp.ndarray:
"""Computes the full critic loss."""
key_cql, key_sac = jax.random.split(key, 2)
q_old_action = networks.critic_network.apply(critic_params,
transitions.observation,
transitions.action)
cql_loss = cql_critic_loss(q_old_action, critic_params, policy_params,
transitions, key_cql)
sac_loss = sac_critic_loss(q_old_action, policy_params,
target_critic_params, transitions, key_sac)
return cql_alpha * cql_loss + sac_loss
def cql_lagrange_loss(log_cql_alpha: jnp.ndarray,
critic_params: networks_lib.Params,
policy_params: networks_lib.Params,
transitions: types.Transition,
key: jnp.ndarray) -> jnp.ndarray:
"""Computes the loss that optimizes the cql coefficient."""
cql_alpha = jnp.exp(log_cql_alpha)
q_old_action = networks.critic_network.apply(critic_params,
transitions.observation,
transitions.action)
return -cql_alpha * (
cql_critic_loss(q_old_action, critic_params, policy_params,
transitions, key) - cql_lagrange_threshold)
def actor_loss(policy_params: networks_lib.Params,
critic_params: networks_lib.Params, sac_alpha: jnp.ndarray,
transitions: types.Transition, key: jnp.ndarray,
in_initial_bc_iters: bool) -> jnp.ndarray:
"""Computes the loss for the policy."""
dist_params = networks.policy_network.apply(policy_params,
transitions.observation)
if in_initial_bc_iters:
log_prob = networks.log_prob(dist_params, transitions.action)
actor_loss = -jnp.mean(log_prob)
else:
action = networks.sample(dist_params, key)
log_prob = networks.log_prob(dist_params, action)
q_action = networks.critic_network.apply(critic_params,
transitions.observation,
action)
min_q = jnp.min(q_action, axis=-1)
actor_loss = jnp.mean(sac_alpha * log_prob - min_q)
return actor_loss
alpha_grad = jax.value_and_grad(alpha_loss)
cql_lagrange_grad = jax.value_and_grad(cql_lagrange_loss)
critic_grad = jax.value_and_grad(critic_loss)
actor_grad = jax.value_and_grad(actor_loss)
def update_step(
state: TrainingState,
rb_transitions: types.Transition,
in_initial_bc_iters: bool,
) -> Tuple[TrainingState, Dict[str, jnp.ndarray]]:
key, key_alpha, key_critic, key_actor = jax.random.split(state.key, 4)
if adaptive_entropy_coefficient:
alpha_loss, alpha_grads = alpha_grad(state.log_sac_alpha,
state.policy_params,
rb_transitions, key_alpha)
sac_alpha = jnp.exp(state.log_sac_alpha)
else:
sac_alpha = fixed_entropy_coefficient
if adaptive_cql_coefficient:
cql_lagrange_loss, cql_lagrange_grads = cql_lagrange_grad(
state.log_cql_alpha, state.critic_params, state.policy_params,
rb_transitions, key_critic)
cql_lagrange_grads = jnp.clip(cql_lagrange_grads,
-_CQL_GRAD_CLIPPING_VALUE,
_CQL_GRAD_CLIPPING_VALUE)
cql_alpha = jnp.exp(state.log_cql_alpha)
cql_alpha = jnp.clip(
cql_alpha, a_min=0., a_max=_CQL_COEFFICIENT_MAX_VALUE)
else:
cql_alpha = fixed_cql_coefficient
critic_loss, critic_grads = critic_grad(state.critic_params,
state.policy_params,
state.target_critic_params,
cql_alpha, rb_transitions,
key_critic)
actor_loss, actor_grads = actor_grad(state.policy_params,
state.critic_params, sac_alpha,
rb_transitions, key_actor,
in_initial_bc_iters)
# Apply policy gradients
actor_update, policy_optimizer_state = policy_optimizer.update(
actor_grads, state.policy_optimizer_state)
policy_params = optax.apply_updates(state.policy_params, actor_update)
# Apply critic gradients
critic_update, critic_optimizer_state = critic_optimizer.update(
critic_grads, state.critic_optimizer_state)
critic_params = optax.apply_updates(state.critic_params, critic_update)
new_target_critic_params = jax.tree_map(
lambda x, y: x * (1 - tau) + y * tau, state.target_critic_params,
critic_params)
metrics = {
'critic_loss': critic_loss,
'actor_loss': actor_loss,
}
new_state = TrainingState(
policy_optimizer_state=policy_optimizer_state,
critic_optimizer_state=critic_optimizer_state,
policy_params=policy_params,
critic_params=critic_params,
target_critic_params=new_target_critic_params,
key=key,
alpha_optimizer_state=state.alpha_optimizer_state,
log_sac_alpha=state.log_sac_alpha,
steps=state.steps + 1,
)
if adaptive_entropy_coefficient and (not in_initial_bc_iters):
# Apply sac_alpha gradients
alpha_update, alpha_optimizer_state = alpha_optimizer.update(
alpha_grads, state.alpha_optimizer_state)
log_sac_alpha = optax.apply_updates(state.log_sac_alpha, alpha_update)
metrics.update({
'alpha_loss': alpha_loss,
'sac_alpha': jnp.exp(log_sac_alpha),
})
new_state = new_state._replace(
alpha_optimizer_state=alpha_optimizer_state,
log_sac_alpha=log_sac_alpha)
else:
metrics['alpha_loss'] = 0.
        metrics['sac_alpha'] = fixed_entropy_coefficient
if adaptive_cql_coefficient:
# Apply cql coeff gradients
cql_update, cql_optimizer_state = cql_optimizer.update(
cql_lagrange_grads, state.cql_optimizer_state)
log_cql_alpha = optax.apply_updates(state.log_cql_alpha, cql_update)
metrics.update({
'cql_lagrange_loss': cql_lagrange_loss,
'cql_alpha': jnp.exp(log_cql_alpha),
})
new_state = new_state._replace(
cql_optimizer_state=cql_optimizer_state,
log_cql_alpha=log_cql_alpha)
return new_state, metrics
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger(
'learner',
asynchronous=True,
serialize_fn=utils.fetch_devicearray,
steps_key=self._counter.get_steps_key())
# Iterator on demonstration transitions.
self._demonstrations = demonstrations
# Use the JIT compiler.
update_step_in_initial_bc_iters = utils.process_multiple_batches(
lambda x, y: update_step(x, y, True), num_sgd_steps_per_step)
update_step_rest = utils.process_multiple_batches(
lambda x, y: update_step(x, y, False), num_sgd_steps_per_step)
self._update_step_in_initial_bc_iters = jax.jit(
update_step_in_initial_bc_iters)
self._update_step_rest = jax.jit(update_step_rest)
# Create initial state.
key_policy, key_q, training_state_key = jax.random.split(random_key, 3)
del random_key
policy_params = networks.policy_network.init(key_policy)
policy_optimizer_state = policy_optimizer.init(policy_params)
critic_params = networks.critic_network.init(key_q)
critic_optimizer_state = critic_optimizer.init(critic_params)
self._state = TrainingState(
policy_optimizer_state=policy_optimizer_state,
critic_optimizer_state=critic_optimizer_state,
policy_params=policy_params,
critic_params=critic_params,
target_critic_params=critic_params,
key=training_state_key,
steps=0)
if adaptive_entropy_coefficient:
self._state = self._state._replace(
alpha_optimizer_state=alpha_optimizer_state,
log_sac_alpha=log_sac_alpha)
if adaptive_cql_coefficient:
self._state = self._state._replace(
cql_optimizer_state=cql_optimizer_state, log_cql_alpha=log_cql_alpha)
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
def step(self):
# Get data from replay (dropping extras if any). Note there is no
# extra data here because we do not insert any into Reverb.
transitions = next(self._demonstrations)
counts = self._counter.get_counts()
if 'learner_steps' not in counts:
cur_step = 0
else:
cur_step = counts['learner_steps']
in_initial_bc_iters = cur_step < self._num_bc_iters
if in_initial_bc_iters:
self._state, metrics = self._update_step_in_initial_bc_iters(
self._state, transitions)
else:
self._state, metrics = self._update_step_rest(self._state, transitions)
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Increment counts and record the current time
counts = self._counter.increment(steps=1, walltime=elapsed_time)
# Attempts to write the logs.
self._logger.write({**metrics, **counts})
def get_variables(self, names: List[str]) -> List[Any]:
variables = {
'policy': self._state.policy_params,
}
return [variables[name] for name in names]
def save(self) -> TrainingState:
return self._state
def restore(self, state: TrainingState):
self._state = state
|
acme-master
|
acme/agents/jax/cql/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PWIL config."""
import dataclasses
from typing import Iterator
from acme import types
@dataclasses.dataclass
class PWILConfig:
"""Configuration options for PWIL.
The default values correspond to the experiment setup from the PWIL
publication http://arxiv.org/abs/2006.04678.
"""
# Number of transitions to fill the replay buffer with for pretraining.
num_transitions_rb: int = 50000
# If False, uses only observations for computing the distance; if True, also
# uses the actions.
use_actions_for_distance: bool = True
# Scaling for the reward function, see equation (6) in
# http://arxiv.org/abs/2006.04678.
alpha: float = 5.
# Controls the kernel size of the reward function, see equation (6)
# in http://arxiv.org/abs/2006.04678.
beta: float = 5.
# When False, uses the reward signal from the dataset during prefilling.
prefill_constant_reward: bool = True
num_sgd_steps_per_step: int = 1
@dataclasses.dataclass
class PWILDemonstrations:
"""Unbatched, unshuffled transitions with approximate episode length."""
demonstrations: Iterator[types.Transition]
episode_length: int
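# Illustrative only and not part of the original file: a PWILConfig with a few
# defaults overridden (arbitrary values), e.g. to keep the dataset rewards
# during prefilling instead of a constant reward.
if __name__ == '__main__':
  example_config = PWILConfig(
      num_transitions_rb=10_000,
      prefill_constant_reward=False)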
|
acme-master
|
acme/agents/jax/pwil/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rewarder class implementation."""
from typing import Iterator
from acme import types
import jax
import jax.numpy as jnp
import numpy as np
class WassersteinDistanceRewarder:
"""Computes PWIL rewards along a trajectory.
The rewards measure similarity to the demonstration transitions and are based
on a greedy approximation to the Wasserstein distance between trajectories.
"""
def __init__(self,
demonstrations_it: Iterator[types.Transition],
episode_length: int,
use_actions_for_distance: bool = False,
alpha: float = 5.,
beta: float = 5.):
"""Initializes the rewarder.
Args:
demonstrations_it: An iterator over acme.types.Transition.
episode_length: a target episode length (policies will be encouraged by
the imitation reward to have that length).
use_actions_for_distance: whether to use action to compute reward.
alpha: float scaling the reward function.
      beta: float controlling the kernel size of the reward function.
"""
self._episode_length = episode_length
self._use_actions_for_distance = use_actions_for_distance
self._vectorized_demonstrations = self._vectorize(demonstrations_it)
# Observations and actions are flat.
atom_dims = self._vectorized_demonstrations.shape[1]
self._reward_sigma = beta * self._episode_length / np.sqrt(atom_dims)
self._reward_scale = alpha
self._std = np.std(self._vectorized_demonstrations, axis=0, dtype='float64')
    # The std is set to 1 for dimensions whose std is below a threshold.
# This prevents normalizing observation values that are constant (which can
# be problematic with e.g. demonstrations coming from a different version
# of the environment and where the constant values are slightly different).
self._std = (self._std < 1e-6) + self._std
self.expert_atoms = self._vectorized_demonstrations / self._std
self._compute_norm = jax.jit(lambda a, b: jnp.linalg.norm(a - b, axis=1),
device=jax.devices('cpu')[0])
def _vectorize(self,
demonstrations_it: Iterator[types.Transition]) -> np.ndarray:
"""Converts filtered expert demonstrations to numpy array.
Args:
      demonstrations_it: an iterator over expert demonstrations.
Returns:
numpy array with dimension:
[num_expert_transitions, dim_observation] if not use_actions_for_distance
[num_expert_transitions, (dim_observation + dim_action)] otherwise
"""
if self._use_actions_for_distance:
demonstrations = [
np.concatenate([t.observation, t.action]) for t in demonstrations_it
]
else:
demonstrations = [t.observation for t in demonstrations_it]
return np.array(demonstrations)
def reset(self) -> None:
"""Makes all expert transitions available and initialize weights."""
num_expert_atoms = len(self.expert_atoms)
self._all_expert_weights_zero = False
self.expert_weights = np.ones(num_expert_atoms) / num_expert_atoms
def append_and_compute_reward(self, observation: jnp.ndarray,
action: jnp.ndarray) -> np.float32:
"""Computes reward and updates state, advancing it along a trajectory.
Subsequent calls to append_and_compute_reward assume inputs are subsequent
trajectory points.
Args:
observation: observation on a trajectory, to compare with the expert
demonstration(s).
action: the action following the observation on the trajectory.
Returns:
the reward value: the return contribution from the trajectory point.
"""
# If we run out of demonstrations, penalize further action.
if self._all_expert_weights_zero:
return np.float32(0.)
# Scale observation and action.
if self._use_actions_for_distance:
agent_atom = np.concatenate([observation, action])
else:
agent_atom = observation
agent_atom /= self._std
cost = 0.
# A special marker for records with zero expert weight. Has to be large so
# that argmin will not return it.
DELETED = 1e10 # pylint: disable=invalid-name
    # As we match the expert's weights with the agent's weights, we might
    # run into float-precision issues, so we subtract a small epsilon from
    # the agent's weights to prevent that.
weight = 1. / self._episode_length - 1e-6
norms = np.array(self._compute_norm(self.expert_atoms, agent_atom))
# We need to mask out states with zero weight, so that 'argmin' would not
# return them.
adjusted_norms = (1 - np.sign(self.expert_weights)) * DELETED + norms
while weight > 0:
# Get closest expert state action to agent's state action.
argmin = adjusted_norms.argmin()
effective_weight = min(weight, self.expert_weights[argmin])
if adjusted_norms[argmin] >= DELETED:
self._all_expert_weights_zero = True
break
# Update cost and weights.
weight -= effective_weight
self.expert_weights[argmin] -= effective_weight
cost += effective_weight * norms[argmin]
adjusted_norms[argmin] = DELETED
if weight > 0:
# We have a 'partial' cost if we ran out of demonstrations in the reward
# computation loop. We assign a high cost (infinite) in this case which
# makes the reward equal to 0.
reward = np.array(0.)
else:
reward = self._reward_scale * np.exp(-self._reward_sigma * cost)
return reward.astype('float32')
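# The sketch below is not part of the original file: it builds a rewarder from
# two toy transitions (arbitrary values) and queries an imitation reward for a
# single agent step, mirroring how PWILAdder uses this class.
if __name__ == '__main__':
  _toy_demos = [
      types.Transition(
          observation=np.array([0.0, 0.0], dtype=np.float32),
          action=np.array([0.1], dtype=np.float32),
          reward=np.float32(0.),
          discount=np.float32(1.),
          next_observation=np.array([0.1, 0.0], dtype=np.float32)),
      types.Transition(
          observation=np.array([0.1, 0.0], dtype=np.float32),
          action=np.array([0.2], dtype=np.float32),
          reward=np.float32(0.),
          discount=np.float32(0.),
          next_observation=np.array([0.2, 0.0], dtype=np.float32)),
  ]
  toy_rewarder = WassersteinDistanceRewarder(
      demonstrations_it=iter(_toy_demos),
      episode_length=2,
      use_actions_for_distance=True)
  toy_rewarder.reset()
  imitation_reward = toy_rewarder.append_and_compute_reward(
      observation=np.array([0.0, 0.0], dtype=np.float32),
      action=np.array([0.1], dtype=np.float32))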
|
acme-master
|
acme/agents/jax/pwil/rewarder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PWIL agent."""
from acme.agents.jax.pwil.builder import PWILBuilder
from acme.agents.jax.pwil.config import PWILConfig
from acme.agents.jax.pwil.config import PWILDemonstrations
|
acme-master
|
acme/agents/jax/pwil/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PWIL agent implementation, using JAX."""
import threading
from typing import Callable, Generic, Iterator, List, Optional, Sequence
from acme import adders
from acme import core
from acme import specs
from acme import types
from acme.agents.jax import builders
from acme.agents.jax.pwil import adder as pwil_adder
from acme.agents.jax.pwil import config as pwil_config
from acme.agents.jax.pwil import rewarder
from acme.jax import networks as networks_lib
from acme.jax.imitation_learning_types import DirectPolicyNetwork, DirectRLNetworks # pylint: disable=g-multiple-import
from acme.jax.types import PRNGKey
from acme.utils import counting
from acme.utils import loggers
import dm_env
import numpy as np
import reverb
def _prefill_with_demonstrations(adder: adders.Adder,
demonstrations: Sequence[types.Transition],
reward: Optional[float],
min_num_transitions: int = 0) -> None:
"""Fill the adder's replay buffer with expert transitions.
Assumes that the demonstrations dataset stores transitions in order.
Args:
adder: the agent which adds the demonstrations.
demonstrations: the expert demonstrations to iterate over.
reward: if non-None, populates the environment reward entry of transitions.
min_num_transitions: the lower bound on transitions processed, the dataset
will be iterated over multiple times if needed. Once at least
min_num_transitions are added, the processing is interrupted at the
nearest episode end.
"""
if not demonstrations:
return
reward = np.float32(reward) if reward is not None else reward
remaining_transitions = min_num_transitions
step_type = None
action = None
ts = dm_env.TimeStep(None, None, None, None) # Unused.
while remaining_transitions > 0:
# In case we share the adder or demonstrations don't end with
# end-of-episode, reset the adder prior to add_first.
adder.reset()
for transition_num, transition in enumerate(demonstrations):
remaining_transitions -= 1
discount = np.float32(1.0)
ts_reward = reward if reward is not None else transition.reward
if step_type == dm_env.StepType.LAST or transition_num == 0:
ts = dm_env.TimeStep(dm_env.StepType.FIRST, ts_reward, discount,
transition.observation)
adder.add_first(ts)
observation = transition.next_observation
action = transition.action
if transition.discount == 0. or transition_num == len(demonstrations) - 1:
step_type = dm_env.StepType.LAST
discount = np.float32(0.0)
else:
step_type = dm_env.StepType.MID
ts = dm_env.TimeStep(step_type, ts_reward, discount, observation)
adder.add(action, ts)
if remaining_transitions <= 0:
# Note: we could check `step_type == dm_env.StepType.LAST` to stop at an
# episode end if possible.
break
# Explicitly finalize the Reverb client writes.
adder.reset()
class PWILBuilder(builders.ActorLearnerBuilder[DirectRLNetworks,
DirectPolicyNetwork,
reverb.ReplaySample],
Generic[DirectRLNetworks, DirectPolicyNetwork]):
"""PWIL Agent builder."""
def __init__(self,
rl_agent: builders.ActorLearnerBuilder[DirectRLNetworks,
DirectPolicyNetwork,
reverb.ReplaySample],
config: pwil_config.PWILConfig,
demonstrations_fn: Callable[[], pwil_config.PWILDemonstrations]):
"""Initialize the agent.
Args:
rl_agent: the standard RL algorithm.
config: PWIL-specific configuration.
demonstrations_fn: A function that returns an iterator over contiguous
demonstration transitions, and the average demonstration episode length.
"""
self._rl_agent = rl_agent
self._config = config
self._demonstrations_fn = demonstrations_fn
super().__init__()
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: DirectRLNetworks,
dataset: Iterator[reverb.ReplaySample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
return self._rl_agent.make_learner(
random_key=random_key,
networks=networks,
dataset=dataset,
logger_fn=logger_fn,
environment_spec=environment_spec,
replay_client=replay_client,
counter=counter)
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: DirectPolicyNetwork,
) -> List[reverb.Table]:
return self._rl_agent.make_replay_tables(environment_spec, policy)
def make_dataset_iterator( # pytype: disable=signature-mismatch # overriding-return-type-checks
self,
replay_client: reverb.Client) -> Optional[Iterator[reverb.ReplaySample]]:
# make_dataset_iterator is only called once (per learner), to pass the
    # iterator to make_learner. By using adders we ensure that the transitions
    # (e.g. n-step transitions) have the format the direct RL agent expects.
if self._config.num_transitions_rb > 0:
def prefill_thread():
        # Prefilling via the direct RL agent's adder (rather than the PWIL
        # adder) guarantees that the imitation reward is not used here; a
        # constant reward or the dataset reward is stored instead.
prefill_reward = (
self._config.alpha
if self._config.prefill_constant_reward else None)
_prefill_with_demonstrations(
adder=self._rl_agent.make_adder(replay_client, None, None),
demonstrations=list(self._demonstrations_fn().demonstrations),
min_num_transitions=self._config.num_transitions_rb,
reward=prefill_reward)
# Populate the replay buffer in a separate thread, so that the learner
# can sample from the buffer, to avoid blocking on the buffer being full.
threading.Thread(target=prefill_thread, daemon=True).start()
return self._rl_agent.make_dataset_iterator(replay_client)
def make_adder(
self,
replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[DirectPolicyNetwork],
) -> Optional[adders.Adder]:
"""Creates the adder substituting imitation reward."""
pwil_demonstrations = self._demonstrations_fn()
return pwil_adder.PWILAdder(
direct_rl_adder=self._rl_agent.make_adder(replay_client,
environment_spec, policy),
pwil_rewarder=rewarder.WassersteinDistanceRewarder(
demonstrations_it=pwil_demonstrations.demonstrations,
episode_length=pwil_demonstrations.episode_length,
use_actions_for_distance=self._config.use_actions_for_distance,
alpha=self._config.alpha,
beta=self._config.beta))
def make_actor(
self,
random_key: PRNGKey,
policy: DirectPolicyNetwork,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> core.Actor:
return self._rl_agent.make_actor(random_key, policy, environment_spec,
variable_source, adder)
def make_policy(self,
networks: DirectRLNetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> DirectPolicyNetwork:
return self._rl_agent.make_policy(networks, environment_spec, evaluation)
|
acme-master
|
acme/agents/jax/pwil/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reward-substituting adder wrapper."""
from acme import adders
from acme import types
from acme.agents.jax.pwil import rewarder
import dm_env
class PWILAdder(adders.Adder):
"""Adder wrapper substituting PWIL rewards."""
def __init__(self, direct_rl_adder: adders.Adder,
pwil_rewarder: rewarder.WassersteinDistanceRewarder):
self._adder = direct_rl_adder
self._rewarder = pwil_rewarder
self._latest_observation = None
def add_first(self, timestep: dm_env.TimeStep):
self._rewarder.reset()
self._latest_observation = timestep.observation
self._adder.add_first(timestep)
def add(self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
extras: types.NestedArray = ()):
updated_timestep = next_timestep._replace(
reward=self._rewarder.append_and_compute_reward(
observation=self._latest_observation, action=action))
self._latest_observation = next_timestep.observation
self._adder.add(action, updated_timestep, extras)
def reset(self):
self._latest_observation = None
self._adder.reset()
|
acme-master
|
acme/agents/jax/pwil/adder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config classes for D4PG."""
import dataclasses
from typing import Optional
from acme.adders import reverb as adders_reverb
@dataclasses.dataclass
class D4PGConfig:
"""Configuration options for D4PG."""
sigma: float = 0.3
target_update_period: int = 100
samples_per_insert: Optional[float] = 32.0
# Loss options
n_step: int = 5
discount: float = 0.99
batch_size: int = 256
learning_rate: float = 1e-4
clipping: bool = True
# Replay options
min_replay_size: int = 1000
max_replay_size: int = 1000000
replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
prefetch_size: int = 4
  # Rate to be used for the SampleToInsertRatio rate limiter tolerance.
# See a formula in make_replay_tables for more details.
samples_per_insert_tolerance_rate: float = 0.1
# How many gradient updates to perform per step.
num_sgd_steps_per_step: int = 1
|
acme-master
|
acme/agents/jax/d4pg/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of a D4PG agent."""
from acme.agents.jax.d4pg.builder import D4PGBuilder
from acme.agents.jax.d4pg.config import D4PGConfig
from acme.agents.jax.d4pg.learning import D4PGLearner
from acme.agents.jax.d4pg.networks import D4PGNetworks
from acme.agents.jax.d4pg.networks import get_default_behavior_policy
from acme.agents.jax.d4pg.networks import get_default_eval_policy
from acme.agents.jax.d4pg.networks import make_networks
|
acme-master
|
acme/agents/jax/d4pg/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""D4PG Builder."""
from typing import Iterator, List, Optional
import acme
from acme import adders
from acme import core
from acme import specs
from acme import types
from acme.adders import reverb as adders_reverb
from acme.adders.reverb import base as reverb_base
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.agents.jax.d4pg import config as d4pg_config
from acme.agents.jax.d4pg import learning
from acme.agents.jax.d4pg import networks as d4pg_networks
from acme.datasets import reverb as datasets
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import jax
import optax
import reverb
from reverb import rate_limiters
from reverb import structured_writer as sw
import tensorflow as tf
import tree
def _make_adder_config(step_spec: reverb_base.Step, n_step: int,
table: str) -> List[sw.Config]:
return adders_reverb.create_n_step_transition_config(
step_spec=step_spec, n_step=n_step, table=table)
def _as_n_step_transition(flat_trajectory: reverb.ReplaySample,
agent_discount: float) -> reverb.ReplaySample:
"""Compute discounted return and total discount for N-step transitions.
For N greater than 1, transitions are of the form:
    (s_t, a_t, r_{t:t+n}, d_{t:t+n}, s_{t+N}, e_t),
where:
s_t = State (observation) at time t.
a_t = Action taken from state s_t.
g = the additional discount, used by the agent to discount future returns.
r_{t:t+n} = A vector of N-step rewards: [r_t r_{t+1} ... r_{t+n}]
    d_{t:t+n} = A vector of N-step environment discounts: [d_t d_{t+1} ... d_{t+n}]
For most environments d_i is 1 for all steps except the last,
i.e. it is the episode termination signal.
s_{t+n}: The "arrival" state, i.e. the state at time t+n.
e_t [Optional]: A nested structure of any 'extras' the user wishes to add.
  As such, postprocessing is necessary to calculate the N-step discounted return
and the total discount as follows:
(s_t, a_t, R_{t:t+n}, D_{t:t+n}, s_{t+N}, e_t),
where:
R_{t:t+n} = N-step discounted return, i.e. accumulated over N rewards:
R_{t:t+n} := r_t + g * d_t * r_{t+1} + ...
+ g^{n-1} * d_t * ... * d_{t+n-2} * r_{t+n-1}.
D_{t:t+n}: N-step product of agent discounts g_i and environment
"discounts" d_i.
D_{t:t+n} := g^{n-1} * d_{t} * ... * d_{t+n-1},
Args:
    flat_trajectory: A trajectory with n-step rewards and discounts to be
      processed.
    agent_discount: An additional discount factor used by the agent to discount
      future returns.
Returns:
A reverb.ReplaySample with computed discounted return and total discount.
"""
trajectory = flat_trajectory.data
def compute_discount_and_reward(
state: types.NestedTensor,
discount_and_reward: types.NestedTensor) -> types.NestedTensor:
compounded_discount, discounted_reward = state
return (agent_discount * discount_and_reward[0] * compounded_discount,
discounted_reward + discount_and_reward[1] * compounded_discount)
initializer = (tf.constant(1, dtype=tf.float32),
tf.constant(0, dtype=tf.float32))
elems = tf.stack((trajectory.discount, trajectory.reward), axis=-1)
total_discount, n_step_return = tf.scan(
compute_discount_and_reward, elems, initializer, reverse=True)
return reverb.ReplaySample(
info=flat_trajectory.info,
data=types.Transition(
observation=tree.map_structure(lambda x: x[0],
trajectory.observation),
action=tree.map_structure(lambda x: x[0], trajectory.action),
reward=n_step_return[0],
discount=total_discount[0],
next_observation=tree.map_structure(lambda x: x[-1],
trajectory.observation),
extras=tree.map_structure(lambda x: x[0], trajectory.extras)))
class D4PGBuilder(builders.ActorLearnerBuilder[d4pg_networks.D4PGNetworks,
actor_core_lib.ActorCore,
reverb.ReplaySample]):
"""D4PG Builder."""
def __init__(
self,
config: d4pg_config.D4PGConfig,
):
"""Creates a D4PG learner, a behavior policy and an eval actor.
Args:
config: a config with D4PG hps
"""
self._config = config
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: d4pg_networks.D4PGNetworks,
dataset: Iterator[reverb.ReplaySample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
del environment_spec, replay_client
policy_optimizer = optax.adam(self._config.learning_rate)
critic_optimizer = optax.adam(self._config.learning_rate)
if self._config.clipping:
policy_optimizer = optax.chain(
optax.clip_by_global_norm(40.), policy_optimizer)
critic_optimizer = optax.chain(
optax.clip_by_global_norm(40.), critic_optimizer)
# The learner updates the parameters (and initializes them).
return learning.D4PGLearner(
policy_network=networks.policy_network,
critic_network=networks.critic_network,
random_key=random_key,
policy_optimizer=policy_optimizer,
critic_optimizer=critic_optimizer,
clipping=self._config.clipping,
discount=self._config.discount,
target_update_period=self._config.target_update_period,
iterator=dataset,
counter=counter,
logger=logger_fn('learner'),
num_sgd_steps_per_step=self._config.num_sgd_steps_per_step)
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: actor_core_lib.ActorCore,
) -> List[reverb.Table]:
"""Create tables to insert data into."""
dummy_actor_state = policy.init(jax.random.PRNGKey(0))
extras_spec = policy.get_extras(dummy_actor_state)
step_spec = adders_reverb.create_step_spec(
environment_spec=environment_spec, extras_spec=extras_spec)
# Create the rate limiter.
if self._config.samples_per_insert:
samples_per_insert_tolerance = (
self._config.samples_per_insert_tolerance_rate *
self._config.samples_per_insert)
error_buffer = self._config.min_replay_size * samples_per_insert_tolerance
limiter = rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._config.min_replay_size,
samples_per_insert=self._config.samples_per_insert,
error_buffer=error_buffer)
else:
limiter = rate_limiters.MinSize(self._config.min_replay_size)
return [
reverb.Table(
name=self._config.replay_table_name,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._config.max_replay_size,
rate_limiter=limiter,
signature=sw.infer_signature(
configs=_make_adder_config(step_spec, self._config.n_step,
self._config.replay_table_name),
step_spec=step_spec))
]
def make_dataset_iterator(
self,
replay_client: reverb.Client,
) -> Iterator[reverb.ReplaySample]:
"""Create a dataset iterator to use for learning/updating the agent."""
def postprocess(
flat_trajectory: reverb.ReplaySample) -> reverb.ReplaySample:
return _as_n_step_transition(flat_trajectory, self._config.discount)
batch_size_per_device = self._config.batch_size // jax.device_count()
dataset = datasets.make_reverb_dataset(
table=self._config.replay_table_name,
server_address=replay_client.server_address,
batch_size=batch_size_per_device * self._config.num_sgd_steps_per_step,
prefetch_size=self._config.prefetch_size,
postprocess=postprocess,
)
return utils.multi_device_put(dataset.as_numpy_iterator(),
jax.local_devices())
def make_adder(
self,
replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[actor_core_lib.ActorCore],
) -> Optional[adders.Adder]:
"""Create an adder which records data generated by the actor/environment."""
if environment_spec is None or policy is None:
raise ValueError('`environment_spec` and `policy` cannot be None.')
dummy_actor_state = policy.init(jax.random.PRNGKey(0))
extras_spec = policy.get_extras(dummy_actor_state)
step_spec = adders_reverb.create_step_spec(
environment_spec=environment_spec, extras_spec=extras_spec)
return adders_reverb.StructuredAdder(
client=replay_client,
max_in_flight_items=5,
configs=_make_adder_config(step_spec, self._config.n_step,
self._config.replay_table_name),
step_spec=step_spec)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: actor_core_lib.ActorCore,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> acme.Actor:
del environment_spec
assert variable_source is not None
# Inference happens on CPU, so it's better to move variables there too.
variable_client = variable_utils.VariableClient(
variable_source, 'policy', device='cpu')
return actors.GenericActor(
policy, random_key, variable_client, adder, backend='cpu')
def make_policy(self,
networks: d4pg_networks.D4PGNetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> actor_core_lib.ActorCore:
"""Create the policy."""
del environment_spec
if evaluation:
policy = d4pg_networks.get_default_eval_policy(networks)
else:
policy = d4pg_networks.get_default_behavior_policy(networks, self._config)
return actor_core_lib.batched_feed_forward_to_actor_core(policy)
|
acme-master
|
acme/agents/jax/d4pg/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""D4PG networks definition."""
import dataclasses
from typing import Sequence
from acme import specs
from acme import types
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax.d4pg import config as d4pg_config
from acme.jax import networks as networks_lib
from acme.jax import utils
import haiku as hk
import jax.numpy as jnp
import numpy as np
import rlax
@dataclasses.dataclass
class D4PGNetworks:
"""Network and pure functions for the D4PG agent.."""
policy_network: networks_lib.FeedForwardNetwork
critic_network: networks_lib.FeedForwardNetwork
def get_default_behavior_policy(
networks: D4PGNetworks,
config: d4pg_config.D4PGConfig) -> actor_core_lib.FeedForwardPolicy:
"""Selects action according to the training policy."""
def behavior_policy(params: networks_lib.Params, key: networks_lib.PRNGKey,
observation: types.NestedArray):
action = networks.policy_network.apply(params, observation)
if config.sigma != 0:
action = rlax.add_gaussian_noise(key, action, config.sigma)
return action
return behavior_policy
def get_default_eval_policy(
networks: D4PGNetworks) -> actor_core_lib.FeedForwardPolicy:
"""Selects action according to the training policy."""
def behavior_policy(params: networks_lib.Params, key: networks_lib.PRNGKey,
observation: types.NestedArray):
del key
action = networks.policy_network.apply(params, observation)
return action
return behavior_policy
def make_networks(
spec: specs.EnvironmentSpec,
policy_layer_sizes: Sequence[int] = (300, 200),
critic_layer_sizes: Sequence[int] = (400, 300),
vmin: float = -150.,
vmax: float = 150.,
num_atoms: int = 51,
) -> D4PGNetworks:
"""Creates networks used by the agent."""
action_spec = spec.actions
num_dimensions = np.prod(action_spec.shape, dtype=int)
critic_atoms = jnp.linspace(vmin, vmax, num_atoms)
def _actor_fn(obs):
network = hk.Sequential([
utils.batch_concat,
networks_lib.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks_lib.NearZeroInitializedLinear(num_dimensions),
networks_lib.TanhToSpec(action_spec),
])
return network(obs)
def _critic_fn(obs, action):
network = hk.Sequential([
utils.batch_concat,
networks_lib.LayerNormMLP(layer_sizes=[*critic_layer_sizes, num_atoms]),
])
value = network([obs, action])
return value, critic_atoms
policy = hk.without_apply_rng(hk.transform(_actor_fn))
critic = hk.without_apply_rng(hk.transform(_critic_fn))
# Create dummy observations and actions to create network parameters.
dummy_action = utils.zeros_like(spec.actions)
dummy_obs = utils.zeros_like(spec.observations)
dummy_action = utils.add_batch_dim(dummy_action)
dummy_obs = utils.add_batch_dim(dummy_obs)
return D4PGNetworks(
policy_network=networks_lib.FeedForwardNetwork(
lambda rng: policy.init(rng, dummy_obs), policy.apply),
critic_network=networks_lib.FeedForwardNetwork(
lambda rng: critic.init(rng, dummy_obs, dummy_action), critic.apply))
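# The sketch below is not part of the original file: it assumes the fake
# continuous-control environment from acme.testing.fakes and small layer sizes
# purely to illustrate make_networks and the default behavior policy.
if __name__ == '__main__':
  import jax
  from acme.testing import fakes
  fake_env = fakes.ContinuousEnvironment(bounded=True, action_dim=2)
  env_spec = specs.make_environment_spec(fake_env)
  d4pg_nets = make_networks(
      env_spec, policy_layer_sizes=(16, 16), critic_layer_sizes=(16, 16))
  policy_params = d4pg_nets.policy_network.init(jax.random.PRNGKey(0))
  behavior_policy = get_default_behavior_policy(
      d4pg_nets, d4pg_config.D4PGConfig(sigma=0.1))
  batched_obs = utils.add_batch_dim(utils.zeros_like(env_spec.observations))
  noisy_action = behavior_policy(
      policy_params, jax.random.PRNGKey(1), batched_obs)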
|
acme-master
|
acme/agents/jax/d4pg/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""D4PG learner implementation."""
import time
from typing import Dict, Iterator, List, NamedTuple, Optional, Tuple
import acme
from acme import types
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
import jax
import jax.numpy as jnp
import optax
import reverb
import rlax
_PMAP_AXIS_NAME = 'data'
class TrainingState(NamedTuple):
"""Contains training state for the learner."""
policy_params: networks_lib.Params
target_policy_params: networks_lib.Params
critic_params: networks_lib.Params
target_critic_params: networks_lib.Params
policy_opt_state: optax.OptState
critic_opt_state: optax.OptState
steps: int
class D4PGLearner(acme.Learner):
"""D4PG learner.
  This is the learning component of a D4PG agent, i.e. it takes a dataset as
  input and implements update functionality to learn from this dataset.
"""
_state: TrainingState
def __init__(self,
policy_network: networks_lib.FeedForwardNetwork,
critic_network: networks_lib.FeedForwardNetwork,
random_key: networks_lib.PRNGKey,
discount: float,
target_update_period: int,
iterator: Iterator[reverb.ReplaySample],
policy_optimizer: Optional[optax.GradientTransformation] = None,
critic_optimizer: Optional[optax.GradientTransformation] = None,
clipping: bool = True,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
jit: bool = True,
num_sgd_steps_per_step: int = 1):
def critic_mean(
critic_params: networks_lib.Params,
observation: types.NestedArray,
action: types.NestedArray,
) -> jnp.ndarray:
# We add batch dimension to make sure batch concat in critic_network
# works correctly.
observation = utils.add_batch_dim(observation)
action = utils.add_batch_dim(action)
# Computes the mean action-value estimate.
logits, atoms = critic_network.apply(critic_params, observation, action)
logits = utils.squeeze_batch_dim(logits)
probabilities = jax.nn.softmax(logits)
return jnp.sum(probabilities * atoms, axis=-1)
def policy_loss(
policy_params: networks_lib.Params,
critic_params: networks_lib.Params,
o_t: types.NestedArray,
) -> jnp.ndarray:
      # Computes the deterministic policy gradient (DPG) loss.
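      # The gradient of E[Q(s, pi(s))] w.r.t. the policy parameters is obtained
      # by differentiating the critic's mean value w.r.t. the action (dq_da
      # below) and back-propagating through the policy output; rlax.dpg_loss
      # turns this into a per-example loss whose action gradient matches the
      # DPG update, optionally clipping dq_da element-wise.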
dpg_a_t = policy_network.apply(policy_params, o_t)
grad_critic = jax.vmap(
jax.grad(critic_mean, argnums=2), in_axes=(None, 0, 0))
dq_da = grad_critic(critic_params, o_t, dpg_a_t)
dqda_clipping = 1. if clipping else None
batch_dpg_learning = jax.vmap(rlax.dpg_loss, in_axes=(0, 0, None))
loss = batch_dpg_learning(dpg_a_t, dq_da, dqda_clipping)
return jnp.mean(loss)
def critic_loss(
critic_params: networks_lib.Params,
state: TrainingState,
transition: types.Transition,
):
# Computes the distributional critic loss.
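      # The critic outputs logits over a fixed support of atoms. The target
      # distribution places the target critic's probabilities on the
      # reward-shifted, discounted support r + discount * d * z, and
      # rlax.categorical_td_learning projects it back onto the original atoms
      # and returns the cross-entropy against the online critic's logits.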
q_tm1, atoms_tm1 = critic_network.apply(critic_params,
transition.observation,
transition.action)
a = policy_network.apply(state.target_policy_params,
transition.next_observation)
q_t, atoms_t = critic_network.apply(state.target_critic_params,
transition.next_observation, a)
batch_td_learning = jax.vmap(
rlax.categorical_td_learning, in_axes=(None, 0, 0, 0, None, 0))
loss = batch_td_learning(atoms_tm1, q_tm1, transition.reward,
discount * transition.discount, atoms_t, q_t)
return jnp.mean(loss)
def sgd_step(
state: TrainingState,
transitions: types.Transition,
) -> Tuple[TrainingState, Dict[str, jnp.ndarray]]:
# TODO(jaslanides): Use a shared forward pass for efficiency.
policy_loss_and_grad = jax.value_and_grad(policy_loss)
critic_loss_and_grad = jax.value_and_grad(critic_loss)
# Compute losses and their gradients.
policy_loss_value, policy_gradients = policy_loss_and_grad(
state.policy_params, state.critic_params,
transitions.next_observation)
critic_loss_value, critic_gradients = critic_loss_and_grad(
state.critic_params, state, transitions)
# Average over all devices.
policy_loss_value, policy_gradients = jax.lax.pmean(
(policy_loss_value, policy_gradients), _PMAP_AXIS_NAME)
critic_loss_value, critic_gradients = jax.lax.pmean(
(critic_loss_value, critic_gradients), _PMAP_AXIS_NAME)
# Get optimizer updates and state.
policy_updates, policy_opt_state = policy_optimizer.update( # pytype: disable=attribute-error
policy_gradients, state.policy_opt_state)
critic_updates, critic_opt_state = critic_optimizer.update( # pytype: disable=attribute-error
critic_gradients, state.critic_opt_state)
# Apply optimizer updates to parameters.
policy_params = optax.apply_updates(state.policy_params, policy_updates)
critic_params = optax.apply_updates(state.critic_params, critic_updates)
steps = state.steps + 1
# Periodically update target networks.
target_policy_params, target_critic_params = optax.periodic_update( # pytype: disable=wrong-arg-types # numpy-scalars
(policy_params, critic_params),
(state.target_policy_params, state.target_critic_params), steps,
self._target_update_period)
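      # optax.periodic_update copies the online parameters into the targets
      # every target_update_period steps and leaves the targets unchanged in
      # between.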
new_state = TrainingState(
policy_params=policy_params,
critic_params=critic_params,
target_policy_params=target_policy_params,
target_critic_params=target_critic_params,
policy_opt_state=policy_opt_state,
critic_opt_state=critic_opt_state,
steps=steps,
)
metrics = {
'policy_loss': policy_loss_value,
'critic_loss': critic_loss_value,
}
return new_state, metrics
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger(
'learner',
asynchronous=True,
serialize_fn=utils.fetch_devicearray,
steps_key=self._counter.get_steps_key())
# Necessary to track when to update target networks.
self._target_update_period = target_update_period
# Create prefetching dataset iterator.
self._iterator = iterator
# Maybe use the JIT compiler.
sgd_step = utils.process_multiple_batches(sgd_step, num_sgd_steps_per_step)
self._sgd_step = (
jax.pmap(sgd_step, _PMAP_AXIS_NAME, devices=jax.devices())
if jit else sgd_step)
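    # process_multiple_batches folds num_sgd_steps_per_step updates into a
    # single call; with jit=True, jax.pmap compiles the step and replicates it
    # across all local devices (gradients are averaged over _PMAP_AXIS_NAME
    # inside sgd_step).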
    # Create the network parameters and copy them into the target network
    # parameters.
key_policy, key_critic = jax.random.split(random_key)
initial_policy_params = policy_network.init(key_policy)
initial_critic_params = critic_network.init(key_critic)
initial_target_policy_params = initial_policy_params
initial_target_critic_params = initial_critic_params
# Create optimizers if they aren't given.
critic_optimizer = critic_optimizer or optax.adam(1e-4)
policy_optimizer = policy_optimizer or optax.adam(1e-4)
# Initialize optimizers.
initial_policy_opt_state = policy_optimizer.init(initial_policy_params) # pytype: disable=attribute-error
initial_critic_opt_state = critic_optimizer.init(initial_critic_params) # pytype: disable=attribute-error
# Create the initial state and replicate it in all devices.
self._state = utils.replicate_in_all_devices(
TrainingState(
policy_params=initial_policy_params,
target_policy_params=initial_target_policy_params,
critic_params=initial_critic_params,
target_critic_params=initial_target_critic_params,
policy_opt_state=initial_policy_opt_state,
critic_opt_state=initial_critic_opt_state,
steps=0,
))
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
def step(self):
# Sample from replay and pack the data in a Transition.
sample = next(self._iterator)
transitions = types.Transition(*sample.data)
self._state, metrics = self._sgd_step(self._state, transitions)
# Take the metrics from the first device, since they've been pmeaned over
# all devices and are therefore identical.
metrics = utils.get_from_first_device(metrics)
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
    # Increment counts and record the current time.
counts = self._counter.increment(steps=1, walltime=elapsed_time)
# Attempts to write the logs.
self._logger.write({**metrics, **counts})
def get_variables(self, names: List[str]) -> List[networks_lib.Params]:
variables = {
'policy': self._state.target_policy_params,
'critic': self._state.target_critic_params,
}
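    # Serve the slowly-changing target-network parameters, taken from the
    # first device since the state is replicated across devices.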
return utils.get_from_first_device([variables[name] for name in names])
def save(self) -> TrainingState:
return utils.get_from_first_device(self._state)
def restore(self, state: TrainingState):
self._state = utils.replicate_in_all_devices(state)
|
acme-master
|
acme/agents/jax/d4pg/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IMPALA config."""
import dataclasses
from typing import Optional, Union
from acme import types
from acme.adders import reverb as adders_reverb
import numpy as np
import optax
@dataclasses.dataclass
class IMPALAConfig:
"""Configuration options for IMPALA."""
seed: int = 0
discount: float = 0.99
sequence_length: int = 20
sequence_period: Optional[int] = None
variable_update_period: int = 1000
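  # How often actors refresh their copy of the parameters from the learner.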
# Optimizer configuration.
batch_size: int = 32
learning_rate: Union[float, optax.Schedule] = 2e-4
adam_momentum_decay: float = 0.0
adam_variance_decay: float = 0.99
adam_eps: float = 1e-8
adam_eps_root: float = 0.0
max_gradient_norm: float = 40.0
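  # These fields are typically mapped onto an Adam optimizer
  # (b1=adam_momentum_decay, b2=adam_variance_decay) chained with
  # clip_by_global_norm(max_gradient_norm); see the IMPALA builder for the
  # exact optax chain used.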
# Loss configuration.
baseline_cost: float = 0.5
entropy_cost: float = 0.01
max_abs_reward: float = np.inf
  # Replay options.
replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
num_prefetch_threads: Optional[int] = None
samples_per_insert: Optional[float] = 1.0
max_queue_size: Union[int, types.Batches] = types.Batches(10)
def __post_init__(self):
if isinstance(self.max_queue_size, types.Batches):
self.max_queue_size *= self.batch_size
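      # E.g. with the default batch_size of 32, Batches(10) becomes a queue
      # capacity of 320 items.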
    assert self.max_queue_size > self.batch_size + 1, ("""
        max_queue_size must be larger than batch_size + 1:
        - during the last step in an episode we might write 2 sequences to
          Reverb at once (that's how SequenceAdder works)
        - Reverb does insertion/sampling in multiple threads, so data is
          added asynchronously at unpredictable times. Therefore we need
          additional headroom in the queue in order to avoid deadlocks.""")
|
acme-master
|
acme/agents/jax/impala/config.py
|