| python_code (string, 0-780k chars) | repo_name (string, 7-38 chars) | file_path (string, 5-103 chars) |
|---|---|---|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Importance-weighted actor-learner architecture (IMPALA) agent."""
from acme.agents.jax.impala.builder import IMPALABuilder
from acme.agents.jax.impala.config import IMPALAConfig
from acme.agents.jax.impala.learning import IMPALALearner
from acme.agents.jax.impala.networks import IMPALANetworks
from acme.agents.jax.impala.networks import make_atari_networks
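# A minimal wiring sketch of the exported API (illustrative only; it assumes
# `IMPALAConfig()` can be constructed with default values and that `env_spec`
# is an Atari `specs.EnvironmentSpec`, neither of which is defined here):
#
# config = IMPALAConfig()
# networks = make_atari_networks(env_spec)
# builder = IMPALABuilder(config)
# policy = builder.make_policy(networks, env_spec, evaluation=False)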
| acme-master | acme/agents/jax/impala/__init__.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some types/assumptions used in the IMPALA agent."""
from typing import Callable, Tuple
from acme.agents.jax.actor_core import RecurrentState
from acme.jax import networks
from acme.jax import types as jax_types
import jax.numpy as jnp
# Only simple observations & discrete action spaces for now.
Observation = jnp.ndarray
Action = int
Outputs = Tuple[Tuple[networks.Logits, networks.Value], RecurrentState]
PolicyValueInitFn = Callable[[networks.PRNGKey, RecurrentState],
networks.Params]
PolicyValueFn = Callable[[networks.Params, Observation, RecurrentState],
Outputs]
RecurrentStateFn = Callable[[jax_types.PRNGKey], RecurrentState]
| acme-master | acme/agents/jax/impala/types.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IMPALA Builder."""
from typing import Any, Callable, Generic, Iterator, List, Optional
import acme
from acme import adders
from acme import core
from acme import specs
from acme.adders import reverb as reverb_adders
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors as actors_lib
from acme.agents.jax import builders
from acme.agents.jax.impala import acting
from acme.agents.jax.impala import config as impala_config
from acme.agents.jax.impala import learning
from acme.agents.jax.impala import networks as impala_networks
from acme.datasets import reverb as datasets
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import jax
import optax
import reverb
class IMPALABuilder(Generic[actor_core_lib.RecurrentState],
builders.ActorLearnerBuilder[impala_networks.IMPALANetworks,
acting.ImpalaPolicy,
reverb.ReplaySample]):
"""IMPALA Builder."""
def __init__(
self,
config: impala_config.IMPALAConfig,
table_extension: Optional[Callable[[], Any]] = None,
):
"""Creates an IMPALA learner."""
self._config = config
self._sequence_length = self._config.sequence_length
self._table_extension = table_extension
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: acting.ImpalaPolicy,
) -> List[reverb.Table]:
"""The queue; use XData or INFO log."""
dummy_actor_state = policy.init(jax.random.PRNGKey(0))
signature = reverb_adders.SequenceAdder.signature(
environment_spec,
policy.get_extras(dummy_actor_state),
sequence_length=self._config.sequence_length)
# Maybe create rate limiter.
# Setting the samples_per_insert ratio to less than the default of 1.0 allows
# the agent to drop data in favour of computing its learner updates from the
# most up-to-date policies.
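# For example (illustrative numbers): with samples_per_insert=0.5 the limiter
# admits roughly two inserted sequences per learner sample; since each item in
# the queue is sampled at most once, about half of the inserted data is dropped
# in favour of fresher sequences.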
samples_per_insert = self._config.samples_per_insert
if samples_per_insert:
if samples_per_insert > 1.0 or samples_per_insert <= 0.0:
raise ValueError(
'Impala requires a samples_per_insert ratio in the range (0, 1],'
f' but received {samples_per_insert}.')
limiter = reverb.rate_limiters.SampleToInsertRatio(
samples_per_insert=samples_per_insert,
min_size_to_sample=1,
error_buffer=self._config.batch_size)
else:
limiter = reverb.rate_limiters.MinSize(1)
table_extensions = []
if self._table_extension is not None:
table_extensions = [self._table_extension()]
queue = reverb.Table(
name=self._config.replay_table_name,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._config.max_queue_size,
max_times_sampled=1,
rate_limiter=limiter,
extensions=table_extensions,
signature=signature)
return [queue]
def make_dataset_iterator(
self, replay_client: reverb.Client) -> Iterator[reverb.ReplaySample]:
"""Creates a dataset."""
batch_size_per_learner = self._config.batch_size // jax.process_count()
batch_size_per_device, ragged = divmod(self._config.batch_size,
jax.device_count())
if ragged:
raise ValueError(
'Learner batch size must be divisible by total number of devices!')
dataset = datasets.make_reverb_dataset(
table=self._config.replay_table_name,
server_address=replay_client.server_address,
batch_size=batch_size_per_device,
num_parallel_calls=None,
max_in_flight_samples_per_worker=2 * batch_size_per_learner)
return utils.multi_device_put(dataset.as_numpy_iterator(),
jax.local_devices())
def make_adder(
self,
replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[acting.ImpalaPolicy],
) -> Optional[adders.Adder]:
"""Creates an adder which handles observations."""
del environment_spec, policy
# Note that the last transition in the sequence is used for bootstrapping
# only and is ignored otherwise. So we need to make sure that sequences
# overlap on one transition, thus "-1" in the period length computation.
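# For example (illustrative numbers, not defaults): with sequence_length=4 and
# period=3 the adder writes [t0, t1, t2, t3], [t3, t4, t5, t6], ..., so the
# bootstrapping step of one sequence is the first step of the next.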
return reverb_adders.SequenceAdder(
client=replay_client,
priority_fns={self._config.replay_table_name: None},
period=self._config.sequence_period or (self._sequence_length - 1),
sequence_length=self._sequence_length,
)
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: impala_networks.IMPALANetworks,
dataset: Iterator[reverb.ReplaySample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
del environment_spec, replay_client
optimizer = optax.chain(
optax.clip_by_global_norm(self._config.max_gradient_norm),
optax.adam(
self._config.learning_rate,
b1=self._config.adam_momentum_decay,
b2=self._config.adam_variance_decay,
eps=self._config.adam_eps,
eps_root=self._config.adam_eps_root))
return learning.IMPALALearner(
networks=networks,
iterator=dataset,
optimizer=optimizer,
random_key=random_key,
discount=self._config.discount,
entropy_cost=self._config.entropy_cost,
baseline_cost=self._config.baseline_cost,
max_abs_reward=self._config.max_abs_reward,
counter=counter,
logger=logger_fn('learner'),
)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: acting.ImpalaPolicy,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> acme.Actor:
del environment_spec
variable_client = variable_utils.VariableClient(
client=variable_source,
key='network',
update_period=self._config.variable_update_period)
return actors_lib.GenericActor(policy, random_key, variable_client, adder)
def make_policy(self,
networks: impala_networks.IMPALANetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> acting.ImpalaPolicy:
return acting.get_actor_core(networks, environment_spec, evaluation)
| acme-master | acme/agents/jax/impala/builder.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IMPALA networks definition."""
from acme import specs
from acme.jax import networks as networks_lib
IMPALANetworks = networks_lib.UnrollableNetwork
def make_atari_networks(env_spec: specs.EnvironmentSpec) -> IMPALANetworks:
"""Builds default IMPALA networks for Atari games."""
def make_core_module() -> networks_lib.DeepIMPALAAtariNetwork:
return networks_lib.DeepIMPALAAtariNetwork(env_spec.actions.num_values)
return networks_lib.make_unrollable_network(env_spec, make_core_module)
| acme-master | acme/agents/jax/impala/networks.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learner for the IMPALA actor-critic agent."""
import time
from typing import Dict, Iterator, List, NamedTuple, Optional, Sequence, Tuple
from absl import logging
import acme
from acme.agents.jax.impala import networks as impala_networks
from acme.jax import losses
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
import jax
import jax.numpy as jnp
import numpy as np
import optax
import reverb
_PMAP_AXIS_NAME = 'data'
class TrainingState(NamedTuple):
"""Training state consists of network parameters and optimiser state."""
params: networks_lib.Params
opt_state: optax.OptState
class IMPALALearner(acme.Learner):
"""Learner for an importanced-weighted advantage actor-critic."""
def __init__(
self,
networks: impala_networks.IMPALANetworks,
iterator: Iterator[reverb.ReplaySample],
optimizer: optax.GradientTransformation,
random_key: networks_lib.PRNGKey,
discount: float = 0.99,
entropy_cost: float = 0.0,
baseline_cost: float = 1.0,
max_abs_reward: float = np.inf,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
devices: Optional[Sequence[jax.Device]] = None,
prefetch_size: int = 2,
):
local_devices = jax.local_devices()
process_id = jax.process_index()
logging.info('Learner process id: %s. Devices passed: %s', process_id,
devices)
logging.info('Learner process id: %s. Local devices from JAX API: %s',
process_id, local_devices)
self._devices = devices or local_devices
self._local_devices = [d for d in self._devices if d in local_devices]
self._iterator = iterator
def unroll_without_rng(
params: networks_lib.Params, observations: networks_lib.Observation,
initial_state: networks_lib.RecurrentState
) -> Tuple[networks_lib.NetworkOutput, networks_lib.RecurrentState]:
unused_rng = jax.random.PRNGKey(0)
return networks.unroll(params, unused_rng, observations, initial_state)
loss_fn = losses.impala_loss(
# TODO(b/244319884): Consider supporting the use of RNG in impala_loss.
unroll_fn=unroll_without_rng,
discount=discount,
max_abs_reward=max_abs_reward,
baseline_cost=baseline_cost,
entropy_cost=entropy_cost)
@jax.jit
def sgd_step(
state: TrainingState, sample: reverb.ReplaySample
) -> Tuple[TrainingState, Dict[str, jnp.ndarray]]:
"""Computes an SGD step, returning new state and metrics for logging."""
# Compute gradients.
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(loss_value, metrics), gradients = grad_fn(state.params, sample)
# Average gradients over pmap replicas before optimizer update.
gradients = jax.lax.pmean(gradients, _PMAP_AXIS_NAME)
# Apply updates.
updates, new_opt_state = optimizer.update(gradients, state.opt_state)
new_params = optax.apply_updates(state.params, updates)
metrics.update({
'loss': loss_value,
'param_norm': optax.global_norm(new_params),
'param_updates_norm': optax.global_norm(updates),
})
new_state = TrainingState(params=new_params, opt_state=new_opt_state)
return new_state, metrics
def make_initial_state(key: jnp.ndarray) -> TrainingState:
"""Initialises the training state (parameters and optimiser state)."""
initial_params = networks.init(key)
return TrainingState(
params=initial_params, opt_state=optimizer.init(initial_params))
# Initialise training state (parameters and optimiser state).
state = make_initial_state(random_key)
self._state = utils.replicate_in_all_devices(state, self._local_devices)
self._sgd_step = jax.pmap(
sgd_step, axis_name=_PMAP_AXIS_NAME, devices=self._devices)
# Set up logging/counting.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger(
'learner', steps_key=self._counter.get_steps_key())
def step(self):
"""Does a step of SGD and logs the results."""
samples = next(self._iterator)
# Do a batch of SGD.
start = time.time()
self._state, results = self._sgd_step(self._state, samples)
# Take results from first replica.
# NOTE: This measure will be a noisy estimate for the purposes of the logs
# as it does not pmean over all devices.
results = utils.get_from_first_device(results)
# Update our counts and record them.
counts = self._counter.increment(steps=1, time_elapsed=time.time() - start)
# Maybe write logs.
self._logger.write({**results, **counts})
def get_variables(self, names: Sequence[str]) -> List[networks_lib.Params]:
# Return first replica of parameters.
return utils.get_from_first_device([self._state.params], as_numpy=False)
def save(self) -> TrainingState:
# Serialize only the first replica of parameters and optimizer state.
return utils.get_from_first_device(self._state)
def restore(self, state: TrainingState):
self._state = utils.replicate_in_all_devices(state, self._local_devices)
| acme-master | acme/agents/jax/impala/learning.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IMPALA actor implementation."""
from typing import Generic, Mapping, Tuple
from acme import specs
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax.impala import networks as impala_networks
from acme.jax import networks as networks_lib
from acme.jax import types as jax_types
import chex
import jax
import jax.numpy as jnp
ImpalaExtras = Mapping[str, jnp.ndarray]
@chex.dataclass(frozen=True, mappable_dataclass=False)
class ImpalaActorState(Generic[actor_core_lib.RecurrentState]):
rng: jax_types.PRNGKey
logits: networks_lib.Logits
recurrent_state: actor_core_lib.RecurrentState
prev_recurrent_state: actor_core_lib.RecurrentState
ImpalaPolicy = actor_core_lib.ActorCore[
ImpalaActorState[actor_core_lib.RecurrentState], ImpalaExtras]
def get_actor_core(
networks: impala_networks.IMPALANetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False,
) -> ImpalaPolicy:
"""Creates an Impala ActorCore."""
dummy_logits = jnp.zeros(environment_spec.actions.num_values)
def init(
rng: jax_types.PRNGKey
) -> ImpalaActorState[actor_core_lib.RecurrentState]:
rng, init_state_rng = jax.random.split(rng)
initial_state = networks.init_recurrent_state(init_state_rng, None)
return ImpalaActorState(
rng=rng,
logits=dummy_logits,
recurrent_state=initial_state,
prev_recurrent_state=initial_state)
def select_action(
params: networks_lib.Params,
observation: networks_lib.Observation,
state: ImpalaActorState[actor_core_lib.RecurrentState],
) -> Tuple[networks_lib.Action,
ImpalaActorState[actor_core_lib.RecurrentState]]:
rng, apply_rng, policy_rng = jax.random.split(state.rng, 3)
(logits, _), new_recurrent_state = networks.apply(
params,
apply_rng,
observation,
state.recurrent_state,
)
if evaluation:
action = jnp.argmax(logits, axis=-1)
else:
action = jax.random.categorical(policy_rng, logits)
return action, ImpalaActorState(
rng=rng,
logits=logits,
recurrent_state=new_recurrent_state,
prev_recurrent_state=state.recurrent_state)
def get_extras(
state: ImpalaActorState[actor_core_lib.RecurrentState]) -> ImpalaExtras:
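# The extras stored alongside each transition are the behaviour logits (used
# by the learner for the V-trace importance weights) and the recurrent state
# from *before* this step, so the learner can re-unroll the network from the
# same starting state.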
return {'logits': state.logits, 'core_state': state.prev_recurrent_state}
return actor_core_lib.ActorCore(
init=init, select_action=select_action, get_extras=get_extras)
| acme-master | acme/agents/jax/impala/acting.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RND config."""
import dataclasses
@dataclasses.dataclass
class RNDConfig:
"""Configuration options for RND."""
# Learning rate for the predictor.
predictor_learning_rate: float = 1e-4
# If True, the direct RL algorithm uses the SequenceAdder data format.
is_sequence_based: bool = False
# How many gradient updates to perform per step.
num_sgd_steps_per_step: int = 1
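# Example (hypothetical values): pairing RND with a sequence-based direct RL
# agent while keeping the default predictor learning rate.
#
# config = RNDConfig(is_sequence_based=True, num_sgd_steps_per_step=1)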
| acme-master | acme/agents/jax/rnd/config.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RND agent."""
from acme.agents.jax.rnd.builder import RNDBuilder
from acme.agents.jax.rnd.config import RNDConfig
from acme.agents.jax.rnd.learning import rnd_loss
from acme.agents.jax.rnd.learning import rnd_update_step
from acme.agents.jax.rnd.learning import RNDLearner
from acme.agents.jax.rnd.learning import RNDTrainingState
from acme.agents.jax.rnd.networks import compute_rnd_reward
from acme.agents.jax.rnd.networks import make_networks
from acme.agents.jax.rnd.networks import rnd_reward_fn
from acme.agents.jax.rnd.networks import RNDNetworks
| acme-master | acme/agents/jax/rnd/__init__.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RND Builder."""
from typing import Callable, Generic, Iterator, List, Optional
from acme import adders
from acme import core
from acme import specs
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import builders
from acme.agents.jax.rnd import config as rnd_config
from acme.agents.jax.rnd import learning as rnd_learning
from acme.agents.jax.rnd import networks as rnd_networks
from acme.jax import networks as networks_lib
from acme.jax.types import Policy
from acme.utils import counting
from acme.utils import loggers
import jax
import optax
import reverb
class RNDBuilder(Generic[rnd_networks.DirectRLNetworks, Policy],
builders.ActorLearnerBuilder[rnd_networks.RNDNetworks, Policy,
reverb.ReplaySample]):
"""RND Builder."""
def __init__(
self,
rl_agent: builders.ActorLearnerBuilder[rnd_networks.DirectRLNetworks,
Policy, reverb.ReplaySample],
config: rnd_config.RNDConfig,
logger_fn: Callable[[], loggers.Logger] = lambda: None,
):
"""Implements a builder for RND using rl_agent as forward RL algorithm.
Args:
rl_agent: The standard RL agent used by RND to optimize the generator.
config: A config with RND HPs.
logger_fn: a logger factory for the rl_agent's learner.
"""
self._rl_agent = rl_agent
self._config = config
self._logger_fn = logger_fn
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: rnd_networks.RNDNetworks[rnd_networks.DirectRLNetworks],
dataset: Iterator[reverb.ReplaySample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
direct_rl_learner_key, rnd_learner_key = jax.random.split(random_key)
counter = counter or counting.Counter()
direct_rl_counter = counting.Counter(counter, 'direct_rl')
def direct_rl_learner_factory(
networks: rnd_networks.DirectRLNetworks,
dataset: Iterator[reverb.ReplaySample]) -> core.Learner:
return self._rl_agent.make_learner(
direct_rl_learner_key,
networks,
dataset,
logger_fn=lambda name: self._logger_fn(),
environment_spec=environment_spec,
replay_client=replay_client,
counter=direct_rl_counter)
optimizer = optax.adam(learning_rate=self._config.predictor_learning_rate)
return rnd_learning.RNDLearner(
direct_rl_learner_factory=direct_rl_learner_factory,
iterator=dataset,
optimizer=optimizer,
rnd_network=networks,
rng_key=rnd_learner_key,
is_sequence_based=self._config.is_sequence_based,
grad_updates_per_batch=self._config.num_sgd_steps_per_step,
counter=counter,
logger=logger_fn('learner'))
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: Policy,
) -> List[reverb.Table]:
return self._rl_agent.make_replay_tables(environment_spec, policy)
def make_dataset_iterator( # pytype: disable=signature-mismatch # overriding-return-type-checks
self,
replay_client: reverb.Client) -> Optional[Iterator[reverb.ReplaySample]]:
return self._rl_agent.make_dataset_iterator(replay_client)
def make_adder(self, replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[Policy]) -> Optional[adders.Adder]:
return self._rl_agent.make_adder(replay_client, environment_spec, policy)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: Policy,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> core.Actor:
return self._rl_agent.make_actor(random_key, policy, environment_spec,
variable_source, adder)
def make_policy(self,
networks: rnd_networks.RNDNetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> actor_core_lib.FeedForwardPolicy:
"""Construct the policy."""
return self._rl_agent.make_policy(networks.direct_rl_networks,
environment_spec, evaluation)
| acme-master | acme/agents/jax/rnd/builder.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks definitions for the BC agent."""
import dataclasses
import functools
from typing import Callable, Generic, Tuple, TypeVar
from acme import specs
from acme import types
from acme.jax import networks as networks_lib
from acme.jax import utils
import haiku as hk
import jax.numpy as jnp
DirectRLNetworks = TypeVar('DirectRLNetworks')
@dataclasses.dataclass
class RNDNetworks(Generic[DirectRLNetworks]):
"""Container of RND networks factories."""
target: networks_lib.FeedForwardNetwork
predictor: networks_lib.FeedForwardNetwork
# Function from predictor output, target output, and original reward to reward
get_reward: Callable[
[networks_lib.NetworkOutput, networks_lib.NetworkOutput, jnp.ndarray],
jnp.ndarray]
direct_rl_networks: DirectRLNetworks = None
# See Appendix A.2 of https://arxiv.org/pdf/1810.12894.pdf
def rnd_reward_fn(
predictor_output: networks_lib.NetworkOutput,
target_output: networks_lib.NetworkOutput,
original_reward: jnp.ndarray,
intrinsic_reward_coefficient: float = 1.0,
extrinsic_reward_coefficient: float = 0.0,
) -> jnp.ndarray:
intrinsic_reward = jnp.mean(
jnp.square(predictor_output - target_output), axis=-1)
return (intrinsic_reward_coefficient * intrinsic_reward +
extrinsic_reward_coefficient * original_reward)
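# Illustrative check (hypothetical inputs): for two embeddings of size 3 the
# intrinsic reward is the per-example mean squared error between predictor and
# target outputs, so with the default coefficients
#
# rnd_reward_fn(jnp.array([[1., 0., 0.], [0., 0., 0.]]),
#               jnp.array([[0., 0., 0.], [0., 0., 0.]]),
#               original_reward=jnp.zeros(2))
#
# returns approximately [0.333, 0.].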
def make_networks(
spec: specs.EnvironmentSpec,
direct_rl_networks: DirectRLNetworks,
layer_sizes: Tuple[int, ...] = (256, 256),
intrinsic_reward_coefficient: float = 1.0,
extrinsic_reward_coefficient: float = 0.0,
) -> RNDNetworks[DirectRLNetworks]:
"""Creates networks used by the agent and returns RNDNetworks.
Args:
spec: Environment spec.
direct_rl_networks: Networks used by a direct rl algorithm.
layer_sizes: Layer sizes.
intrinsic_reward_coefficient: Multiplier on intrinsic reward.
extrinsic_reward_coefficient: Multiplier on extrinsic reward.
Returns:
The RND networks.
"""
def _rnd_fn(obs, act):
# RND does not use the action but other variants like RED do.
del act
network = networks_lib.LayerNormMLP(list(layer_sizes))
return network(obs)
target = hk.without_apply_rng(hk.transform(_rnd_fn))
predictor = hk.without_apply_rng(hk.transform(_rnd_fn))
# Create dummy observations and actions to create network parameters.
dummy_obs = utils.zeros_like(spec.observations)
dummy_obs = utils.add_batch_dim(dummy_obs)
return RNDNetworks(
target=networks_lib.FeedForwardNetwork(
lambda key: target.init(key, dummy_obs, ()), target.apply),
predictor=networks_lib.FeedForwardNetwork(
lambda key: predictor.init(key, dummy_obs, ()), predictor.apply),
direct_rl_networks=direct_rl_networks,
get_reward=functools.partial(
rnd_reward_fn,
intrinsic_reward_coefficient=intrinsic_reward_coefficient,
extrinsic_reward_coefficient=extrinsic_reward_coefficient))
def compute_rnd_reward(predictor_params: networks_lib.Params,
target_params: networks_lib.Params,
transitions: types.Transition,
networks: RNDNetworks) -> jnp.ndarray:
"""Computes the intrinsic RND reward for a given transition.
Args:
predictor_params: Parameters of the predictor network.
target_params: Parameters of the target network.
transitions: The sample to compute rewards for.
networks: RND networks
Returns:
The rewards as an ndarray.
"""
target_output = networks.target.apply(target_params, transitions.observation,
transitions.action)
predictor_output = networks.predictor.apply(predictor_params,
transitions.observation,
transitions.action)
return networks.get_reward(predictor_output, target_output,
transitions.reward)
| acme-master | acme/agents/jax/rnd/networks.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RND learner implementation."""
import functools
import time
from typing import Any, Callable, Dict, Iterator, List, NamedTuple, Optional, Tuple
import acme
from acme import types
from acme.agents.jax.rnd import networks as rnd_networks
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
from acme.utils import reverb_utils
import jax
import jax.numpy as jnp
import optax
import reverb
class RNDTrainingState(NamedTuple):
"""Contains training state for the learner."""
optimizer_state: optax.OptState
params: networks_lib.Params
target_params: networks_lib.Params
steps: int
class GlobalTrainingState(NamedTuple):
"""Contains training state of the RND learner."""
rewarder_state: RNDTrainingState
learner_state: Any
RNDLoss = Callable[[networks_lib.Params, networks_lib.Params, types.Transition],
float]
def rnd_update_step(
state: RNDTrainingState, transitions: types.Transition,
loss_fn: RNDLoss, optimizer: optax.GradientTransformation
) -> Tuple[RNDTrainingState, Dict[str, jnp.ndarray]]:
"""Run an update steps on the given transitions.
Args:
state: The learner state.
transitions: Transitions to update on.
loss_fn: The loss function.
optimizer: The optimizer of the predictor network.
Returns:
A new state and metrics.
"""
loss, grads = jax.value_and_grad(loss_fn)(
state.params,
state.target_params,
transitions=transitions)
update, optimizer_state = optimizer.update(grads, state.optimizer_state)
params = optax.apply_updates(state.params, update)
new_state = RNDTrainingState(
optimizer_state=optimizer_state,
params=params,
target_params=state.target_params,
steps=state.steps + 1,
)
return new_state, {'rnd_loss': loss}
def rnd_loss(
predictor_params: networks_lib.Params,
target_params: networks_lib.Params,
transitions: types.Transition,
networks: rnd_networks.RNDNetworks,
) -> float:
"""The Random Network Distillation loss.
See https://arxiv.org/pdf/1810.12894.pdf A.2
Args:
predictor_params: Parameters of the predictor
target_params: Parameters of the target
transitions: Transitions to compute the loss on.
networks: RND networks
Returns:
The MSE loss as a float.
"""
target_output = networks.target.apply(target_params,
transitions.observation,
transitions.action)
predictor_output = networks.predictor.apply(predictor_params,
transitions.observation,
transitions.action)
return jnp.mean(jnp.square(target_output - predictor_output))
class RNDLearner(acme.Learner):
"""RND learner."""
def __init__(
self,
direct_rl_learner_factory: Callable[[Any, Iterator[reverb.ReplaySample]],
acme.Learner],
iterator: Iterator[reverb.ReplaySample],
optimizer: optax.GradientTransformation,
rnd_network: rnd_networks.RNDNetworks,
rng_key: jnp.ndarray,
grad_updates_per_batch: int,
is_sequence_based: bool,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None):
self._is_sequence_based = is_sequence_based
target_key, predictor_key = jax.random.split(rng_key)
target_params = rnd_network.target.init(target_key)
predictor_params = rnd_network.predictor.init(predictor_key)
optimizer_state = optimizer.init(predictor_params)
self._state = RNDTrainingState(
optimizer_state=optimizer_state,
params=predictor_params,
target_params=target_params,
steps=0)
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger(
'learner',
asynchronous=True,
serialize_fn=utils.fetch_devicearray,
steps_key=self._counter.get_steps_key())
loss = functools.partial(rnd_loss, networks=rnd_network)
self._update = functools.partial(rnd_update_step,
loss_fn=loss,
optimizer=optimizer)
self._update = utils.process_multiple_batches(self._update,
grad_updates_per_batch)
self._update = jax.jit(self._update)
self._get_reward = jax.jit(
functools.partial(
rnd_networks.compute_rnd_reward, networks=rnd_network))
# Generator expression that works the same as an iterator.
# https://pymbook.readthedocs.io/en/latest/igd.html#generator-expressions
updated_iterator = (self._process_sample(sample) for sample in iterator)
self._direct_rl_learner = direct_rl_learner_factory(
rnd_network.direct_rl_networks, updated_iterator)
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
def _process_sample(self, sample: reverb.ReplaySample) -> reverb.ReplaySample:
"""Uses the replay sample to train and update its reward.
Args:
sample: Replay sample to train on.
Returns:
The sample replay sample with an updated reward.
"""
transitions = reverb_utils.replay_sample_to_sars_transition(
sample, is_sequence=self._is_sequence_based)
self._state, metrics = self._update(self._state, transitions)
rewards = self._get_reward(self._state.params, self._state.target_params,
transitions)
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Increment counts and record the current time.
counts = self._counter.increment(steps=1, walltime=elapsed_time)
# Attempts to write the logs.
self._logger.write({**metrics, **counts})
return sample._replace(data=sample.data._replace(reward=rewards))
def step(self):
self._direct_rl_learner.step()
def get_variables(self, names: List[str]) -> List[Any]:
rnd_variables = {
'target_params': self._state.target_params,
'predictor_params': self._state.params
}
learner_names = [name for name in names if name not in rnd_variables]
learner_dict = {}
if learner_names:
learner_dict = dict(
zip(learner_names,
self._direct_rl_learner.get_variables(learner_names)))
variables = [
rnd_variables.get(name, learner_dict.get(name, None)) for name in names
]
return variables
def save(self) -> GlobalTrainingState:
return GlobalTrainingState(
rewarder_state=self._state,
learner_state=self._direct_rl_learner.save())
def restore(self, state: GlobalTrainingState):
self._state = state.rewarder_state
self._direct_rl_learner.restore(state.learner_state)
| acme-master | acme/agents/jax/rnd/learning.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN config."""
import dataclasses
from typing import Callable, Optional, Sequence, Union
from acme.adders import reverb as adders_reverb
import jax.numpy as jnp
import numpy as np
@dataclasses.dataclass
class DQNConfig:
"""Configuration options for DQN agent.
Attributes:
epsilon: for use by epsilon-greedy policies. If multiple, the epsilons are
alternated randomly per-episode.
eval_epsilon: for use by evaluation epsilon-greedy policies.
seed: Random seed.
learning_rate: Learning rate for Adam optimizer. Could be a number or a
function defining a schedule.
adam_eps: Epsilon for Adam optimizer.
discount: Discount rate applied to value per timestep.
n_step: N-step TD learning.
target_update_period: Update target network every period.
max_gradient_norm: For gradient clipping.
batch_size: Number of transitions per batch.
min_replay_size: Minimum replay size.
max_replay_size: Maximum replay size.
replay_table_name: Reverb table, defaults to DEFAULT_PRIORITY_TABLE.
importance_sampling_exponent: Importance sampling for replay.
priority_exponent: Priority exponent for replay.
prefetch_size: Prefetch size for reverb replay performance.
samples_per_insert: Ratio of learning samples to insert.
samples_per_insert_tolerance_rate: Rate to be used for
the SampleToInsertRatio rate limiter tolerance.
See a formula in make_replay_tables for more details.
num_sgd_steps_per_step: How many gradient updates to perform per learner
step.
"""
epsilon: Union[float, Sequence[float]] = 0.05
eval_epsilon: Optional[float] = None
# TODO(b/191706065): update all clients and remove this field.
seed: int = 1
# Learning rule
learning_rate: Union[float, Callable[[int], float]] = 1e-3
adam_eps: float = 1e-8 # Eps for Adam optimizer.
discount: float = 0.99 # Discount rate applied to value per timestep.
n_step: int = 5 # N-step TD learning.
target_update_period: int = 100 # Update target network every period.
max_gradient_norm: float = np.inf # For gradient clipping.
# Replay options
batch_size: int = 256
min_replay_size: int = 1_000
max_replay_size: int = 1_000_000
replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
importance_sampling_exponent: float = 0.2
priority_exponent: float = 0.6
prefetch_size: int = 4
samples_per_insert: float = 0.5
samples_per_insert_tolerance_rate: float = 0.1
num_sgd_steps_per_step: int = 1
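# Example (hypothetical values): a config overriding a few defaults; any
# unspecified field keeps the value documented above.
#
# config = DQNConfig(learning_rate=1e-4, n_step=3, epsilon=(0.01, 0.05, 0.1))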
def logspace_epsilons(
num_epsilons: int, epsilon: float = 0.017
) -> Union[Sequence[float], jnp.ndarray]:
"""`num_epsilons` of logspace-distributed values, with median `epsilon`."""
if num_epsilons <= 1:
return (epsilon,)
return jnp.logspace(1, 8, num_epsilons, base=epsilon ** (2./9.))
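# Worked example: with the default median epsilon of 0.017 the base is
# 0.017 ** (2. / 9.) ~= 0.40, so logspace_epsilons(8) yields roughly
# (0.40, 0.16, 0.066, 0.027, 0.011, 0.0044, 0.0018, 0.00072); the geometric
# median of these values is 0.40 ** 4.5 ~= 0.017, i.e. `epsilon` itself.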
| acme-master | acme/agents/jax/dqn/config.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SgdLearner takes steps of SGD on a LossFn."""
import functools
import time
from typing import Dict, Iterator, List, NamedTuple, Optional, Tuple
import acme
from acme.adders import reverb as adders
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.utils import async_utils
from acme.utils import counting
from acme.utils import loggers
import jax
import jax.numpy as jnp
import optax
import reverb
import tree
import typing_extensions
# The pmap axis name. Data means data parallelization.
PMAP_AXIS_NAME = 'data'
class ReverbUpdate(NamedTuple):
"""Tuple for updating reverb priority information."""
keys: jnp.ndarray
priorities: jnp.ndarray
class LossExtra(NamedTuple):
"""Extra information that is returned along with loss value."""
metrics: Dict[str, jax.Array]
# New optional updated priorities for the samples.
reverb_priorities: Optional[jax.Array] = None
class LossFn(typing_extensions.Protocol):
"""A LossFn calculates a loss on a single batch of data."""
def __call__(
self,
network: networks_lib.TypedFeedForwardNetwork,
params: networks_lib.Params,
target_params: networks_lib.Params,
batch: reverb.ReplaySample,
key: networks_lib.PRNGKey,
) -> Tuple[jax.Array, LossExtra]:
"""Calculates a loss on a single batch of data."""
class TrainingState(NamedTuple):
"""Holds the agent's training state."""
params: networks_lib.Params
target_params: networks_lib.Params
opt_state: optax.OptState
steps: int
rng_key: networks_lib.PRNGKey
class SGDLearner(acme.Learner):
"""An Acme learner based around SGD on batches.
This learner currently supports optional prioritized replay and assumes a
TrainingState as described above.
"""
def __init__(self,
network: networks_lib.TypedFeedForwardNetwork,
loss_fn: LossFn,
optimizer: optax.GradientTransformation,
data_iterator: Iterator[utils.PrefetchingSplit],
target_update_period: int,
random_key: networks_lib.PRNGKey,
replay_client: Optional[reverb.Client] = None,
replay_table_name: str = adders.DEFAULT_PRIORITY_TABLE,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
num_sgd_steps_per_step: int = 1):
"""Initialize the SGD learner."""
self.network = network
# Internalize the loss_fn with network.
self._loss = jax.jit(functools.partial(loss_fn, self.network))
# sgd_step computes the loss, applies the optimizer update and periodically
# updates the target network.
def sgd_step(state: TrainingState,
batch: reverb.ReplaySample) -> Tuple[TrainingState, LossExtra]:
next_rng_key, rng_key = jax.random.split(state.rng_key)
# Implements one SGD step of the loss and updates training state
(loss, extra), grads = jax.value_and_grad(
self._loss, has_aux=True)(state.params, state.target_params, batch,
rng_key)
loss = jax.lax.pmean(loss, axis_name=PMAP_AXIS_NAME)
# Average gradients over pmap replicas before optimizer update.
grads = jax.lax.pmean(grads, axis_name=PMAP_AXIS_NAME)
# Apply the optimizer updates
updates, new_opt_state = optimizer.update(grads, state.opt_state)
new_params = optax.apply_updates(state.params, updates)
extra.metrics.update({'total_loss': loss})
# Periodically update target networks.
steps = state.steps + 1
target_params = optax.periodic_update(new_params, state.target_params, # pytype: disable=wrong-arg-types # numpy-scalars
steps, target_update_period)
new_training_state = TrainingState(
new_params, target_params, new_opt_state, steps, next_rng_key)
return new_training_state, extra
def postprocess_aux(extra: LossExtra) -> LossExtra:
reverb_priorities = jax.tree_util.tree_map(
lambda a: jnp.reshape(a, (-1, *a.shape[2:])), extra.reverb_priorities)
return extra._replace(
metrics=jax.tree_util.tree_map(jnp.mean, extra.metrics),
reverb_priorities=reverb_priorities)
self._num_sgd_steps_per_step = num_sgd_steps_per_step
sgd_step = utils.process_multiple_batches(sgd_step, num_sgd_steps_per_step,
postprocess_aux)
self._sgd_step = jax.pmap(
sgd_step, axis_name=PMAP_AXIS_NAME, devices=jax.devices())
# Internalise agent components
self._data_iterator = data_iterator
self._target_update_period = target_update_period
self._counter = counter or counting.Counter()
self._logger = logger or loggers.TerminalLogger('learner', time_delta=1.)
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
# Initialize the network parameters
key_params, key_target, key_state = jax.random.split(random_key, 3)
initial_params = self.network.init(key_params)
initial_target_params = self.network.init(key_target)
state = TrainingState(
params=initial_params,
target_params=initial_target_params,
opt_state=optimizer.init(initial_params),
steps=0,
rng_key=key_state,
)
self._state = utils.replicate_in_all_devices(state, jax.local_devices())
# Update replay priorities
def update_priorities(reverb_update: ReverbUpdate) -> None:
if replay_client is None:
return
keys, priorities = tree.map_structure(
# Fetch array and combine device and batch dimensions.
lambda x: utils.fetch_devicearray(x).reshape((-1,) + x.shape[2:]),
(reverb_update.keys, reverb_update.priorities))
replay_client.mutate_priorities(
table=replay_table_name,
updates=dict(zip(keys, priorities)))
self._replay_client = replay_client
self._async_priority_updater = async_utils.AsyncExecutor(update_priorities)
self._current_step = 0
def step(self):
"""Takes one SGD step on the learner."""
with jax.profiler.StepTraceAnnotation('step', step_num=self._current_step):
prefetching_split = next(self._data_iterator)
# In this case the host property of the prefetching split contains only
# replay keys and the device property is the prefetched full original
# sample. The keys stay on the host since they are of uint64 type.
reverb_keys = prefetching_split.host
batch: reverb.ReplaySample = prefetching_split.device
self._state, extra = self._sgd_step(self._state, batch)
# Compute elapsed time.
timestamp = time.time()
elapsed = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
if self._replay_client and extra.reverb_priorities is not None:
reverb_update = ReverbUpdate(reverb_keys, extra.reverb_priorities)
self._async_priority_updater.put(reverb_update)
steps_per_sec = (self._num_sgd_steps_per_step / elapsed) if elapsed else 0
self._current_step, metrics = utils.get_from_first_device(
(self._state.steps, extra.metrics))
metrics['steps_per_second'] = steps_per_sec
# Update our counts and record it.
result = self._counter.increment(
steps=self._num_sgd_steps_per_step, walltime=elapsed)
result.update(metrics)
self._logger.write(result)
def get_variables(self, names: List[str]) -> List[networks_lib.Params]:
# Return first replica of parameters.
return utils.get_from_first_device([self._state.params])
def save(self) -> TrainingState:
# Serialize only the first replica of parameters and optimizer state.
return utils.get_from_first_device(self._state)
def restore(self, state: TrainingState):
self._state = utils.replicate_in_all_devices(state, jax.local_devices())
| acme-master | acme/agents/jax/dqn/learning_lib.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of a deep Q-networks (DQN) agent."""
from acme.agents.jax.dqn.actor import behavior_policy
from acme.agents.jax.dqn.actor import default_behavior_policy
from acme.agents.jax.dqn.actor import DQNPolicy
from acme.agents.jax.dqn.actor import Epsilon
from acme.agents.jax.dqn.actor import EpsilonPolicy
from acme.agents.jax.dqn.builder import DistributionalDQNBuilder
from acme.agents.jax.dqn.builder import DQNBuilder
from acme.agents.jax.dqn.config import DQNConfig
from acme.agents.jax.dqn.learning import DQNLearner
from acme.agents.jax.dqn.learning_lib import LossExtra
from acme.agents.jax.dqn.learning_lib import LossFn
from acme.agents.jax.dqn.learning_lib import ReverbUpdate
from acme.agents.jax.dqn.learning_lib import SGDLearner
from acme.agents.jax.dqn.losses import PrioritizedCategoricalDoubleQLearning
from acme.agents.jax.dqn.losses import PrioritizedDoubleQLearning
from acme.agents.jax.dqn.losses import QLearning
from acme.agents.jax.dqn.losses import QrDqn
from acme.agents.jax.dqn.networks import DQNNetworks
| acme-master | acme/agents/jax/dqn/__init__.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN Builder."""
from typing import Iterator, List, Optional, Sequence
from acme import adders
from acme import core
from acme import specs
from acme.adders import reverb as adders_reverb
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.agents.jax.dqn import actor as dqn_actor
from acme.agents.jax.dqn import config as dqn_config
from acme.agents.jax.dqn import learning_lib
from acme.agents.jax.dqn import networks as dqn_networks
from acme.datasets import reverb as datasets
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import jax
import optax
import reverb
from reverb import rate_limiters
class DQNBuilder(builders.ActorLearnerBuilder[dqn_networks.DQNNetworks,
dqn_actor.DQNPolicy,
utils.PrefetchingSplit]):
"""DQN Builder."""
def __init__(self,
config: dqn_config.DQNConfig,
loss_fn: learning_lib.LossFn,
actor_backend: Optional[str] = 'cpu'):
"""Creates DQN learner and the behavior policies.
Args:
config: DQN config.
loss_fn: A loss function.
actor_backend: Which backend to use when jitting the policy.
"""
self._config = config
self._loss_fn = loss_fn
self._actor_backend = actor_backend
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: dqn_networks.DQNNetworks,
dataset: Iterator[utils.PrefetchingSplit],
logger_fn: loggers.LoggerFactory,
environment_spec: Optional[specs.EnvironmentSpec],
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
del environment_spec
return learning_lib.SGDLearner(
network=networks.policy_network,
random_key=random_key,
optimizer=optax.adam(
self._config.learning_rate, eps=self._config.adam_eps),
target_update_period=self._config.target_update_period,
data_iterator=dataset,
loss_fn=self._loss_fn,
replay_client=replay_client,
replay_table_name=self._config.replay_table_name,
counter=counter,
num_sgd_steps_per_step=self._config.num_sgd_steps_per_step,
logger=logger_fn('learner'))
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: dqn_actor.DQNPolicy,
environment_spec: Optional[specs.EnvironmentSpec],
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> core.Actor:
del environment_spec
assert variable_source is not None
# Inference happens on CPU, so it's better to move variables there too.
variable_client = variable_utils.VariableClient(
variable_source, '', device='cpu')
return actors.GenericActor(
actor=policy,
random_key=random_key,
variable_client=variable_client,
adder=adder,
backend=self._actor_backend)
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: dqn_actor.DQNPolicy,
) -> List[reverb.Table]:
"""Creates reverb tables for the algorithm."""
del policy
samples_per_insert_tolerance = (
self._config.samples_per_insert_tolerance_rate *
self._config.samples_per_insert)
error_buffer = self._config.min_replay_size * samples_per_insert_tolerance
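# For example, with the default config values (min_replay_size=1000,
# samples_per_insert=0.5, samples_per_insert_tolerance_rate=0.1) the tolerance
# is 0.05 and the error buffer is 50, i.e. the sample/insert ratio may drift by
# roughly 50 items before the rate limiter blocks.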
limiter = rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._config.min_replay_size,
samples_per_insert=self._config.samples_per_insert,
error_buffer=error_buffer)
return [
reverb.Table(
name=self._config.replay_table_name,
sampler=reverb.selectors.Prioritized(
self._config.priority_exponent),
remover=reverb.selectors.Fifo(),
max_size=self._config.max_replay_size,
rate_limiter=limiter,
signature=adders_reverb.NStepTransitionAdder.signature(
environment_spec))
]
@property
def batch_size_per_device(self) -> int:
"""Splits the batch size across local devices."""
# Account for the number of SGD steps per step.
batch_size = self._config.batch_size * self._config.num_sgd_steps_per_step
num_devices = jax.local_device_count()
# TODO(bshahr): Using jax.device_count will not be valid when colocating
# learning and inference.
if batch_size % num_devices != 0:
raise ValueError(
'The DQN learner received a batch size that is not divisible by the '
f'number of available learner devices. Got: batch_size={batch_size}, '
f'num_devices={num_devices}.')
return batch_size // num_devices
def make_dataset_iterator(
self,
replay_client: reverb.Client,
) -> Iterator[utils.PrefetchingSplit]:
"""Creates a dataset iterator to use for learning."""
dataset = datasets.make_reverb_dataset(
table=self._config.replay_table_name,
server_address=replay_client.server_address,
batch_size=self.batch_size_per_device,
prefetch_size=self._config.prefetch_size)
return utils.multi_device_put(
dataset.as_numpy_iterator(),
jax.local_devices(),
split_fn=utils.keep_key_on_host)
def make_adder(
self,
replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[dqn_actor.DQNPolicy],
) -> Optional[adders.Adder]:
"""Creates an adder which handles observations."""
del environment_spec, policy
return adders_reverb.NStepTransitionAdder(
priority_fns={self._config.replay_table_name: None},
client=replay_client,
n_step=self._config.n_step,
discount=self._config.discount)
def _policy_epsilons(self, evaluation: bool) -> Sequence[float]:
if evaluation and self._config.eval_epsilon:
epsilon = self._config.eval_epsilon
else:
epsilon = self._config.epsilon
epsilons = epsilon if isinstance(epsilon, Sequence) else (epsilon,)
return epsilons
def make_policy(self,
networks: dqn_networks.DQNNetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> dqn_actor.DQNPolicy:
"""Creates the policy."""
del environment_spec
return dqn_actor.alternating_epsilons_actor_core(
dqn_actor.behavior_policy(networks),
epsilons=self._policy_epsilons(evaluation))
class DistributionalDQNBuilder(DQNBuilder):
"""Distributional DQN Builder."""
def make_policy(self,
networks: dqn_networks.DQNNetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> dqn_actor.DQNPolicy:
"""Creates the policy.
Expects a network head that returns a tuple whose first entry represents the
Q-values.
Creates the agent policy given the collection of network components and
environment spec. An optional boolean can be given to indicate if the
policy will be used for evaluation.
Args:
networks: struct describing the networks needed to generate the policy.
environment_spec: struct describing the specs of the environment.
evaluation: when true, a version of the policy to use for evaluation
should be returned. This is algorithm-specific so if an algorithm makes
no distinction between behavior and evaluation policies this boolean may
be ignored.
Returns:
Behavior policy or evaluation policy for the agent.
"""
del environment_spec
def get_action_values(params: networks_lib.Params,
observation: networks_lib.Observation, *args,
**kwargs) -> networks_lib.NetworkOutput:
return networks.policy_network.apply(params, observation, *args,
**kwargs)[0]
typed_network = networks_lib.TypedFeedForwardNetwork(
init=networks.policy_network.init, apply=get_action_values)
behavior_policy = dqn_actor.behavior_policy(
dqn_networks.DQNNetworks(policy_network=typed_network))
return dqn_actor.alternating_epsilons_actor_core(
behavior_policy, epsilons=self._policy_epsilons(evaluation))
| acme-master | acme/agents/jax/dqn/builder.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network definitions for DQN."""
import dataclasses
from typing import Callable, Optional
from acme.jax import networks as networks_lib
from acme.jax import types
import rlax
Epsilon = float
EpsilonPolicy = Callable[[
networks_lib.Params, networks_lib.PRNGKey, networks_lib.Observation, Epsilon
], networks_lib.Action]
EpsilonSampleFn = Callable[[networks_lib.NetworkOutput, types.PRNGKey, Epsilon],
networks_lib.Action]
EpsilonLogProbFn = Callable[
[networks_lib.NetworkOutput, networks_lib.Action, Epsilon],
networks_lib.LogProb]
def default_sample_fn(action_values: networks_lib.NetworkOutput,
key: types.PRNGKey,
epsilon: Epsilon) -> networks_lib.Action:
return rlax.epsilon_greedy(epsilon).sample(key, action_values)
@dataclasses.dataclass
class DQNNetworks:
"""The network and pure functions for the DQN agent.
Attributes:
policy_network: The policy network.
sample_fn: A pure function. Samples an action based on the network output.
log_prob: A pure function. Computes log-probability for an action.
"""
policy_network: networks_lib.TypedFeedForwardNetwork
sample_fn: EpsilonSampleFn = default_sample_fn
log_prob: Optional[EpsilonLogProbFn] = None
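# Illustrative usage sketch (not part of the original module): assembling a
# DQNNetworks object around a toy linear Q-network. The network below is a
# hypothetical stand-in for a real model; only the DQNNetworks /
# TypedFeedForwardNetwork wiring is the point of the example.
def _example_make_dqn_networks(obs_dim: int = 8,
                               num_actions: int = 4) -> DQNNetworks:
  """Builds a DQNNetworks around a toy linear Q-network (illustration only)."""
  import jax.numpy as jnp
  def init(key: networks_lib.PRNGKey, *unused_args) -> networks_lib.Params:
    del key, unused_args  # The toy parameters are deterministic.
    return {'w': jnp.zeros((obs_dim, num_actions))}
  def apply(params: networks_lib.Params,
            observation: networks_lib.Observation,
            *args, **kwargs) -> networks_lib.NetworkOutput:
    del args, kwargs  # This toy network ignores is_training / key arguments.
    return jnp.dot(observation, params['w'])
  typed_network = networks_lib.TypedFeedForwardNetwork(init=init, apply=apply)
  # The default epsilon-greedy sampler (default_sample_fn) is used implicitly.
  return DQNNetworks(policy_network=typed_network)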
|
acme-master
|
acme/agents/jax/dqn/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN losses."""
import dataclasses
from typing import Tuple
from acme import types
from acme.agents.jax.dqn import learning_lib
from acme.jax import networks as networks_lib
import chex
import jax
import jax.numpy as jnp
import reverb
import rlax
@dataclasses.dataclass
class PrioritizedDoubleQLearning(learning_lib.LossFn):
"""Clipped double q learning with prioritization on TD error."""
discount: float = 0.99
importance_sampling_exponent: float = 0.2
max_abs_reward: float = 1.
huber_loss_parameter: float = 1.
def __call__(
self,
network: networks_lib.TypedFeedForwardNetwork,
params: networks_lib.Params,
target_params: networks_lib.Params,
batch: reverb.ReplaySample,
key: networks_lib.PRNGKey,
) -> Tuple[jax.Array, learning_lib.LossExtra]:
"""Calculate a loss on a single batch of data."""
transitions: types.Transition = batch.data
probs = batch.info.probability
# Forward pass.
key1, key2, key3 = jax.random.split(key, 3)
q_tm1 = network.apply(
params, transitions.observation, is_training=True, key=key1)
q_t_value = network.apply(
target_params, transitions.next_observation, is_training=True, key=key2)
q_t_selector = network.apply(
params, transitions.next_observation, is_training=True, key=key3)
# Cast and clip rewards.
d_t = (transitions.discount * self.discount).astype(jnp.float32)
r_t = jnp.clip(transitions.reward, -self.max_abs_reward,
self.max_abs_reward).astype(jnp.float32)
# Compute double Q-learning n-step TD-error.
batch_error = jax.vmap(rlax.double_q_learning)
td_error = batch_error(q_tm1, transitions.action, r_t, d_t, q_t_value,
q_t_selector)
batch_loss = rlax.huber_loss(td_error, self.huber_loss_parameter)
# Importance weighting.
importance_weights = (1. / probs).astype(jnp.float32)
importance_weights **= self.importance_sampling_exponent
importance_weights /= jnp.max(importance_weights)
# Reweight.
loss = jnp.mean(importance_weights * batch_loss) # []
extra = learning_lib.LossExtra(
metrics={}, reverb_priorities=jnp.abs(td_error).astype(jnp.float64))
return loss, extra
@dataclasses.dataclass
class QrDqn(learning_lib.LossFn):
"""Quantile Regression DQN.
https://arxiv.org/abs/1710.10044
"""
num_atoms: int = 51
huber_param: float = 1.0
def __call__(
self,
network: networks_lib.TypedFeedForwardNetwork,
params: networks_lib.Params,
target_params: networks_lib.Params,
batch: reverb.ReplaySample,
key: networks_lib.PRNGKey,
) -> Tuple[jax.Array, learning_lib.LossExtra]:
"""Calculate a loss on a single batch of data."""
transitions: types.Transition = batch.data
key1, key2 = jax.random.split(key)
_, dist_q_tm1 = network.apply(
params, transitions.observation, is_training=True, key=key1)
_, dist_q_target_t = network.apply(
target_params, transitions.next_observation, is_training=True, key=key2)
batch_size = len(transitions.observation)
chex.assert_shape(
dist_q_tm1, (
batch_size,
None,
self.num_atoms,
),
custom_message=f'Expected (batch_size, num_actions, num_atoms), got: {dist_q_tm1.shape}',
include_default_message=True)
chex.assert_shape(
dist_q_target_t, (
batch_size,
None,
self.num_atoms,
),
custom_message=f'Expected (batch_size, num_actions, num_atoms), got: {dist_q_target_t.shape}',
include_default_message=True)
# Swap distribution and action dimension, since
# rlax.quantile_q_learning expects it that way.
dist_q_tm1 = jnp.swapaxes(dist_q_tm1, 1, 2)
dist_q_target_t = jnp.swapaxes(dist_q_target_t, 1, 2)
quantiles = (
(jnp.arange(self.num_atoms, dtype=jnp.float32) + 0.5) / self.num_atoms)
batch_quantile_q_learning = jax.vmap(
rlax.quantile_q_learning, in_axes=(0, None, 0, 0, 0, 0, 0, None))
losses = batch_quantile_q_learning(
dist_q_tm1,
quantiles,
transitions.action,
transitions.reward,
transitions.discount,
dist_q_target_t, # No double Q-learning here.
dist_q_target_t,
self.huber_param,
)
loss = jnp.mean(losses)
chex.assert_shape(losses, (batch_size,))
extra = learning_lib.LossExtra(metrics={'mean_loss': loss})
return loss, extra
@dataclasses.dataclass
class PrioritizedCategoricalDoubleQLearning(learning_lib.LossFn):
"""Categorical double q learning with prioritization on TD error."""
discount: float = 0.99
importance_sampling_exponent: float = 0.2
max_abs_reward: float = 1.
def __call__(
self,
network: networks_lib.TypedFeedForwardNetwork,
params: networks_lib.Params,
target_params: networks_lib.Params,
batch: reverb.ReplaySample,
key: networks_lib.PRNGKey,
) -> Tuple[jax.Array, learning_lib.LossExtra]:
"""Calculate a loss on a single batch of data."""
transitions: types.Transition = batch.data
probs = batch.info.probability
# Forward pass.
key1, key2, key3 = jax.random.split(key, 3)
_, logits_tm1, atoms_tm1 = network.apply(
params, transitions.observation, is_training=True, key=key1)
_, logits_t, atoms_t = network.apply(
target_params, transitions.next_observation, is_training=True, key=key2)
q_t_selector, _, _ = network.apply(
params, transitions.next_observation, is_training=True, key=key3)
# Cast and clip rewards.
d_t = (transitions.discount * self.discount).astype(jnp.float32)
r_t = jnp.clip(transitions.reward, -self.max_abs_reward,
self.max_abs_reward).astype(jnp.float32)
# Compute categorical double Q-learning loss.
batch_loss_fn = jax.vmap(
rlax.categorical_double_q_learning,
in_axes=(None, 0, 0, 0, 0, None, 0, 0))
batch_loss = batch_loss_fn(atoms_tm1, logits_tm1, transitions.action, r_t,
d_t, atoms_t, logits_t, q_t_selector)
# Importance weighting.
importance_weights = (1. / probs).astype(jnp.float32)
importance_weights **= self.importance_sampling_exponent
importance_weights /= jnp.max(importance_weights)
# Reweight.
loss = jnp.mean(importance_weights * batch_loss) # []
extra = learning_lib.LossExtra(
metrics={}, reverb_priorities=jnp.abs(batch_loss).astype(jnp.float64))
return loss, extra
@dataclasses.dataclass
class QLearning(learning_lib.LossFn):
"""Deep q learning.
This matches the original DQN loss: https://arxiv.org/abs/1312.5602.
  It differs in two aspects that improve it on the optimization side:
  - it uses Adam instead of RMSProp as the optimizer;
  - it uses a squared loss instead of the Huber loss.
"""
discount: float = 0.99
max_abs_reward: float = 1.
def __call__(
self,
network: networks_lib.TypedFeedForwardNetwork,
params: networks_lib.Params,
target_params: networks_lib.Params,
batch: reverb.ReplaySample,
key: networks_lib.PRNGKey,
) -> Tuple[jax.Array, learning_lib.LossExtra]:
"""Calculate a loss on a single batch of data."""
transitions: types.Transition = batch.data
# Forward pass.
key1, key2 = jax.random.split(key)
q_tm1 = network.apply(
params, transitions.observation, is_training=True, key=key1)
q_t = network.apply(
target_params, transitions.next_observation, is_training=True, key=key2)
# Cast and clip rewards.
d_t = (transitions.discount * self.discount).astype(jnp.float32)
r_t = jnp.clip(transitions.reward, -self.max_abs_reward,
self.max_abs_reward).astype(jnp.float32)
# Compute Q-learning TD-error.
batch_error = jax.vmap(rlax.q_learning)
td_error = batch_error(q_tm1, transitions.action, r_t, d_t, q_t)
batch_loss = jnp.square(td_error)
loss = jnp.mean(batch_loss)
extra = learning_lib.LossExtra(metrics={})
return loss, extra
@dataclasses.dataclass
class RegularizedQLearning(learning_lib.LossFn):
"""Regularized Q-learning.
Implements DQNReg loss function: https://arxiv.org/abs/2101.03958.
  This is almost identical to QLearning except that it: 1) adds a regularization
  term; 2) uses the vanilla TD error without the Huber loss; and 3) does no
  reward clipping.
"""
discount: float = 0.99
  regularizer_coeff: float = 0.1
def __call__(
self,
network: networks_lib.TypedFeedForwardNetwork,
params: networks_lib.Params,
target_params: networks_lib.Params,
batch: reverb.ReplaySample,
key: networks_lib.PRNGKey,
) -> Tuple[jax.Array, learning_lib.LossExtra]:
"""Calculate a loss on a single batch of data."""
transitions: types.Transition = batch.data
# Forward pass.
key1, key2 = jax.random.split(key)
q_tm1 = network.apply(
params, transitions.observation, is_training=True, key=key1)
q_t = network.apply(
target_params, transitions.next_observation, is_training=True, key=key2)
d_t = (transitions.discount * self.discount).astype(jnp.float32)
# Compute Q-learning TD-error.
batch_error = jax.vmap(rlax.q_learning)
td_error = batch_error(
q_tm1, transitions.action, transitions.reward, d_t, q_t)
td_error = 0.5 * jnp.square(td_error)
def select(qtm1, action):
return qtm1[action]
q_regularizer = jax.vmap(select)(q_tm1, transitions.action)
loss = self.regularizer_coeff * jnp.mean(q_regularizer) + jnp.mean(td_error)
extra = learning_lib.LossExtra(metrics={})
return loss, extra
@dataclasses.dataclass
class MunchausenQLearning(learning_lib.LossFn):
"""Munchausen q learning.
Implements M-DQN: https://arxiv.org/abs/2007.14430.
"""
entropy_temperature: float = 0.03 # tau parameter
munchausen_coefficient: float = 0.9 # alpha parameter
clip_value_min: float = -1e3
discount: float = 0.99
max_abs_reward: float = 1.
huber_loss_parameter: float = 1.
def __call__(
self,
network: networks_lib.TypedFeedForwardNetwork,
params: networks_lib.Params,
target_params: networks_lib.Params,
batch: reverb.ReplaySample,
key: networks_lib.PRNGKey,
) -> Tuple[jax.Array, learning_lib.LossExtra]:
"""Calculate a loss on a single batch of data."""
transitions: types.Transition = batch.data
# Forward pass.
key1, key2, key3 = jax.random.split(key, 3)
q_online_s = network.apply(
params, transitions.observation, is_training=True, key=key1)
action_one_hot = jax.nn.one_hot(transitions.action, q_online_s.shape[-1])
q_online_sa = jnp.sum(action_one_hot * q_online_s, axis=-1)
q_target_s = network.apply(
target_params, transitions.observation, is_training=True, key=key2)
q_target_next = network.apply(
target_params, transitions.next_observation, is_training=True, key=key3)
# Cast and clip rewards.
d_t = (transitions.discount * self.discount).astype(jnp.float32)
r_t = jnp.clip(transitions.reward, -self.max_abs_reward,
self.max_abs_reward).astype(jnp.float32)
# Munchausen term : tau * log_pi(a|s)
munchausen_term = self.entropy_temperature * jax.nn.log_softmax(
q_target_s / self.entropy_temperature, axis=-1)
munchausen_term_a = jnp.sum(action_one_hot * munchausen_term, axis=-1)
munchausen_term_a = jnp.clip(munchausen_term_a,
a_min=self.clip_value_min,
a_max=0.)
# Soft Bellman operator applied to q
next_v = self.entropy_temperature * jax.nn.logsumexp(
q_target_next / self.entropy_temperature, axis=-1)
target_q = jax.lax.stop_gradient(r_t + self.munchausen_coefficient *
munchausen_term_a + d_t * next_v)
batch_loss = rlax.huber_loss(target_q - q_online_sa,
self.huber_loss_parameter)
loss = jnp.mean(batch_loss)
extra = learning_lib.LossExtra(metrics={})
return loss, extra
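# Illustrative usage sketch (not part of the original module): each loss above is a
# plain dataclass implementing learning_lib.LossFn, so choosing a loss amounts to
# constructing a different instance and handing it to a learner or builder. The
# hyperparameter values below are arbitrary examples.
def _example_munchausen_loss() -> MunchausenQLearning:
  """Returns an M-DQN loss with a lower temperature (illustration only)."""
  return MunchausenQLearning(
      entropy_temperature=0.01,  # tau
      munchausen_coefficient=0.9,  # alpha
      discount=0.99,
      max_abs_reward=1.0)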
|
acme-master
|
acme/agents/jax/dqn/losses.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN learner implementation."""
from typing import Iterator, Optional
from acme.adders import reverb as adders
from acme.agents.jax.dqn import learning_lib
from acme.agents.jax.dqn import losses
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
import optax
import reverb
class DQNLearner(learning_lib.SGDLearner):
"""DQN learner.
  We are in the process of migrating towards a more general SGDLearner to allow
  for easy configuration of the loss. This class is maintained for now for
  backwards compatibility.
"""
def __init__(self,
network: networks_lib.TypedFeedForwardNetwork,
discount: float,
importance_sampling_exponent: float,
target_update_period: int,
iterator: Iterator[utils.PrefetchingSplit],
optimizer: optax.GradientTransformation,
random_key: networks_lib.PRNGKey,
max_abs_reward: float = 1.,
huber_loss_parameter: float = 1.,
replay_client: Optional[reverb.Client] = None,
replay_table_name: str = adders.DEFAULT_PRIORITY_TABLE,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
num_sgd_steps_per_step: int = 1):
"""Initializes the learner."""
loss_fn = losses.PrioritizedDoubleQLearning(
discount=discount,
importance_sampling_exponent=importance_sampling_exponent,
max_abs_reward=max_abs_reward,
huber_loss_parameter=huber_loss_parameter,
)
super().__init__(
network=network,
loss_fn=loss_fn,
optimizer=optimizer,
data_iterator=iterator,
target_update_period=target_update_period,
random_key=random_key,
replay_client=replay_client,
replay_table_name=replay_table_name,
counter=counter,
logger=logger,
num_sgd_steps_per_step=num_sgd_steps_per_step,
)
|
acme-master
|
acme/agents/jax/dqn/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN actor helpers."""
from typing import Callable, Sequence
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax.dqn import networks as dqn_networks
from acme.jax import networks as networks_lib
from acme.jax import utils
import chex
import jax
import jax.numpy as jnp
Epsilon = float
EpsilonPolicy = Callable[[
    networks_lib.Params, networks_lib.PRNGKey, networks_lib.Observation, Epsilon
], networks_lib.Action]
@chex.dataclass(frozen=True, mappable_dataclass=False)
class EpsilonActorState:
rng: networks_lib.PRNGKey
epsilon: jnp.ndarray
DQNPolicy = actor_core_lib.ActorCore[EpsilonActorState, None]
def alternating_epsilons_actor_core(policy_network: EpsilonPolicy,
epsilons: Sequence[float]) -> DQNPolicy:
"""Returns actor components for alternating epsilon exploration.
Args:
policy_network: A feedforward action selecting function.
epsilons: epsilons to alternate per-episode for epsilon-greedy exploration.
Returns:
A feedforward policy.
"""
epsilons = jnp.array(epsilons)
def apply_and_sample(params: networks_lib.Params,
observation: networks_lib.Observation,
state: EpsilonActorState):
random_key, key = jax.random.split(state.rng)
actions = policy_network(params, key, observation, state.epsilon) # pytype: disable=wrong-arg-types # jax-ndarray
return (actions.astype(jnp.int32),
EpsilonActorState(rng=random_key, epsilon=state.epsilon))
def policy_init(random_key: networks_lib.PRNGKey):
random_key, key = jax.random.split(random_key)
epsilon = jax.random.choice(key, epsilons)
return EpsilonActorState(rng=random_key, epsilon=epsilon)
return actor_core_lib.ActorCore(
init=policy_init, select_action=apply_and_sample,
get_extras=lambda _: None)
def behavior_policy(networks: dqn_networks.DQNNetworks) -> EpsilonPolicy:
"""A policy with parameterized epsilon-greedy exploration."""
def apply_and_sample(params: networks_lib.Params, key: networks_lib.PRNGKey,
observation: networks_lib.Observation, epsilon: Epsilon
) -> networks_lib.Action:
# TODO(b/161332815): Make JAX Actor work with batched or unbatched inputs.
observation = utils.add_batch_dim(observation)
action_values = networks.policy_network.apply(
params, observation, is_training=False)
action_values = utils.squeeze_batch_dim(action_values)
return networks.sample_fn(action_values, key, epsilon)
return apply_and_sample
def default_behavior_policy(networks: dqn_networks.DQNNetworks,
epsilon: Epsilon) -> EpsilonPolicy:
"""A policy with a fixed-epsilon epsilon-greedy exploration.
DEPRECATED: use behavior_policy instead.
Args:
networks: DQN networks
epsilon: sampling parameter that overrides the one in EpsilonPolicy
Returns:
epsilon-greedy behavior policy with fixed epsilon
"""
# TODO(lukstafi): remove this function and migrate its users.
def apply_and_sample(params: networks_lib.Params, key: networks_lib.PRNGKey,
observation: networks_lib.Observation, _: Epsilon
) -> networks_lib.Action:
# TODO(b/161332815): Make JAX Actor work with batched or unbatched inputs.
observation = utils.add_batch_dim(observation)
action_values = networks.policy_network.apply(
params, observation, is_training=False)
action_values = utils.squeeze_batch_dim(action_values)
return networks.sample_fn(action_values, key, epsilon)
return apply_and_sample
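# Illustrative usage sketch (not part of the original module): composing the helpers
# above into an actor core. `networks` is assumed to be a dqn_networks.DQNNetworks
# built elsewhere; the epsilon values are arbitrary examples.
def _example_policy(networks: dqn_networks.DQNNetworks) -> DQNPolicy:
  """Builds an actor core alternating between two exploration epsilons."""
  return alternating_epsilons_actor_core(
      behavior_policy(networks), epsilons=[0.01, 0.1])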
|
acme-master
|
acme/agents/jax/dqn/actor.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines Rainbow DQN, using JAX."""
import dataclasses
from typing import Callable
from acme import specs
from acme.agents.jax.dqn import actor as dqn_actor
from acme.agents.jax.dqn import builder
from acme.agents.jax.dqn import config as dqn_config
from acme.agents.jax.dqn import losses
from acme.jax import networks as networks_lib
from acme.jax import utils
import rlax
NetworkFactory = Callable[[specs.EnvironmentSpec],
networks_lib.FeedForwardNetwork]
@dataclasses.dataclass
class RainbowConfig(dqn_config.DQNConfig):
"""(Additional) configuration options for RainbowDQN."""
max_abs_reward: float = 1.0 # For clipping reward
def apply_policy_and_sample(
network: networks_lib.FeedForwardNetwork,) -> dqn_actor.EpsilonPolicy:
"""Returns a function that computes actions.
  Note that this differs from default_behavior_policy in that it expects a
  c51-style network head which returns a tuple whose first entry represents
  the q-values.
Args:
network: A c51-style feedforward network.
Returns:
A feedforward policy.
"""
def apply_and_sample(params, key, obs, epsilon):
# TODO(b/161332815): Make JAX Actor work with batched or unbatched inputs.
obs = utils.add_batch_dim(obs)
action_values = network.apply(params, obs)[0]
action_values = utils.squeeze_batch_dim(action_values)
return rlax.epsilon_greedy(epsilon).sample(key, action_values)
return apply_and_sample
def eval_policy(network: networks_lib.FeedForwardNetwork,
eval_epsilon: float) -> dqn_actor.EpsilonPolicy:
"""Returns a function that computes actions.
  Note that this differs from default_behavior_policy in that it expects a
  c51-style network head which returns a tuple whose first entry represents
  the q-values.
Args:
network: A c51-style feedforward network.
eval_epsilon: for epsilon-greedy exploration.
Returns:
A feedforward policy.
"""
policy = apply_policy_and_sample(network)
def apply_and_sample(params, key, obs, _):
return policy(params, key, obs, eval_epsilon)
return apply_and_sample
def make_builder(config: RainbowConfig):
"""Returns a DQNBuilder with a pre-built loss function."""
loss_fn = losses.PrioritizedCategoricalDoubleQLearning(
discount=config.discount,
importance_sampling_exponent=config.importance_sampling_exponent,
max_abs_reward=config.max_abs_reward,
)
return builder.DQNBuilder(config, loss_fn=loss_fn)
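# Illustrative usage sketch (not part of the original module): constructing a
# Rainbow-style builder from its config. This assumes the parent DQNConfig provides
# defaults for all of its fields; the reward-clipping value is an arbitrary example.
def _example_rainbow_builder() -> builder.DQNBuilder:
  """Returns a DQNBuilder configured with the categorical double-Q loss."""
  config = RainbowConfig(max_abs_reward=1.0)
  return make_builder(config)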
|
acme-master
|
acme/agents/jax/dqn/rainbow.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MBOP config."""
import dataclasses
from acme.agents.jax.mbop import mppi
@dataclasses.dataclass(frozen=True)
class MBOPConfig:
"""Configuration options for the MBOP agent.
Attributes:
mppi_config: Planner hyperparameters.
learning_rate: Learning rate.
num_networks: Number of networks in the ensembles.
num_sgd_steps_per_step: How many gradient updates to perform per learner
step.
"""
mppi_config: mppi.MPPIConfig = dataclasses.field(
default_factory=mppi.MPPIConfig
)
learning_rate: float = 3e-4
num_networks: int = 5
num_sgd_steps_per_step: int = 1
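# Illustrative usage sketch (not part of the original module): overriding the
# planner hyperparameters while keeping the learner defaults. The values below are
# arbitrary examples.
def _example_config() -> MBOPConfig:
  """Returns an MBOP config with a shorter, cheaper planning setup."""
  return MBOPConfig(
      mppi_config=mppi.MPPIConfig(horizon=5, n_trajectories=256),
      num_networks=3)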
|
acme-master
|
acme/agents/jax/mbop/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MBOP models."""
import functools
from typing import Callable, Generic, Optional, Tuple
from acme import specs
from acme.agents.jax import actor_core
from acme.agents.jax.mbop import ensemble
from acme.agents.jax.mbop import networks as mbop_networks
from acme.jax import networks
from acme.jax import utils
import chex
import jax
# World, policy prior and n-step return models. These are backed by the
# corresponding networks.
WorldModel = Callable[[networks.Params, networks.Observation, networks.Action],
Tuple[networks.Observation, networks.Value]]
MakeWorldModel = Callable[[mbop_networks.WorldModelNetwork], WorldModel]
PolicyPrior = actor_core.ActorCore
MakePolicyPrior = Callable[
[mbop_networks.PolicyPriorNetwork, specs.EnvironmentSpec], PolicyPrior]
NStepReturn = Callable[[networks.Params, networks.Observation, networks.Action],
networks.Value]
MakeNStepReturn = Callable[[mbop_networks.NStepReturnNetwork], NStepReturn]
@chex.dataclass(frozen=True, mappable_dataclass=False)
class PolicyPriorState(Generic[actor_core.RecurrentState]):
"""State of a policy prior.
Attributes:
rng: Random key.
action_tm1: Previous action.
    recurrent_state: Recurrent state. It will be None for non-recurrent (e.g.
      feed-forward) policies.
"""
rng: networks.PRNGKey
action_tm1: networks.Action
recurrent_state: Optional[actor_core.RecurrentState] = None
FeedForwardPolicyState = PolicyPriorState[actor_core.NoneType]
def feed_forward_policy_prior_to_actor_core(
policy: actor_core.RecurrentPolicy, initial_action_tm1: networks.Action
) -> actor_core.ActorCore[PolicyPriorState, actor_core.NoneType]:
"""A convenience adaptor from a feed forward policy prior to ActorCore.
Args:
policy: A feed forward policy prior. In the planner and other components,
the previous action is explicitly passed as an argument to the policy
prior together with the observation to infer the next action. Therefore,
we model feed forward policy priors as recurrent ActorCore policies with
previous action being the recurrent state.
initial_action_tm1: Initial previous action. This will usually be a zero
tensor.
Returns:
an ActorCore representing the feed forward policy prior.
"""
def select_action(params: networks.Params, observation: networks.Observation,
state: FeedForwardPolicyState):
rng, policy_rng = jax.random.split(state.rng)
action = policy(params, policy_rng, observation, state.action_tm1)
return action, PolicyPriorState(rng, action)
def init(rng: networks.PRNGKey) -> FeedForwardPolicyState:
return PolicyPriorState(rng, initial_action_tm1)
def get_extras(unused_state: FeedForwardPolicyState) -> actor_core.NoneType:
return None
return actor_core.ActorCore(
init=init, select_action=select_action, get_extras=get_extras)
def make_ensemble_world_model(
world_model_network: mbop_networks.WorldModelNetwork) -> WorldModel:
"""Creates an ensemble world model from its network."""
return functools.partial(ensemble.apply_round_robin,
world_model_network.apply)
def make_ensemble_policy_prior(
policy_prior_network: mbop_networks.PolicyPriorNetwork,
spec: specs.EnvironmentSpec,
use_round_robin: bool = True) -> PolicyPrior:
"""Creates an ensemble policy prior from its network.
Args:
policy_prior_network: The policy prior network.
spec: Environment specification.
use_round_robin: Whether to use round robin or mean to calculate the policy
prior over the ensemble members.
Returns:
A policy prior.
"""
def _policy_prior(params: networks.Params, key: networks.PRNGKey,
observation_t: networks.Observation,
action_tm1: networks.Action) -> networks.Action:
# Regressor policies are deterministic.
del key
apply_fn = (
ensemble.apply_round_robin if use_round_robin else ensemble.apply_mean)
return apply_fn(
policy_prior_network.apply,
params,
observation_t=observation_t,
action_tm1=action_tm1)
dummy_action = utils.zeros_like(spec.actions)
dummy_action = utils.add_batch_dim(dummy_action)
return feed_forward_policy_prior_to_actor_core(_policy_prior, dummy_action)
def make_ensemble_n_step_return(
n_step_return_network: mbop_networks.NStepReturnNetwork) -> NStepReturn:
"""Creates an ensemble n-step return model from its network."""
return functools.partial(ensemble.apply_mean, n_step_return_network.apply)
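# Illustrative usage sketch (not part of the original module): adapting a trivial
# feed forward policy prior to an ActorCore. The lambda below is a hypothetical
# policy that simply repeats the previous action, ignoring parameters, key and
# observation; it only exists to show the adaptor's calling convention.
def _example_constant_policy_prior(
    initial_action: networks.Action) -> PolicyPrior:
  """Wraps an action-repeating policy into an ActorCore (illustration only)."""
  policy = lambda params, key, observation, action_tm1: action_tm1
  return feed_forward_policy_prior_to_actor_core(policy, initial_action)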
|
acme-master
|
acme/agents/jax/mbop/models.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ensemble."""
import functools
from typing import Any
from acme.agents.jax.mbop import ensemble
from acme.jax import networks
from flax import linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from absl.testing import absltest
class RandomFFN(nn.Module):
@nn.compact
def __call__(self, x):
return nn.Dense(15)(x)
def params_adding_ffn(x: jnp.ndarray) -> networks.FeedForwardNetwork:
"""Apply adds the parameters to the inputs."""
return networks.FeedForwardNetwork(
init=lambda key, x=x: jax.random.uniform(key, x.shape),
apply=lambda params, x: params + x)
def funny_args_ffn(x: jnp.ndarray) -> networks.FeedForwardNetwork:
"""Apply takes additional parameters, returns `params + x + foo - bar`."""
return networks.FeedForwardNetwork(
init=lambda key, x=x: jax.random.uniform(key, x.shape),
apply=lambda params, x, foo, bar: params + x + foo - bar)
def struct_params_adding_ffn(sx: Any) -> networks.FeedForwardNetwork:
"""Like params_adding_ffn, but with pytree inputs, preserves structure."""
def init_fn(key, sx=sx):
return jax.tree_map(lambda x: jax.random.uniform(key, x.shape), sx)
def apply_fn(params, x):
return jax.tree_map(lambda p, v: p + v, params, x)
return networks.FeedForwardNetwork(init=init_fn, apply=apply_fn)
class EnsembleTest(absltest.TestCase):
def test_ensemble_init(self):
x = jnp.ones(10) # Base input
wrapped_ffn = params_adding_ffn(x)
rr_ensemble = ensemble.make_ensemble(
wrapped_ffn, ensemble.apply_round_robin, num_networks=3)
key = jax.random.PRNGKey(0)
params = rr_ensemble.init(key)
self.assertTupleEqual(params.shape, (3,) + x.shape)
# The ensemble dimension is the lead dimension.
self.assertFalse((params[0, ...] == params[1, ...]).all())
def test_apply_all(self):
x = jnp.ones(10) # Base input
bx = jnp.ones((7, 10)) # Batched input
wrapped_ffn = params_adding_ffn(x)
rr_ensemble = ensemble.make_ensemble(
wrapped_ffn, ensemble.apply_all, num_networks=3)
key = jax.random.PRNGKey(0)
params = rr_ensemble.init(key)
self.assertTupleEqual(params.shape, (3,) + x.shape)
y = rr_ensemble.apply(params, x)
self.assertTupleEqual(y.shape, (3,) + x.shape)
np.testing.assert_allclose(params, y - jnp.broadcast_to(x, (3,) + x.shape))
by = rr_ensemble.apply(params, bx)
# Note: the batch dimension is no longer the leading dimension.
self.assertTupleEqual(by.shape, (3,) + bx.shape)
def test_apply_round_robin(self):
x = jnp.ones(10) # Base input
bx = jnp.ones((7, 10)) # Batched input
wrapped_ffn = params_adding_ffn(x)
rr_ensemble = ensemble.make_ensemble(
wrapped_ffn, ensemble.apply_round_robin, num_networks=3)
key = jax.random.PRNGKey(0)
params = rr_ensemble.init(key)
self.assertTupleEqual(params.shape, (3,) + x.shape)
y = rr_ensemble.apply(params, jnp.broadcast_to(x, (3,) + x.shape))
self.assertTupleEqual(y.shape, (3,) + x.shape)
np.testing.assert_allclose(params, y - x)
# Note: the ensemble dimension must lead, the batch dimension is no longer
# the leading dimension.
by = rr_ensemble.apply(
params, jnp.broadcast_to(jnp.expand_dims(bx, axis=0), (3,) + bx.shape))
self.assertTupleEqual(by.shape, (3,) + bx.shape)
# If num_networks=3, then `round_robin(params, input)[4]` should be equal
# to `apply(params[1], input[4])`, etc.
yy = rr_ensemble.apply(params, jnp.broadcast_to(x, (6,) + x.shape))
self.assertTupleEqual(yy.shape, (6,) + x.shape)
np.testing.assert_allclose(
jnp.concatenate([params, params], axis=0),
yy - jnp.expand_dims(x, axis=0))
def test_apply_mean(self):
x = jnp.ones(10) # Base input
bx = jnp.ones((7, 10)) # Batched input
wrapped_ffn = params_adding_ffn(x)
rr_ensemble = ensemble.make_ensemble(
wrapped_ffn, ensemble.apply_mean, num_networks=3)
key = jax.random.PRNGKey(0)
params = rr_ensemble.init(key)
self.assertTupleEqual(params.shape, (3,) + x.shape)
self.assertFalse((params[0, ...] == params[1, ...]).all())
y = rr_ensemble.apply(params, x)
self.assertTupleEqual(y.shape, x.shape)
np.testing.assert_allclose(
jnp.mean(params, axis=0), y - x, atol=1E-5, rtol=1E-5)
by = rr_ensemble.apply(params, bx)
self.assertTupleEqual(by.shape, bx.shape)
def test_apply_all_multiargs(self):
x = jnp.ones(10) # Base input
wrapped_ffn = funny_args_ffn(x)
rr_ensemble = ensemble.make_ensemble(
wrapped_ffn, ensemble.apply_all, num_networks=3)
key = jax.random.PRNGKey(0)
params = rr_ensemble.init(key)
self.assertTupleEqual(params.shape, (3,) + x.shape)
y = rr_ensemble.apply(params, x, 2 * x, x)
self.assertTupleEqual(y.shape, (3,) + x.shape)
np.testing.assert_allclose(
params,
y - jnp.broadcast_to(2 * x, (3,) + x.shape),
atol=1E-5,
rtol=1E-5)
y = rr_ensemble.apply(params, x, bar=x, foo=2 * x)
self.assertTupleEqual(y.shape, (3,) + x.shape)
np.testing.assert_allclose(
params,
y - jnp.broadcast_to(2 * x, (3,) + x.shape),
atol=1E-5,
rtol=1E-5)
def test_apply_all_structured(self):
x = jnp.ones(10)
sx = [(3 * x, 2 * x), 5 * x] # Base input
wrapped_ffn = struct_params_adding_ffn(sx)
rr_ensemble = ensemble.make_ensemble(
wrapped_ffn, ensemble.apply_all, num_networks=3)
key = jax.random.PRNGKey(0)
params = rr_ensemble.init(key)
y = rr_ensemble.apply(params, sx)
ex = jnp.broadcast_to(x, (3,) + x.shape)
np.testing.assert_allclose(y[0][0], params[0][0] + 3 * ex)
def test_apply_round_robin_multiargs(self):
x = jnp.ones(10) # Base input
wrapped_ffn = funny_args_ffn(x)
rr_ensemble = ensemble.make_ensemble(
wrapped_ffn, ensemble.apply_round_robin, num_networks=3)
key = jax.random.PRNGKey(0)
params = rr_ensemble.init(key)
self.assertTupleEqual(params.shape, (3,) + x.shape)
ex = jnp.broadcast_to(x, (3,) + x.shape)
y = rr_ensemble.apply(params, ex, 2 * ex, ex)
self.assertTupleEqual(y.shape, (3,) + x.shape)
np.testing.assert_allclose(
params,
y - jnp.broadcast_to(2 * x, (3,) + x.shape),
atol=1E-5,
rtol=1E-5)
y = rr_ensemble.apply(params, ex, bar=ex, foo=2 * ex)
self.assertTupleEqual(y.shape, (3,) + x.shape)
np.testing.assert_allclose(
params,
y - jnp.broadcast_to(2 * x, (3,) + x.shape),
atol=1E-5,
rtol=1E-5)
def test_apply_round_robin_structured(self):
x = jnp.ones(10)
sx = [(3 * x, 2 * x), 5 * x] # Base input
wrapped_ffn = struct_params_adding_ffn(sx)
rr_ensemble = ensemble.make_ensemble(
wrapped_ffn, ensemble.apply_round_robin, num_networks=3)
key = jax.random.PRNGKey(0)
params = rr_ensemble.init(key)
ex = jnp.broadcast_to(x, (3,) + x.shape)
esx = [(3 * ex, 2 * ex), 5 * ex]
y = rr_ensemble.apply(params, esx)
np.testing.assert_allclose(y[0][0], params[0][0] + 3 * ex)
def test_apply_mean_multiargs(self):
x = jnp.ones(10) # Base input
wrapped_ffn = funny_args_ffn(x)
rr_ensemble = ensemble.make_ensemble(
wrapped_ffn, ensemble.apply_mean, num_networks=3)
key = jax.random.PRNGKey(0)
params = rr_ensemble.init(key)
self.assertTupleEqual(params.shape, (3,) + x.shape)
y = rr_ensemble.apply(params, x, 2 * x, x)
self.assertTupleEqual(y.shape, x.shape)
np.testing.assert_allclose(
jnp.mean(params, axis=0), y - 2 * x, atol=1E-5, rtol=1E-5)
y = rr_ensemble.apply(params, x, bar=x, foo=2 * x)
self.assertTupleEqual(y.shape, x.shape)
np.testing.assert_allclose(
jnp.mean(params, axis=0), y - 2 * x, atol=1E-5, rtol=1E-5)
def test_apply_mean_structured(self):
x = jnp.ones(10)
sx = [(3 * x, 2 * x), 5 * x] # Base input
wrapped_ffn = struct_params_adding_ffn(sx)
rr_ensemble = ensemble.make_ensemble(
wrapped_ffn, ensemble.apply_mean, num_networks=3)
key = jax.random.PRNGKey(0)
params = rr_ensemble.init(key)
y = rr_ensemble.apply(params, sx)
np.testing.assert_allclose(
y[0][0], jnp.mean(params[0][0], axis=0) + 3 * x, atol=1E-5, rtol=1E-5)
def test_round_robin_random(self):
x = jnp.ones(10) # Base input
bx = jnp.ones((9, 10)) # Batched input
ffn = RandomFFN()
wrapped_ffn = networks.FeedForwardNetwork(
init=functools.partial(ffn.init, x=x), apply=ffn.apply)
rr_ensemble = ensemble.make_ensemble(
wrapped_ffn, ensemble.apply_round_robin, num_networks=3)
key = jax.random.PRNGKey(0)
params = rr_ensemble.init(key)
out = rr_ensemble.apply(params, bx)
# The output should be the same every 3 rows:
blocks = jnp.split(out, 3, axis=0)
np.testing.assert_array_equal(blocks[0], blocks[1])
np.testing.assert_array_equal(blocks[0], blocks[2])
self.assertTrue((out[0] != out[1]).any())
for i in range(9):
np.testing.assert_allclose(
out[i],
ffn.apply(jax.tree_map(lambda p, i=i: p[i % 3], params), bx[i]),
atol=1E-5,
rtol=1E-5)
def test_mean_random(self):
x = jnp.ones(10)
bx = jnp.ones((9, 10))
ffn = RandomFFN()
wrapped_ffn = networks.FeedForwardNetwork(
init=functools.partial(ffn.init, x=x), apply=ffn.apply)
mean_ensemble = ensemble.make_ensemble(
wrapped_ffn, ensemble.apply_mean, num_networks=3)
key = jax.random.PRNGKey(0)
params = mean_ensemble.init(key)
single_output = mean_ensemble.apply(params, x)
self.assertEqual(single_output.shape, (15,))
batch_output = mean_ensemble.apply(params, bx)
# Make sure all rows are equal:
np.testing.assert_allclose(
jnp.broadcast_to(batch_output[0], batch_output.shape),
batch_output,
atol=1E-5,
rtol=1E-5)
# Check results explicitly:
all_members = jnp.concatenate([
jnp.expand_dims(
ffn.apply(jax.tree_map(lambda p, i=i: p[i], params), bx), axis=0)
for i in range(3)
])
batch_means = jnp.mean(all_members, axis=0)
np.testing.assert_allclose(batch_output, batch_means, atol=1E-5, rtol=1E-5)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/jax/mbop/ensemble_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides the extended MPPI planner used in MBOP [https://arxiv.org/abs/2008.05556].
In this context, MPPI refers to Model-Predictive Path Integral control,
originally introduced in "Model Predictive Path Integral Control: From Theory to
Parallel Computation" Grady Williams, Andrew Aldrich and Evangelos A. Theodorou.
This is a modified implementation of MPPI that adds a policy prior and n-step
return extension as described in Algorithm 2 of "Model-Based Offline Planning"
[https://arxiv.org/abs/2008.05556]. Notation is taken from the paper. This
planner can be 'degraded' to provide either 'basic' MPPI or PDDM-style
[https://arxiv.org/abs/1909.11652] planning by removing the n-step return,
providing a Gaussian policy prior, or using single-head ensembles.
"""
import dataclasses
import functools
from typing import Callable, Optional
from acme import specs
from acme.agents.jax.mbop import models
from acme.jax import networks
import jax
from jax import random
import jax.numpy as jnp
# Function that takes (n_trajectories, horizon, action_dim) tensor of action
# trajectories and (n_trajectories) vector of corresponding cumulative rewards,
# i.e. returns, for each trajectory as input and returns a single action
# trajectory.
ActionAggregationFn = Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray]
def return_weighted_average(action_trajectories: jnp.ndarray,
cum_reward: jnp.ndarray,
kappa: float) -> jnp.ndarray:
r"""Calculates return-weighted average over all trajectories.
This will calculate the return-weighted average over a set of trajectories as
defined on l.17 of Alg. 2 in the MBOP paper:
[https://arxiv.org/abs/2008.05556].
  Note: The maximum return is subtracted from `cum_reward` before exponentiation
  to avoid numerical overflow.
Args:
action_trajectories: (n_trajectories, horizon, action_dim) tensor of action
trajectories, corresponds to `A` in Alg. 2.
cum_reward: (n_trajectories) vector of corresponding cumulative rewards
(returns) for each trajectory. Corresponds to `\mathcal{R}` in Alg. 2.
kappa: `\kappa` constant, changes the 'peakiness' of the exponential
averaging.
Returns:
Single action trajectory corresponding to the return-weighted average of the
trajectories.
"""
  # Subtract the maximum reward to avoid NaNs:
cum_reward = cum_reward - cum_reward.max()
  # Removing the batch dimension of cum_reward allows for an implicit broadcast
  # in jnp.average:
exp_cum_reward = jnp.exp(kappa * jnp.squeeze(cum_reward))
return jnp.average(action_trajectories, weights=exp_cum_reward, axis=0)
def return_top_k_average(action_trajectories: jnp.ndarray,
cum_reward: jnp.ndarray,
k: int = 10) -> jnp.ndarray:
r"""Calculates the top-k average over all trajectories.
  This will calculate the top-k average over a set of trajectories, as defined
  in the POIR paper.
  Note: the top-k average is more numerically stable than the weighted average.
Args:
action_trajectories: (n_trajectories, horizon, action_dim) tensor of action
trajectories.
cum_reward: (n_trajectories) vector of corresponding cumulative rewards
(returns) for each trajectory.
k: the number of trajectories to average.
Returns:
Single action trajectory corresponding to the average of the k best
trajectories.
"""
top_k_trajectories = action_trajectories[jnp.argsort(
jnp.squeeze(cum_reward))[-int(k):]]
return jnp.mean(top_k_trajectories, axis=0)
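# Illustrative numeric sketch (not part of the original module): with three
# candidate trajectories and returns [1., 2., 3.], return_weighted_average weights
# the trajectories by the (shifted) exponential of their returns, while
# return_top_k_average with k=2 simply averages the two best. Both results have
# shape (horizon, action_dim).
def _example_aggregation():
  """Aggregates three toy trajectories of shape (horizon=4, action_dim=2)."""
  action_trajectories = jnp.arange(24, dtype=jnp.float32).reshape(3, 4, 2)
  cum_reward = jnp.array([1.0, 2.0, 3.0])
  weighted = return_weighted_average(action_trajectories, cum_reward, kappa=1.0)
  top2 = return_top_k_average(action_trajectories, cum_reward, k=2)
  return weighted, top2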
@dataclasses.dataclass
class MPPIConfig:
"""Config dataclass for MPPI-style planning, used in mppi.py.
These variables correspond to different parameters of `MBOP-Trajopt` as
defined in MBOP [https://arxiv.org/abs/2008.05556] (Alg. 2).
Attributes:
sigma: Variance of action-additive noise.
beta: Mixture parameter between old trajectory and new action.
horizon: Planning horizon, corresponds to H in Alg. 2 line 8.
n_trajectories: Number of trajectories used in `mppi_planner` to sample the
      best action. Corresponds to `N` in Alg. 2, line 5.
previous_trajectory_clip: Value to clip the previous_trajectory's actions
to. Disabled if None.
action_aggregation_fn: Function that aggregates action trajectories and
returns a single action trajectory.
"""
sigma: float = 0.8
beta: float = 0.2
horizon: int = 15
n_trajectories: int = 1000
previous_trajectory_clip: Optional[float] = None
action_aggregation_fn: ActionAggregationFn = (
functools.partial(return_weighted_average, kappa=0.5))
def get_initial_trajectory(config: MPPIConfig, env_spec: specs.EnvironmentSpec):
"""Returns the initial empty trajectory `T_0`."""
return jnp.zeros((max(1, config.horizon),) + env_spec.actions.shape)
def _repeat_n(new_batch: int, data: jnp.ndarray) -> jnp.ndarray:
"""Create new batch dimension of size `new_batch` by repeating `data`."""
return jnp.broadcast_to(data, (new_batch,) + data.shape)
def mppi_planner(
config: MPPIConfig,
world_model: models.WorldModel,
policy_prior: models.PolicyPrior,
n_step_return: models.NStepReturn,
world_model_params: networks.Params,
policy_prior_params: networks.Params,
n_step_return_params: networks.Params,
random_key: networks.PRNGKey,
observation: networks.Observation,
previous_trajectory: jnp.ndarray,
) -> jnp.ndarray:
"""MPPI-extended trajectory optimizer.
This implements the trajectory optimizer described in MBOP
[https://arxiv.org/abs/2008.05556] (Alg. 2) which is an extended version of
MPPI that adds support for arbitrary sampling distributions and extends the
return horizon using an approximate model of returns. There are a couple
notation changes for readability:
A -> action_trajectories
T -> action_trajectory
If the horizon is set to 0, the planner will simply call the policy_prior
and average the action over the ensemble heads.
Args:
config: Base configuration parameters of MPPI.
world_model: Corresponds to `f_m(s_t, a_t)_s` in Alg. 2.
policy_prior: Corresponds to `f_b(s_t, a_tm1)` in Alg. 2.
n_step_return: Corresponds to `f_R(s_t, a_t)` in Alg. 2.
world_model_params: Parameters for world model.
policy_prior_params: Parameters for policy prior.
n_step_return_params: Parameters for n_step return.
random_key: JAX random key seed.
observation: Normalized current observation from the environment, `s` in
Alg. 2.
    previous_trajectory: Normalized previous action trajectory. `T` in Alg. 2.
      Shape is [horizon, action_dims].
Returns:
jnp.ndarray: Average action trajectory of shape [horizon, action_dims].
"""
action_trajectory_tm1 = previous_trajectory
policy_prior_state = policy_prior.init(random_key)
# Broadcast so that we have n_trajectories copies of each:
observation_t = jax.tree_map(
functools.partial(_repeat_n, config.n_trajectories), observation)
action_tm1 = jnp.broadcast_to(action_trajectory_tm1[0],
(config.n_trajectories,) +
action_trajectory_tm1[0].shape)
if config.previous_trajectory_clip is not None:
action_tm1 = jnp.clip(
action_tm1,
a_min=-config.previous_trajectory_clip,
a_max=config.previous_trajectory_clip)
# First check if planning is unnecessary:
if config.horizon == 0:
if hasattr(policy_prior_state, 'action_tm1'):
policy_prior_state = policy_prior_state.replace(action_tm1=action_tm1)
action_set, _ = policy_prior.select_action(policy_prior_params,
observation_t,
policy_prior_state)
# Need to re-create an action trajectory from a single action.
return jnp.broadcast_to(
jnp.mean(action_set, axis=0), (1, action_set.shape[-1]))
# Accumulators for returns and trajectories:
cum_reward = jnp.zeros((config.n_trajectories, 1))
# Generate noise once:
random_key, noise_key = random.split(random_key)
action_noise = config.sigma * random.normal(noise_key, (
(config.horizon,) + action_tm1.shape))
# Initialize empty set of action trajectories for concatenation in loop:
action_trajectories = jnp.zeros((config.n_trajectories, 0) +
action_trajectory_tm1[0].shape)
for t in range(config.horizon):
# Query policy prior for proposed action:
if hasattr(policy_prior_state, 'action_tm1'):
policy_prior_state = policy_prior_state.replace(action_tm1=action_tm1)
action_t, policy_prior_state = policy_prior.select_action(
policy_prior_params, observation_t, policy_prior_state)
# Add action noise:
action_t = action_t + action_noise[t]
# Mix action with previous trajectory's corresponding action:
action_t = (1 -
config.beta) * action_t + config.beta * action_trajectory_tm1[t]
# Query world model to get next observation and reward:
observation_tp1, reward_t = world_model(world_model_params, observation_t,
action_t)
cum_reward += reward_t
# Insert actions into trajectory matrix:
action_trajectories = jnp.concatenate(
[action_trajectories,
jnp.expand_dims(action_t, axis=1)], axis=1)
# Bump variable timesteps for next loop:
observation_t = observation_tp1
action_tm1 = action_t
  # Append the final n-step return prediction to the cumulative reward:
n_step_return_t = n_step_return(n_step_return_params, observation_t, action_t)
cum_reward += n_step_return_t
# Average the set of `n_trajectories` trajectories into a single trajectory.
return config.action_aggregation_fn(action_trajectories, cum_reward)
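# Illustrative usage sketch (not part of the original module): receding-horizon use
# of the planner. The first action of the returned trajectory is what would be
# executed in the environment, and the full trajectory is fed back as
# `previous_trajectory` on the next call. The model callables and parameters are
# assumed to come from the MBOP learner / models.py helpers.
def _example_plan_first_action(config: MPPIConfig,
                               env_spec: specs.EnvironmentSpec,
                               world_model: models.WorldModel,
                               policy_prior: models.PolicyPrior,
                               n_step_return: models.NStepReturn,
                               world_model_params: networks.Params,
                               policy_prior_params: networks.Params,
                               n_step_return_params: networks.Params,
                               random_key: networks.PRNGKey,
                               observation: networks.Observation) -> jnp.ndarray:
  """Plans from scratch and returns the first action of the trajectory."""
  trajectory = get_initial_trajectory(config, env_spec)
  trajectory = mppi_planner(config, world_model, policy_prior, n_step_return,
                            world_model_params, policy_prior_params,
                            n_step_return_params, random_key, observation,
                            trajectory)
  return trajectory[0]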
|
acme-master
|
acme/agents/jax/mbop/mppi.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the Model-Based Offline Planning (MBOP) agent."""
from acme.agents.jax.mbop.acting import ActorCore
from acme.agents.jax.mbop.acting import make_actor
from acme.agents.jax.mbop.acting import make_actor_core
from acme.agents.jax.mbop.acting import make_ensemble_actor_core
from acme.agents.jax.mbop.builder import MBOPBuilder
from acme.agents.jax.mbop.config import MBOPConfig
from acme.agents.jax.mbop.dataset import EPISODE_RETURN
from acme.agents.jax.mbop.dataset import episodes_to_timestep_batched_transitions
from acme.agents.jax.mbop.dataset import get_normalization_stats
from acme.agents.jax.mbop.dataset import N_STEP_RETURN
from acme.agents.jax.mbop.learning import LoggerFn
from acme.agents.jax.mbop.learning import make_ensemble_regressor_learner
from acme.agents.jax.mbop.learning import MakeNStepReturnLearner
from acme.agents.jax.mbop.learning import MakePolicyPriorLearner
from acme.agents.jax.mbop.learning import MakeWorldModelLearner
from acme.agents.jax.mbop.learning import MBOPLearner
from acme.agents.jax.mbop.learning import TrainingState
from acme.agents.jax.mbop.losses import MBOPLosses
from acme.agents.jax.mbop.losses import policy_prior_loss
from acme.agents.jax.mbop.losses import TransitionLoss
from acme.agents.jax.mbop.losses import world_model_loss
from acme.agents.jax.mbop.models import make_ensemble_n_step_return
from acme.agents.jax.mbop.models import make_ensemble_policy_prior
from acme.agents.jax.mbop.models import make_ensemble_world_model
from acme.agents.jax.mbop.models import MakeNStepReturn
from acme.agents.jax.mbop.models import MakePolicyPrior
from acme.agents.jax.mbop.models import MakeWorldModel
from acme.agents.jax.mbop.mppi import mppi_planner
from acme.agents.jax.mbop.mppi import MPPIConfig
from acme.agents.jax.mbop.mppi import return_top_k_average
from acme.agents.jax.mbop.mppi import return_weighted_average
from acme.agents.jax.mbop.networks import make_networks
from acme.agents.jax.mbop.networks import make_policy_prior_network
from acme.agents.jax.mbop.networks import make_world_model_network
from acme.agents.jax.mbop.networks import MBOPNetworks
|
acme-master
|
acme/agents/jax/mbop/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dataset."""
from acme.agents.jax.mbop import dataset as dataset_lib
import rlds
from rlds.transformations import transformations_testlib
import tensorflow as tf
from absl.testing import absltest
def sample_episode() -> rlds.Episode:
"""Returns a sample episode."""
steps = {
rlds.OBSERVATION: [
[1, 1],
[2, 2],
[3, 3],
[4, 4],
[5, 5],
],
rlds.ACTION: [[1], [2], [3], [4], [5]],
rlds.REWARD: [1.0, 2.0, 3.0, 4.0, 5.0],
rlds.DISCOUNT: [1, 1, 1, 1, 1],
rlds.IS_FIRST: [True, False, False, False, False],
rlds.IS_LAST: [False, False, False, False, True],
rlds.IS_TERMINAL: [False, False, False, False, True],
}
return {rlds.STEPS: tf.data.Dataset.from_tensor_slices(steps)}
class DatasetTest(transformations_testlib.TransformationsTest):
def test_episode_to_timestep_batch(self):
batched = dataset_lib.episode_to_timestep_batch(
sample_episode(), return_horizon=2)
# Scalars should be expanded and the n-step return should be present. Each
# element of a step should be a triplet containing the previous, current and
# next values of the corresponding fields. Since the return horizon is 2 and
# the number of steps in the episode is 5, there can be only 2 triplets for
# time steps 1 and 2.
expected_steps = {
rlds.OBSERVATION: [
[[1, 1], [2, 2], [3, 3]],
[[2, 2], [3, 3], [4, 4]],
],
rlds.ACTION: [
[[1], [2], [3]],
[[2], [3], [4]],
],
rlds.REWARD: [
[[1.0], [2.0], [3.0]],
[[2.0], [3.0], [4.0]],
],
rlds.DISCOUNT: [
[[1], [1], [1]],
[[1], [1], [1]],
],
rlds.IS_FIRST: [
[[True], [False], [False]],
[[False], [False], [False]],
],
rlds.IS_LAST: [
[[False], [False], [False]],
[[False], [False], [False]],
],
rlds.IS_TERMINAL: [
[[False], [False], [False]],
[[False], [False], [False]],
],
dataset_lib.N_STEP_RETURN: [
[[3.0], [5.0], [7.0]],
[[5.0], [7.0], [9.0]],
],
}
self.expect_equal_datasets(
batched, tf.data.Dataset.from_tensor_slices(expected_steps))
def test_episode_to_timestep_batch_episode_return(self):
batched = dataset_lib.episode_to_timestep_batch(
sample_episode(), return_horizon=3, calculate_episode_return=True)
expected_steps = {
rlds.OBSERVATION: [[[1, 1], [2, 2], [3, 3]]],
rlds.ACTION: [[[1], [2], [3]]],
rlds.REWARD: [[[1.0], [2.0], [3.0]]],
rlds.DISCOUNT: [[[1], [1], [1]]],
rlds.IS_FIRST: [[[True], [False], [False]]],
rlds.IS_LAST: [[[False], [False], [False]]],
rlds.IS_TERMINAL: [[[False], [False], [False]]],
dataset_lib.N_STEP_RETURN: [[[6.0], [9.0], [12.0]]],
        # This should match the sum of the rewards in the input.
dataset_lib.EPISODE_RETURN: [[[15.0], [15.0], [15.0]]],
}
self.expect_equal_datasets(
batched, tf.data.Dataset.from_tensor_slices(expected_steps))
def test_episode_to_timestep_batch_no_return_horizon(self):
batched = dataset_lib.episode_to_timestep_batch(
sample_episode(), return_horizon=1)
expected_steps = {
rlds.OBSERVATION: [
[[1, 1], [2, 2], [3, 3]],
[[2, 2], [3, 3], [4, 4]],
[[3, 3], [4, 4], [5, 5]],
],
rlds.ACTION: [
[[1], [2], [3]],
[[2], [3], [4]],
[[3], [4], [5]],
],
rlds.REWARD: [
[[1.0], [2.0], [3.0]],
[[2.0], [3.0], [4.0]],
[[3.0], [4.0], [5.0]],
],
rlds.DISCOUNT: [
[[1], [1], [1]],
[[1], [1], [1]],
[[1], [1], [1]],
],
rlds.IS_FIRST: [
[[True], [False], [False]],
[[False], [False], [False]],
[[False], [False], [False]],
],
rlds.IS_LAST: [
[[False], [False], [False]],
[[False], [False], [False]],
[[False], [False], [True]],
],
rlds.IS_TERMINAL: [
[[False], [False], [False]],
[[False], [False], [False]],
[[False], [False], [True]],
],
# n-step return should be equal to the rewards.
dataset_lib.N_STEP_RETURN: [
[[1.0], [2.0], [3.0]],
[[2.0], [3.0], [4.0]],
[[3.0], [4.0], [5.0]],
],
}
self.expect_equal_datasets(
batched, tf.data.Dataset.from_tensor_slices(expected_steps))
def test_episode_to_timestep_batch_drop_return_horizon(self):
steps = {
rlds.OBSERVATION: [[1], [2], [3], [4], [5], [6]],
rlds.REWARD: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
}
episode = {rlds.STEPS: tf.data.Dataset.from_tensor_slices(steps)}
batched = dataset_lib.episode_to_timestep_batch(
episode,
return_horizon=2,
calculate_episode_return=True,
drop_return_horizon=True)
    # The last two steps of the episode should be dropped. There will be 4 steps
    # left and since the return horizon is 2, only a single 3-batched step
    # should be emitted. The episode return should be the sum of the rewards of
    # the first 4 steps.
expected_steps = {
rlds.OBSERVATION: [[[1], [2], [3]]],
rlds.REWARD: [[[1.0], [2.0], [3.0]]],
dataset_lib.N_STEP_RETURN: [[[3.0], [5.0], [7.0]]],
dataset_lib.EPISODE_RETURN: [[[10.0], [10.0], [10.0]]],
}
self.expect_equal_datasets(
batched, tf.data.Dataset.from_tensor_slices(expected_steps))
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/jax/mbop/dataset_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MBOP Builder."""
import functools
from typing import Iterator, Optional
from acme import core
from acme import specs
from acme import types
from acme.agents.jax import builders
from acme.agents.jax.mbop import acting
from acme.agents.jax.mbop import config as mbop_config
from acme.agents.jax.mbop import learning
from acme.agents.jax.mbop import losses as mbop_losses
from acme.agents.jax.mbop import networks as mbop_networks
from acme.jax import networks as networks_lib
from acme.jax import running_statistics
from acme.utils import counting
from acme.utils import loggers
import optax
class MBOPBuilder(builders.OfflineBuilder[mbop_networks.MBOPNetworks,
acting.ActorCore, types.Transition]):
"""MBOP Builder.
This builder uses ensemble regressor learners for the world model, policy
prior and the n-step return models with fixed learning rates. The ensembles
and the learning rate are configured in the config.
"""
def __init__(
self,
config: mbop_config.MBOPConfig,
losses: mbop_losses.MBOPLosses,
mean_std: Optional[running_statistics.NestedMeanStd] = None,
):
"""Initializes an MBOP builder.
Args:
config: a config with MBOP hyperparameters.
losses: MBOP losses.
mean_std: NestedMeanStd used to normalize the samples.
"""
self._config = config
self._losses = losses
self._mean_std = mean_std
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: mbop_networks.MBOPNetworks,
dataset: Iterator[types.Transition],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
"""See base class."""
def make_ensemble_regressor_learner(
name: str,
logger_fn: loggers.LoggerFactory,
counter: counting.Counter,
rng_key: networks_lib.PRNGKey,
iterator: Iterator[types.Transition],
network: networks_lib.FeedForwardNetwork,
loss: mbop_losses.TransitionLoss,
) -> core.Learner:
"""Creates an ensemble regressor learner."""
return learning.make_ensemble_regressor_learner(
name,
self._config.num_networks,
logger_fn,
counter,
rng_key,
iterator,
network,
loss,
optax.adam(self._config.learning_rate),
self._config.num_sgd_steps_per_step,
)
make_world_model_learner = functools.partial(
make_ensemble_regressor_learner, 'world_model')
make_policy_prior_learner = functools.partial(
make_ensemble_regressor_learner, 'policy_prior')
make_n_step_return_learner = functools.partial(
make_ensemble_regressor_learner, 'n_step_return')
counter = counter or counting.Counter(time_delta=0.)
return learning.MBOPLearner(
networks,
self._losses,
dataset,
random_key,
logger_fn,
make_world_model_learner,
make_policy_prior_learner,
make_n_step_return_learner,
counter,
)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: acting.ActorCore,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
) -> core.Actor:
"""See base class."""
del environment_spec
return acting.make_actor(policy, random_key, variable_source)
def make_policy(
self,
networks: mbop_networks.MBOPNetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool,
) -> acting.ActorCore:
"""See base class."""
return acting.make_ensemble_actor_core(
networks,
self._config.mppi_config,
environment_spec,
self._mean_std,
use_round_robin=not evaluation)
|
acme-master
|
acme/agents/jax/mbop/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset related definitions and methods."""
import functools
import itertools
from typing import Iterator, Optional
from acme import types
from acme.jax import running_statistics
import jax
import jax.numpy as jnp
import rlds
import tensorflow as tf
import tree
# Keys in extras dictionary of the transitions.
# Total return over n-steps.
N_STEP_RETURN: str = 'n_step_return'
# Total return of the episode that the transition belongs to.
EPISODE_RETURN: str = 'episode_return'
# Indices of the time-batched transitions.
PREVIOUS: int = 0
CURRENT: int = 1
NEXT: int = 2
def _append_n_step_return(output, n_step_return):
"""Append n-step return to an output step."""
output[N_STEP_RETURN] = n_step_return
return output
def _append_episode_return(output, episode_return):
"""Append episode return to an output step."""
output[EPISODE_RETURN] = episode_return
return output
def _expand_scalars(output):
"""If rewards are scalar, expand them."""
return tree.map_structure(tf.experimental.numpy.atleast_1d, output)
def episode_to_timestep_batch(
episode: rlds.BatchedStep,
return_horizon: int = 0,
drop_return_horizon: bool = False,
calculate_episode_return: bool = False) -> tf.data.Dataset:
"""Converts an episode into multi-timestep batches.
Args:
episode: Batched steps as provided directly by RLDS.
return_horizon: int describing the horizon to which we should accumulate the
return.
drop_return_horizon: bool whether we should drop the last `return_horizon`
steps to avoid mis-calculated returns near the end of the episode.
calculate_episode_return: Whether to calculate episode return. Can be an
expensive operation on datasets with many episodes.
Returns:
    A tf.data.Dataset of 3-batched steps, with scalar rewards expanded to 1D
    rewards.
This means that for every step, the corresponding elements will be a batch of
size 3, with the first batched element corresponding to *_t-1, the second to
*_t and the third to *_t+1, e.g. you can access the previous observation as:
```
o_tm1 = el[types.OBSERVATION][0]
```
  Two additional keys can be added: `N_STEP_RETURN` ('n_step_return'), which
  corresponds to the undiscounted return for horizon `return_horizon` from time
  t (always present), and `EPISODE_RETURN` ('episode_return'), which
  corresponds to the total return of the associated episode (if
  `calculate_episode_return` is True). Rewards are converted to be (at least)
one-dimensional, prior to batching (to avoid ()-shaped elements).
  In this example, 0-valued observations correspond to o_{t-1}, 1-valued
  observations correspond to o_t, and 2-valued observations correspond to
  o_{t+1}. The same structure holds for all keys, except `N_STEP_RETURN` and
  `EPISODE_RETURN`, which hold scalar returns (expanded to 1D before batching).
```
ipdb> el[types.OBSERVATION]
<tf.Tensor: shape=(3, 11), dtype=float32, numpy=
array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.]], dtype=float32)>
```
"""
steps = episode[rlds.STEPS]
if drop_return_horizon:
episode_length = steps.cardinality()
steps = steps.take(episode_length - return_horizon)
# Calculate n-step return:
rewards = steps.map(lambda step: step[rlds.REWARD])
batched_rewards = rlds.transformations.batch(
rewards, size=return_horizon, shift=1, stride=1, drop_remainder=True)
returns = batched_rewards.map(tf.math.reduce_sum)
output = tf.data.Dataset.zip((steps, returns)).map(_append_n_step_return)
  # Calculate the total episode return for potential filtering, summing the
  # rewards over all remaining steps.
if calculate_episode_return:
dtype = jnp.float64 if jax.config.jax_enable_x64 else jnp.float32
# Need to redefine this here to avoid a tf.data crash.
rewards = steps.map(lambda step: step[rlds.REWARD])
episode_return = rewards.reduce(dtype(0), lambda x, y: x + y)
output = output.map(
functools.partial(
_append_episode_return, episode_return=episode_return))
output = output.map(_expand_scalars)
output = rlds.transformations.batch(
output, size=3, shift=1, drop_remainder=True)
return output
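# Illustrative sketch (not part of the original module): converting a tiny
# synthetic episode into 3-batched timesteps with a 2-step return. The field
# values are arbitrary; only the keys used below are required.
def _example_episode_to_timestep_batch() -> tf.data.Dataset:
  steps = {
      rlds.OBSERVATION: [[1], [2], [3], [4]],
      rlds.REWARD: [1.0, 2.0, 3.0, 4.0],
  }
  episode = {rlds.STEPS: tf.data.Dataset.from_tensor_slices(steps)}
  # Each emitted element batches the (t-1, t, t+1) steps and carries the 2-step
  # return under the N_STEP_RETURN key.
  return episode_to_timestep_batch(episode, return_horizon=2)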
def _step_to_transition(rlds_step: rlds.BatchedStep) -> types.Transition:
"""Converts batched RLDS steps to batched transitions."""
return types.Transition(
observation=rlds_step[rlds.OBSERVATION],
action=rlds_step[rlds.ACTION],
reward=rlds_step[rlds.REWARD],
discount=rlds_step[rlds.DISCOUNT],
      # We provide next_observation if an algorithm needs it; note, however,
      # that it will only contain s_t and s_t+1, so it will be one element
      # short of all other attributes (which contain s_t-1, s_t, s_t+1).
next_observation=tree.map_structure(lambda x: x[1:],
rlds_step[rlds.OBSERVATION]),
extras={
N_STEP_RETURN: rlds_step[N_STEP_RETURN],
})
def episodes_to_timestep_batched_transitions(
episode_dataset: tf.data.Dataset,
return_horizon: int = 10,
drop_return_horizon: bool = False,
min_return_filter: Optional[float] = None) -> tf.data.Dataset:
"""Process an existing dataset converting it to episode to 3-transitions.
A 3-transition is an Transition with each attribute having an extra dimension
of size 3, representing 3 consecutive timesteps. Each 3-step object will be
in random order relative to each other. See `episode_to_timestep_batch` for
more information.
Args:
episode_dataset: An RLDS dataset to process.
    return_horizon: The horizon over which to calculate Monte-Carlo returns.
drop_return_horizon: Whether we should drop the last `return_horizon` steps.
min_return_filter: Minimum episode return below which we drop an episode.
Returns:
A tf.data.Dataset of 3-transitions.
"""
dataset = episode_dataset.interleave(
functools.partial(
episode_to_timestep_batch,
return_horizon=return_horizon,
drop_return_horizon=drop_return_horizon,
calculate_episode_return=min_return_filter is not None),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
deterministic=False)
if min_return_filter is not None:
def filter_on_return(step):
return step[EPISODE_RETURN][0][0] > min_return_filter
dataset = dataset.filter(filter_on_return)
dataset = dataset.map(
_step_to_transition, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
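# Illustrative sketch (not part of the original module): an end-to-end pipeline
# from an RLDS episode dataset to an iterator of batched 3-transitions.
# `episode_dataset` is assumed to be an RLDS-format tf.data.Dataset.
def _example_transition_pipeline(episode_dataset: tf.data.Dataset):
  transitions = episodes_to_timestep_batched_transitions(
      episode_dataset, return_horizon=10, min_return_filter=0.0)
  # Shuffle and batch before feeding the learner; the batch size is arbitrary.
  return transitions.shuffle(10_000).batch(256).as_numpy_iterator()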
def get_normalization_stats(
iterator: Iterator[types.Transition],
num_normalization_batches: int = 50
) -> running_statistics.RunningStatisticsState:
"""Precomputes normalization statistics over a fixed number of batches.
The iterator should contain batches of 3-transitions, i.e. with two leading
dimensions, the first one denoting the batch dimension and the second one the
previous, current and next timesteps. The statistics are calculated using the
data of the previous timestep.
Args:
    iterator: Iterator of batches of 3-transitions.
num_normalization_batches: Number of batches to calculate the statistics.
Returns:
RunningStatisticsState containing the normalization statistics.
"""
# Set up normalization:
example = next(iterator)
unbatched_single_example = jax.tree_map(lambda x: x[0, PREVIOUS, :], example)
mean_std = running_statistics.init_state(unbatched_single_example)
for batch in itertools.islice(iterator, num_normalization_batches - 1):
example = jax.tree_map(lambda x: x[:, PREVIOUS, :], batch)
mean_std = running_statistics.update(mean_std, example)
return mean_std
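# Illustrative sketch (not part of the original module): precomputing the
# statistics from a batched 3-transition dataset. `batched_dataset` is a
# hypothetical tf.data.Dataset of batched 3-transitions, e.g. the output of
# `episodes_to_timestep_batched_transitions(...).batch(256)`.
def _example_get_normalization_stats(batched_dataset: tf.data.Dataset):
  mean_std = get_normalization_stats(
      batched_dataset.as_numpy_iterator(), num_normalization_batches=10)
  # The resulting state can be passed to `running_statistics.normalize` or
  # `running_statistics.denormalize` during training or acting.
  return mean_std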
|
acme-master
|
acme/agents/jax/mbop/dataset.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MBOP networks definitions."""
import dataclasses
from typing import Any, Tuple
from acme import specs
from acme.jax import networks
from acme.jax import utils
import haiku as hk
import jax.numpy as jnp
import numpy as np
# The term network is used in a general sense, e.g. for the CRR policy prior, it
# will be a dataclass that encapsulates the networks used by the CRR (learner).
WorldModelNetwork = Any
PolicyPriorNetwork = Any
NStepReturnNetwork = Any
@dataclasses.dataclass
class MBOPNetworks:
"""Container class to hold MBOP networks."""
world_model_network: WorldModelNetwork
policy_prior_network: PolicyPriorNetwork
n_step_return_network: NStepReturnNetwork
def make_network_from_module(
module: hk.Transformed,
spec: specs.EnvironmentSpec) -> networks.FeedForwardNetwork:
"""Creates a network with dummy init arguments using the specified module.
Args:
module: Module that expects one batch axis and one features axis for its
inputs.
spec: EnvironmentSpec shapes to derive dummy inputs.
Returns:
FeedForwardNetwork whose `init` method only takes a random key, and `apply`
takes an observation and action and produces an output.
"""
dummy_obs = utils.add_batch_dim(utils.zeros_like(spec.observations))
dummy_action = utils.add_batch_dim(utils.zeros_like(spec.actions))
return networks.FeedForwardNetwork(
lambda key: module.init(key, dummy_obs, dummy_action), module.apply)
def make_world_model_network(
spec: specs.EnvironmentSpec, hidden_layer_sizes: Tuple[int, ...] = (64, 64)
) -> networks.FeedForwardNetwork:
"""Creates a world model network used by the agent."""
observation_size = np.prod(spec.observations.shape, dtype=int)
def _world_model_fn(observation_t, action_t, is_training=False, key=None):
    # is_training and key allow defining train/test-dependent modules
    # like dropout.
del is_training
del key
network = hk.nets.MLP(hidden_layer_sizes + (observation_size + 1,))
# World model returns both an observation and a reward.
observation_tp1, reward_t = jnp.split(
network(jnp.concatenate([observation_t, action_t], axis=-1)),
[observation_size],
axis=-1)
return observation_tp1, reward_t
world_model = hk.without_apply_rng(hk.transform(_world_model_fn))
return make_network_from_module(world_model, spec)
def make_policy_prior_network(
spec: specs.EnvironmentSpec, hidden_layer_sizes: Tuple[int, ...] = (64, 64)
) -> networks.FeedForwardNetwork:
"""Creates a policy prior network used by the agent."""
action_size = np.prod(spec.actions.shape, dtype=int)
def _policy_prior_fn(observation_t, action_tm1, is_training=False, key=None):
    # is_training and key allow defining train/test-dependent modules
    # like dropout.
del is_training
del key
network = hk.nets.MLP(hidden_layer_sizes + (action_size,))
# Policy prior returns an action.
return network(jnp.concatenate([observation_t, action_tm1], axis=-1))
policy_prior = hk.without_apply_rng(hk.transform(_policy_prior_fn))
return make_network_from_module(policy_prior, spec)
def make_n_step_return_network(
spec: specs.EnvironmentSpec, hidden_layer_sizes: Tuple[int, ...] = (64, 64)
) -> networks.FeedForwardNetwork:
"""Creates an N-step return network used by the agent."""
def _n_step_return_fn(observation_t, action_t, is_training=False, key=None):
    # is_training and key allow defining train/test-dependent modules
    # like dropout.
del is_training
del key
network = hk.nets.MLP(hidden_layer_sizes + (1,))
return network(jnp.concatenate([observation_t, action_t], axis=-1))
n_step_return = hk.without_apply_rng(hk.transform(_n_step_return_fn))
return make_network_from_module(n_step_return, spec)
def make_networks(
spec: specs.EnvironmentSpec,
hidden_layer_sizes: Tuple[int, ...] = (64, 64),
) -> MBOPNetworks:
"""Creates networks used by the agent."""
world_model_network = make_world_model_network(
spec, hidden_layer_sizes=hidden_layer_sizes)
policy_prior_network = make_policy_prior_network(
spec, hidden_layer_sizes=hidden_layer_sizes)
n_step_return_network = make_n_step_return_network(
spec, hidden_layer_sizes=hidden_layer_sizes)
return MBOPNetworks(
world_model_network=world_model_network,
policy_prior_network=policy_prior_network,
n_step_return_network=n_step_return_network)
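# Illustrative sketch (not part of the original module): building the default
# MBOP networks from an environment spec and initializing the world model
# parameters. `environment_spec` is assumed to be a `specs.EnvironmentSpec`
# with array-valued observation and action specs.
def _example_make_networks(environment_spec: specs.EnvironmentSpec):
  import jax  # Assumed available alongside jax.numpy.
  mbop_networks = make_networks(environment_spec, hidden_layer_sizes=(32, 32))
  world_model_params = mbop_networks.world_model_network.init(
      jax.random.PRNGKey(0))
  return mbop_networks, world_model_params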
|
acme-master
|
acme/agents/jax/mbop/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the MBOP agent."""
import functools
from acme import specs
from acme import types
from acme.agents.jax.mbop import learning
from acme.agents.jax.mbop import losses as mbop_losses
from acme.agents.jax.mbop import networks as mbop_networks
from acme.testing import fakes
from acme.utils import loggers
import chex
import jax
import optax
import rlds
from absl.testing import absltest
class MBOPTest(absltest.TestCase):
def test_learner(self):
with chex.fake_pmap_and_jit():
num_sgd_steps_per_step = 1
num_steps = 5
num_networks = 7
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(
episode_length=10, bounded=True, observation_dim=3, action_dim=2)
spec = specs.make_environment_spec(environment)
dataset = fakes.transition_dataset(environment)
# Add dummy n-step return to the transitions.
def _add_dummy_n_step_return(sample):
return types.Transition(*sample.data)._replace(
extras={'n_step_return': 1.0})
dataset = dataset.map(_add_dummy_n_step_return)
# Convert into time-batched format with previous, current and next
# transitions.
dataset = rlds.transformations.batch(dataset, 3)
dataset = dataset.batch(8).as_numpy_iterator()
# Use the default networks and losses.
networks = mbop_networks.make_networks(spec)
losses = mbop_losses.MBOPLosses()
def logger_fn(label: str, steps_key: str):
return loggers.make_default_logger(label, steps_key=steps_key)
def make_learner_fn(name, logger_fn, counter, rng_key, dataset, network,
loss):
return learning.make_ensemble_regressor_learner(name, num_networks,
logger_fn, counter,
rng_key, dataset,
network, loss,
optax.adam(0.01),
num_sgd_steps_per_step)
learner = learning.MBOPLearner(
networks, losses, dataset, jax.random.PRNGKey(0), logger_fn,
functools.partial(make_learner_fn, 'world_model'),
functools.partial(make_learner_fn, 'policy_prior'),
functools.partial(make_learner_fn, 'n_step_return'))
# Train the agent
for _ in range(num_steps):
learner.step()
# Save and restore.
learner_state = learner.save()
learner.restore(learner_state)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/jax/mbop/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mppi."""
import functools
from typing import Any
from acme import specs
from acme.agents.jax.mbop import ensemble
from acme.agents.jax.mbop import models
from acme.agents.jax.mbop import mppi
from acme.jax import networks as networks_lib
import jax
import jax.numpy as jnp
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
def get_fake_world_model() -> networks_lib.FeedForwardNetwork:
def apply(params: Any, observation_t: jnp.ndarray, action_t: jnp.ndarray):
del params
return observation_t, jnp.ones((
action_t.shape[0],
1,
))
return networks_lib.FeedForwardNetwork(init=lambda: None, apply=apply)
def get_fake_policy_prior() -> networks_lib.FeedForwardNetwork:
return networks_lib.FeedForwardNetwork(
init=lambda: None,
apply=lambda params, observation_t, action_tm1: action_tm1)
def get_fake_n_step_return() -> networks_lib.FeedForwardNetwork:
def apply(params, observation_t, action_t):
del params, action_t
return jnp.ones((observation_t.shape[0], 1))
return networks_lib.FeedForwardNetwork(init=lambda: None, apply=apply)
class WeightedAverageTests(parameterized.TestCase):
@parameterized.parameters((np.array([1, 1, 1]), 1), (np.array([0, 1, 0]), 10),
(np.array([-1, 1, -1]), 4),
(np.array([-10, 30, 0]), -0.5))
def test_weighted_averages(self, cum_reward, kappa):
"""Compares method with a local version of the exp-weighted averaging."""
action_trajectories = jnp.reshape(
jnp.arange(3 * 10 * 4), (3, 10, 4), order='F')
averaged_trajectory = mppi.return_weighted_average(
action_trajectories=action_trajectories,
cum_reward=cum_reward,
kappa=kappa)
exp_weights = jnp.exp(kappa * cum_reward)
# Verify single-value averaging lines up with the global averaging call:
for i in range(10):
for j in range(4):
np.testing.assert_allclose(
averaged_trajectory[i, j],
jnp.sum(exp_weights * action_trajectories[:, i, j]) /
jnp.sum(exp_weights),
atol=1E-5,
rtol=1E-5)
class MPPITest(parameterized.TestCase):
"""This tests the MPPI planner to make sure it is correctly rolling out.
It does not check the actual performance of the planner, as this would be a
bit more complicated to set up.
"""
# TODO(dulacarnold): Look at how we can check this is actually finding an
# optimal path through the model.
def setUp(self):
super().setUp()
self.state_dims = 8
self.action_dims = 4
self.params = {
'world': jnp.ones((3,)),
'policy': jnp.ones((3,)),
'value': jnp.ones((3,))
}
self.env_spec = specs.EnvironmentSpec(
observations=specs.Array(shape=(self.state_dims,), dtype=float),
actions=specs.Array(shape=(self.action_dims,), dtype=float),
rewards=specs.Array(shape=(1,), dtype=float, name='reward'),
discounts=specs.BoundedArray(
shape=(), dtype=float, minimum=0., maximum=1., name='discount'))
@parameterized.named_parameters(('NO-PLAN', 0), ('NORMAL', 10))
def test_planner_init(self, horizon: int):
world_model = get_fake_world_model()
rr_world_model = functools.partial(ensemble.apply_round_robin,
world_model.apply)
policy_prior = get_fake_policy_prior()
def _rr_policy_prior(params, key, observation_t, action_tm1):
del key
return ensemble.apply_round_robin(
policy_prior.apply,
params,
observation_t=observation_t,
action_tm1=action_tm1)
rr_policy_prior = models.feed_forward_policy_prior_to_actor_core(
_rr_policy_prior, jnp.zeros((1, self.action_dims)))
n_step_return = get_fake_n_step_return()
n_step_return = functools.partial(ensemble.apply_mean, n_step_return.apply)
config = mppi.MPPIConfig(
sigma=1,
beta=0.2,
horizon=horizon,
n_trajectories=9,
action_aggregation_fn=functools.partial(
mppi.return_weighted_average, kappa=1))
previous_trajectory = mppi.get_initial_trajectory(config, self.env_spec)
key = jax.random.PRNGKey(0)
for _ in range(5):
previous_trajectory = mppi.mppi_planner(
config,
world_model=rr_world_model,
policy_prior=rr_policy_prior,
n_step_return=n_step_return,
world_model_params=self.params,
policy_prior_params=self.params,
n_step_return_params=self.params,
random_key=key,
observation=jnp.ones(self.state_dims),
previous_trajectory=previous_trajectory)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/jax/mbop/mppi_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to provide ensembling support on top of a base network."""
import functools
from typing import (Any, Callable)
from acme.jax import networks
import jax
import jax.numpy as jnp
def _split_batch_dimension(new_batch: int, data: jnp.ndarray) -> jnp.ndarray:
"""Splits the batch dimension and introduces new one with size `new_batch`.
The result has two batch dimensions, first one of size `new_batch`, second one
of size `data.shape[0]/new_batch`. It expects that `data.shape[0]` is
divisible by `new_batch`.
Args:
new_batch: Dimension of outer batch dimension.
data: jnp.ndarray to be reshaped.
Returns:
jnp.ndarray with extra batch dimension at start and updated second
dimension.
"""
  # The first dimension will be used for allocating to a specific ensemble
  # member, the second dimension is the parallelized batch dimension, and the
  # remaining dimensions are passed as-is to the wrapped network.
  # We use Fortran (F) order so that each input batch element i is allocated to
  # ensemble member k = i % new_batch.
return jnp.reshape(data, (new_batch, -1) + data.shape[1:], order='F')
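# Worked example (illustrative): `_split_batch_dimension(3, data)` with `data`
# of shape (6, 2) produces an array of shape (3, 2, 2) that allocates input
# rows [0, 3] to member 0, [1, 4] to member 1 and [2, 5] to member 2, i.e. row
# i goes to ensemble member i % 3.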
def _repeat_n(new_batch: int, data: jnp.ndarray) -> jnp.ndarray:
"""Create new batch dimension of size `new_batch` by repeating `data`."""
return jnp.broadcast_to(data, (new_batch,) + data.shape)
def ensemble_init(base_init: Callable[[networks.PRNGKey], networks.Params],
num_networks: int, rnd: jnp.ndarray):
"""Initializes the ensemble parameters.
Args:
    base_init: An init function that takes only a PRNGKey. If a network's init
      function requires other parameters, such as example inputs, they need to
      have been previously bound, e.g. with functools.partial using kwargs.
num_networks: Number of networks to generate parameters for.
rnd: PRNGKey to split from when generating parameters.
Returns:
`params` for the set of ensemble networks.
"""
rnds = jax.random.split(rnd, num_networks)
return jax.vmap(base_init)(rnds)
def apply_round_robin(base_apply: Callable[[networks.Params, Any], Any],
params: networks.Params, *args, **kwargs) -> Any:
"""Passes the input in a round-robin manner.
The round-robin application means that each element of the input batch will
be passed through a single ensemble member in a deterministic round-robin
manner, i.e. element_i -> member_k where k = i % num_networks.
It expects that:
* `base_apply(member_params, *member_args, **member_kwargs)` is a valid call,
where:
* `member_params.shape = params.shape[1:]`
* `member_args` and `member_kwargs` have the same structure as `args` and
`kwargs`.
* `params[k]` contains the params of the k-th member of the ensemble.
* All jax arrays in `args` and `kwargs` have a batch dimension at axis 0 of
the same size, which is divisible by `params.shape[0]`.
Args:
base_apply: Base network `apply` function that will be used for round-robin.
NOTE -- This will not work with mutable/stateful apply functions. --
params: Model parameters. Number of networks is deduced from this.
*args: Allows for arbitrary call signatures for `base_apply`.
**kwargs: Allows for arbitrary call signatures for `base_apply`.
Returns:
pytree of the round-robin application.
Output shape will be [initial_batch_size, <remaining dimensions>].
"""
# `num_networks` is the size of the batch dimension in `params`.
num_networks = jax.tree_util.tree_leaves(params)[0].shape[0]
# Reshape args and kwargs for the round-robin:
args = jax.tree_map(
functools.partial(_split_batch_dimension, num_networks), args)
kwargs = jax.tree_map(
functools.partial(_split_batch_dimension, num_networks), kwargs)
  # `out.shape` is `(num_networks, initial_batch_size/num_networks, ...)`.
out = jax.vmap(base_apply)(params, *args, **kwargs)
  # Reshape back to [initial_batch_size, <remaining dimensions>]. Using 'F'
  # order again restores the original batch ordering.
return jax.tree_map(lambda x: x.reshape((-1,) + x.shape[2:], order='F'), out)
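# Illustrative sketch (not part of the original module): round-robin ensemble
# application of a small Haiku MLP. All names below are example-only.
def _example_apply_round_robin():
  import haiku as hk  # Assumed available, as elsewhere in the MBOP agent.
  model = hk.without_apply_rng(hk.transform(lambda x: hk.nets.MLP([8, 2])(x)))
  num_networks = 3
  params = ensemble_init(
      functools.partial(model.init, x=jnp.zeros((1, 4))), num_networks,
      jax.random.PRNGKey(0))
  # The batch size (6) must be divisible by num_networks (3); batch element i
  # is processed by ensemble member i % 3. The output has shape (6, 2).
  return apply_round_robin(model.apply, params, jnp.ones((6, 4)))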
def apply_all(base_apply: Callable[[networks.Params, Any], Any],
params: networks.Params, *args, **kwargs) -> Any:
"""Pass the input to all ensemble members.
Inputs can either have a batch dimension which will get implicitly vmapped
over, or can be a single vector which will get sent to all ensemble members.
  e.g. [<input_dims>] or [batch_size, <input_dims>].
Args:
base_apply: Base network `apply` function that will be used for averaging.
NOTE -- This will not work with mutable/stateful apply functions. --
params: Model parameters. Number of networks is deduced from this.
*args: Allows for arbitrary call signatures for `base_apply`.
**kwargs: Allows for arbitrary call signatures for `base_apply`.
Returns:
pytree of the resulting output of passing input to all ensemble members.
Output shape will be [num_members, batch_size, <network output dims>].
"""
# `num_networks` is the size of the batch dimension in `params`.
num_networks = jax.tree_util.tree_leaves(params)[0].shape[0]
args = jax.tree_map(functools.partial(_repeat_n, num_networks), args)
kwargs = jax.tree_map(functools.partial(_repeat_n, num_networks), kwargs)
# `out` is of shape `(num_networks, batch_size, <remaining dimensions>)`.
return jax.vmap(base_apply)(params, *args, **kwargs)
def apply_mean(base_apply: Callable[[networks.Params, Any], Any],
params: networks.Params, *args, **kwargs) -> Any:
"""Calculates the mean over all ensemble members for each batch element.
Args:
base_apply: Base network `apply` function that will be used for averaging.
NOTE -- This will not work with mutable/stateful apply functions. --
params: Model parameters. Number of networks is deduced from this.
*args: Allows for arbitrary call signatures for `base_apply`.
**kwargs: Allows for arbitrary call signatures for `base_apply`.
Returns:
pytree of the average over all ensembles for each element.
Output shape will be [batch_size, <network output_dims>]
"""
out = apply_all(base_apply, params, *args, **kwargs)
return jax.tree_map(functools.partial(jnp.mean, axis=0), out)
def make_ensemble(base_network: networks.FeedForwardNetwork,
ensemble_apply: Callable[..., Any],
num_networks: int) -> networks.FeedForwardNetwork:
return networks.FeedForwardNetwork(
init=functools.partial(ensemble_init, base_network.init, num_networks),
apply=functools.partial(ensemble_apply, base_network.apply))
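# Illustrative sketch (not part of the original module): an ensemble of five
# members whose `apply` averages the member outputs for every batch element.
# `base_network` is assumed to be a `networks.FeedForwardNetwork`.
def _example_make_ensemble(
    base_network: networks.FeedForwardNetwork) -> networks.FeedForwardNetwork:
  return make_ensemble(base_network, apply_mean, num_networks=5)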
|
acme-master
|
acme/agents/jax/mbop/ensemble.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss function wrappers, assuming a leading batch axis."""
import dataclasses
from typing import Any, Callable, Optional, Tuple, Union
from acme import types
from acme.agents.jax.mbop import dataset
from acme.jax import networks
import jax
import jax.numpy as jnp
# The apply function takes an observation (and an action) as arguments, and is
# usually a network with bound parameters.
TransitionApplyFn = Callable[[networks.Observation, networks.Action], Any]
ObservationOnlyTransitionApplyFn = Callable[[networks.Observation], Any]
TransitionLoss = Callable[[
Union[TransitionApplyFn, ObservationOnlyTransitionApplyFn], types.Transition
], jnp.ndarray]
def mse(a: jnp.ndarray, b: jnp.ndarray) -> jnp.ndarray:
"""MSE distance."""
return jnp.mean(jnp.square(a - b))
def world_model_loss(apply_fn: Callable[[networks.Observation, networks.Action],
Tuple[networks.Observation,
jnp.ndarray]],
steps: types.Transition) -> jnp.ndarray:
"""Returns the loss for the world model.
Args:
apply_fn: applies a transition model (o_t, a_t) -> (o_t+1, r), expects the
leading axis to index the batch and the second axis to index the
transition triplet (t-1, t, t+1).
steps: RLDS dictionary of transition triplets as prepared by
`rlds_loader.episode_to_timestep_batch`.
Returns:
A scalar loss value as jnp.ndarray.
"""
observation_t = jax.tree_map(lambda obs: obs[:, dataset.CURRENT, ...],
steps.observation)
action_t = steps.action[:, dataset.CURRENT, ...]
observation_tp1 = jax.tree_map(lambda obs: obs[:, dataset.NEXT, ...],
steps.observation)
reward_t = steps.reward[:, dataset.CURRENT, ...]
(predicted_observation_tp1,
predicted_reward_t) = apply_fn(observation_t, action_t)
  # predicted_* variables may have an extra outer dimension due to ensembling;
  # the MSE loss still works thanks to broadcasting.
if len(observation_tp1.shape) != len(reward_t.shape):
    # The rewards from the transitions may not have the last singleton dimension.
reward_t = jnp.expand_dims(reward_t, axis=-1)
return mse(
jnp.concatenate([predicted_observation_tp1, predicted_reward_t], axis=-1),
jnp.concatenate([observation_tp1, reward_t], axis=-1))
def policy_prior_loss(
apply_fn: Callable[[networks.Observation, networks.Action],
networks.Action], steps: types.Transition):
"""Returns the loss for the policy prior.
Args:
apply_fn: applies a policy prior (o_t, a_t) -> a_t+1, expects the leading
axis to index the batch and the second axis to index the transition
triplet (t-1, t, t+1).
steps: RLDS dictionary of transition triplets as prepared by
`rlds_loader.episode_to_timestep_batch`.
Returns:
A scalar loss value as jnp.ndarray.
"""
observation_t = jax.tree_map(lambda obs: obs[:, dataset.CURRENT, ...],
steps.observation)
action_tm1 = steps.action[:, dataset.PREVIOUS, ...]
action_t = steps.action[:, dataset.CURRENT, ...]
predicted_action_t = apply_fn(observation_t, action_tm1)
return mse(predicted_action_t, action_t)
def return_loss(apply_fn: Callable[[networks.Observation, networks.Action],
jnp.ndarray], steps: types.Transition):
"""Returns the loss for the n-step return model.
Args:
apply_fn: applies an n-step return model (o_t, a_t) -> r, expects the
leading axis to index the batch and the second axis to index the
transition triplet (t-1, t, t+1).
steps: RLDS dictionary of transition triplets as prepared by
`rlds_loader.episode_to_timestep_batch`.
Returns:
A scalar loss value as jnp.ndarray.
"""
observation_t = jax.tree_map(lambda obs: obs[:, dataset.CURRENT, ...],
steps.observation)
action_t = steps.action[:, dataset.CURRENT, ...]
n_step_return_t = steps.extras[dataset.N_STEP_RETURN][:, dataset.CURRENT, ...]
predicted_n_step_return_t = apply_fn(observation_t, action_t)
return mse(predicted_n_step_return_t, n_step_return_t)
@dataclasses.dataclass
class MBOPLosses:
"""Losses for the world model, policy prior and the n-step return."""
world_model_loss: Optional[TransitionLoss] = world_model_loss
policy_prior_loss: Optional[TransitionLoss] = policy_prior_loss
n_step_return_loss: Optional[TransitionLoss] = return_loss
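# Illustrative sketch (not part of the original module): evaluating the policy
# prior loss for one batch of 3-transitions. `policy_network` is assumed to be
# a FeedForwardNetwork and `transitions` a batched 3-transition
# `types.Transition`, as produced by the MBOP dataset utilities.
def _example_policy_prior_loss(policy_network, params, transitions):
  import functools  # Assumed available; not imported at module level here.
  apply_fn = functools.partial(policy_network.apply, params)
  return policy_prior_loss(apply_fn, transitions)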
|
acme-master
|
acme/agents/jax/mbop/losses.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Combined offline learning of world model, policy and N-step return."""
import dataclasses
import functools
import itertools
import time
from typing import Any, Callable, Iterator, List, Optional
from acme import core
from acme import types
from acme.agents.jax import bc
from acme.agents.jax.mbop import ensemble
from acme.agents.jax.mbop import losses as mbop_losses
from acme.agents.jax.mbop import networks as mbop_networks
from acme.jax import networks as networks_lib
from acme.jax import types as jax_types
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
import jax
import jax.numpy as jnp
import optax
@dataclasses.dataclass
class TrainingState:
"""States of the world model, policy prior and n-step return learners."""
world_model: Any
policy_prior: Any
n_step_return: Any
LoggerFn = Callable[[str, str], loggers.Logger]
# Creates a world model learner.
MakeWorldModelLearner = Callable[[
LoggerFn,
counting.Counter,
jax_types.PRNGKey,
Iterator[types.Transition],
mbop_networks.WorldModelNetwork,
mbop_losses.TransitionLoss,
], core.Learner]
# Creates a policy prior learner.
MakePolicyPriorLearner = Callable[[
LoggerFn,
counting.Counter,
jax_types.PRNGKey,
Iterator[types.Transition],
mbop_networks.PolicyPriorNetwork,
mbop_losses.TransitionLoss,
], core.Learner]
# Creates an n-step return model learner.
MakeNStepReturnLearner = Callable[[
LoggerFn,
counting.Counter,
jax_types.PRNGKey,
Iterator[types.Transition],
mbop_networks.NStepReturnNetwork,
mbop_losses.TransitionLoss,
], core.Learner]
def make_ensemble_regressor_learner(
name: str,
num_networks: int,
logger_fn: loggers.LoggerFactory,
counter: counting.Counter,
rng_key: jnp.ndarray,
iterator: Iterator[types.Transition],
base_network: networks_lib.FeedForwardNetwork,
loss: mbop_losses.TransitionLoss,
optimizer: optax.GradientTransformation,
num_sgd_steps_per_step: int,
):
"""Creates an ensemble regressor learner from the base network.
Args:
name: Name of the learner used for logging and counters.
num_networks: Number of networks in the ensemble.
logger_fn: Constructs a logger for a label.
counter: Parent counter object.
rng_key: Random key.
iterator: An iterator of time-batched transitions used to train the
networks.
base_network: Base network for the ensemble.
loss: Training loss to use.
optimizer: Optax optimizer.
num_sgd_steps_per_step: Number of gradient updates per step.
Returns:
An ensemble regressor learner.
"""
mbop_ensemble = ensemble.make_ensemble(base_network, ensemble.apply_all,
num_networks)
local_counter = counting.Counter(parent=counter, prefix=name)
local_logger = logger_fn(name,
local_counter.get_steps_key()) if logger_fn else None
def loss_fn(networks: bc.BCNetworks, params: networks_lib.Params,
key: jax_types.PRNGKey,
transitions: types.Transition) -> jnp.ndarray:
del key
return loss(
functools.partial(networks.policy_network.apply, params), transitions)
bc_policy_network = bc.convert_to_bc_network(mbop_ensemble)
bc_networks = bc.BCNetworks(bc_policy_network)
# This is effectively a regressor learner.
return bc.BCLearner(
bc_networks,
rng_key,
loss_fn,
optimizer,
iterator,
num_sgd_steps_per_step,
logger=local_logger,
counter=local_counter)
class MBOPLearner(core.Learner):
"""Model-Based Offline Planning (MBOP) learner.
See https://arxiv.org/abs/2008.05556 for more information.
"""
def __init__(self,
networks: mbop_networks.MBOPNetworks,
losses: mbop_losses.MBOPLosses,
iterator: Iterator[types.Transition],
rng_key: jax_types.PRNGKey,
logger_fn: LoggerFn,
make_world_model_learner: MakeWorldModelLearner,
make_policy_prior_learner: MakePolicyPriorLearner,
make_n_step_return_learner: MakeNStepReturnLearner,
counter: Optional[counting.Counter] = None):
"""Creates an MBOP learner.
Args:
networks: One network per model.
losses: One loss per model.
iterator: An iterator of time-batched transitions used to train the
networks.
rng_key: Random key.
logger_fn: Constructs a logger for a label.
make_world_model_learner: Function to create the world model learner.
make_policy_prior_learner: Function to create the policy prior learner.
make_n_step_return_learner: Function to create the n-step return learner.
counter: Parent counter object.
"""
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger_fn('mbop', 'steps')
# Prepare iterators for the learners, to not split the data (preserve sample
# efficiency).
sharded_prefetching_dataset = utils.sharded_prefetch(iterator)
world_model_iterator, policy_prior_iterator, n_step_return_iterator = (
itertools.tee(sharded_prefetching_dataset, 3))
world_model_key, policy_prior_key, n_step_return_key = jax.random.split(
rng_key, 3)
self._world_model = make_world_model_learner(logger_fn, self._counter,
world_model_key,
world_model_iterator,
networks.world_model_network,
losses.world_model_loss)
self._policy_prior = make_policy_prior_learner(
logger_fn, self._counter, policy_prior_key, policy_prior_iterator,
networks.policy_prior_network, losses.policy_prior_loss)
self._n_step_return = make_n_step_return_learner(
logger_fn, self._counter, n_step_return_key, n_step_return_iterator,
networks.n_step_return_network, losses.n_step_return_loss)
# Start recording timestamps after the first learning step to not report
# "warmup" time.
self._timestamp = None
self._learners = {
'world_model': self._world_model,
'policy_prior': self._policy_prior,
'n_step_return': self._n_step_return
}
def step(self):
# Step the world model, policy learner and n-step return learners.
self._world_model.step()
self._policy_prior.step()
self._n_step_return.step()
# Compute the elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Increment counts and record the current time.
counts = self._counter.increment(steps=1, walltime=elapsed_time)
# Attempt to write the logs.
self._logger.write({**counts})
def get_variables(self, names: List[str]) -> List[types.NestedArray]:
variables = []
for name in names:
# Variables will be prefixed by the learner names. If separator is not
# found, learner_name=name, which is OK.
learner_name, _, variable_name = name.partition('-')
learner = self._learners[learner_name]
variables.extend(learner.get_variables([variable_name]))
return variables
def save(self) -> TrainingState:
return TrainingState(
world_model=self._world_model.save(),
policy_prior=self._policy_prior.save(),
n_step_return=self._n_step_return.save())
def restore(self, state: TrainingState):
self._world_model.restore(state.world_model)
self._policy_prior.restore(state.policy_prior)
self._n_step_return.restore(state.n_step_return)
|
acme-master
|
acme/agents/jax/mbop/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The MPPI-inspired JAX actor."""
from typing import List, Mapping, Optional, Tuple
from acme import adders
from acme import core
from acme import specs
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax.mbop import models
from acme.agents.jax.mbop import mppi
from acme.agents.jax.mbop import networks as mbop_networks
from acme.jax import networks as networks_lib
from acme.jax import running_statistics
from acme.jax import variable_utils
import jax
from jax import numpy as jnp
# Recurrent state is the trajectory.
Trajectory = jnp.ndarray
ActorCore = actor_core_lib.ActorCore[
actor_core_lib.SimpleActorCoreRecurrentState[Trajectory],
Mapping[str, jnp.ndarray]]
def make_actor_core(
mppi_config: mppi.MPPIConfig,
world_model: models.WorldModel,
policy_prior: models.PolicyPrior,
n_step_return: models.NStepReturn,
environment_spec: specs.EnvironmentSpec,
mean_std: Optional[running_statistics.NestedMeanStd] = None,
) -> ActorCore:
"""Creates an actor core wrapping the MBOP-configured MPPI planner.
Args:
mppi_config: Planner hyperparameters.
world_model: A world model.
policy_prior: A policy prior.
n_step_return: An n-step return.
environment_spec: Used to initialize the initial trajectory data structure.
mean_std: Used to undo normalization if the networks trained normalized.
Returns:
A recurrent actor core.
"""
if mean_std is not None:
mean_std_observation = running_statistics.NestedMeanStd(
mean=mean_std.mean.observation, std=mean_std.std.observation)
mean_std_action = running_statistics.NestedMeanStd(
mean=mean_std.mean.action, std=mean_std.std.action)
mean_std_reward = running_statistics.NestedMeanStd(
mean=mean_std.mean.reward, std=mean_std.std.reward)
mean_std_n_step_return = running_statistics.NestedMeanStd(
mean=mean_std.mean.extras['n_step_return'],
std=mean_std.std.extras['n_step_return'])
def denormalized_world_model(
params: networks_lib.Params, observation_t: networks_lib.Observation,
action_t: networks_lib.Action
) -> Tuple[networks_lib.Observation, networks_lib.Value]:
"""Denormalizes the reward for proper weighting in the planner."""
observation_tp1, normalized_reward_t = world_model(
params, observation_t, action_t)
reward_t = running_statistics.denormalize(normalized_reward_t,
mean_std_reward)
return observation_tp1, reward_t
planner_world_model = denormalized_world_model
def denormalized_n_step_return(
params: networks_lib.Params, observation_t: networks_lib.Observation,
action_t: networks_lib.Action) -> networks_lib.Value:
"""Denormalize the n-step return for proper weighting in the planner."""
normalized_n_step_return_t = n_step_return(params, observation_t,
action_t)
return running_statistics.denormalize(normalized_n_step_return_t,
mean_std_n_step_return)
planner_n_step_return = denormalized_n_step_return
else:
planner_world_model = world_model
planner_n_step_return = n_step_return
def recurrent_policy(
params_list: List[networks_lib.Params],
random_key: networks_lib.PRNGKey,
observation: networks_lib.Observation,
previous_trajectory: Trajectory,
) -> Tuple[networks_lib.Action, Trajectory]:
# Note that splitting the random key is handled by GenericActor.
if mean_std is not None:
observation = running_statistics.normalize(
observation, mean_std=mean_std_observation)
trajectory = mppi.mppi_planner(
config=mppi_config,
world_model=planner_world_model,
policy_prior=policy_prior,
n_step_return=planner_n_step_return,
world_model_params=params_list[0],
policy_prior_params=params_list[1],
n_step_return_params=params_list[2],
random_key=random_key,
observation=observation,
previous_trajectory=previous_trajectory)
action = trajectory[0, ...]
if mean_std is not None:
action = running_statistics.denormalize(action, mean_std=mean_std_action)
return (action, trajectory)
batched_policy = jax.vmap(recurrent_policy, in_axes=(None, None, 0, 0))
batched_policy = jax.jit(batched_policy)
initial_trajectory = mppi.get_initial_trajectory(
config=mppi_config, env_spec=environment_spec)
initial_trajectory = jnp.expand_dims(initial_trajectory, axis=0)
return actor_core_lib.batched_recurrent_to_actor_core(batched_policy,
initial_trajectory)
def make_ensemble_actor_core(
networks: mbop_networks.MBOPNetworks,
mppi_config: mppi.MPPIConfig,
environment_spec: specs.EnvironmentSpec,
mean_std: Optional[running_statistics.NestedMeanStd] = None,
use_round_robin: bool = True,
) -> ActorCore:
"""Creates an actor core that uses ensemble models.
Args:
networks: MBOP networks.
mppi_config: Planner hyperparameters.
environment_spec: Used to initialize the initial trajectory data structure.
mean_std: Used to undo normalization if the networks trained normalized.
use_round_robin: Whether to use round robin or mean to calculate the policy
prior over the ensemble members.
Returns:
A recurrent actor core.
"""
world_model = models.make_ensemble_world_model(networks.world_model_network)
policy_prior = models.make_ensemble_policy_prior(
networks.policy_prior_network,
environment_spec,
use_round_robin=use_round_robin)
n_step_return = models.make_ensemble_n_step_return(
networks.n_step_return_network)
return make_actor_core(mppi_config, world_model, policy_prior, n_step_return,
environment_spec, mean_std)
def make_actor(actor_core: ActorCore,
random_key: networks_lib.PRNGKey,
variable_source: core.VariableSource,
adder: Optional[adders.Adder] = None) -> core.Actor:
"""Creates an MBOP actor from an actor core.
Args:
actor_core: An MBOP actor core.
random_key: JAX Random key.
variable_source: The source to get networks parameters from.
adder: An adder to add experiences to. The `extras` of the adder holds the
state of the recurrent policy. If `has_extras=True` then the `extras` part
returned from the recurrent policy is appended to the state before added
to the adder.
Returns:
A recurrent actor.
"""
variable_client = variable_utils.VariableClient(
client=variable_source,
key=['world_model-policy', 'policy_prior-policy', 'n_step_return-policy'])
return actors.GenericActor(
actor_core, random_key, variable_client, adder, backend=None)
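# Illustrative sketch (not part of the original module): wiring an ensemble
# actor core into a recurrent actor. `networks`, `mppi_config`,
# `environment_spec` and `learner` are assumed to exist; the learner serves as
# the variable source.
def _example_make_actor(networks: mbop_networks.MBOPNetworks,
                        mppi_config: mppi.MPPIConfig,
                        environment_spec: specs.EnvironmentSpec,
                        learner: core.VariableSource) -> core.Actor:
  actor_core = make_ensemble_actor_core(
      networks, mppi_config, environment_spec, mean_std=None)
  return make_actor(actor_core, jax.random.PRNGKey(0), learner)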
|
acme-master
|
acme/agents/jax/mbop/acting.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPO config."""
import dataclasses
from acme.adders import reverb as adders_reverb
import rlax
@dataclasses.dataclass
class R2D2Config:
"""Configuration options for R2D2 agent."""
discount: float = 0.997
target_update_period: int = 2500
evaluation_epsilon: float = 0.
num_epsilons: int = 256
variable_update_period: int = 400
# Learner options
burn_in_length: int = 40
trace_length: int = 80
sequence_period: int = 40
learning_rate: float = 1e-3
bootstrap_n: int = 5
clip_rewards: bool = False
tx_pair: rlax.TxPair = rlax.SIGNED_HYPERBOLIC_PAIR
# Replay options
samples_per_insert_tolerance_rate: float = 0.1
samples_per_insert: float = 4.0
min_replay_size: int = 50_000
max_replay_size: int = 100_000
batch_size: int = 64
prefetch_size: int = 2
num_parallel_calls: int = 16
replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
# Priority options
importance_sampling_exponent: float = 0.6
priority_exponent: float = 0.9
max_priority_weight: float = 0.9
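# Illustrative usage (not part of the original module): overriding a few
# hyperparameters while keeping the remaining defaults.
#   config = R2D2Config(burn_in_length=20, trace_length=40, batch_size=32)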
|
acme-master
|
acme/agents/jax/r2d2/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of an R2D2 agent."""
from acme.agents.jax.r2d2.actor import EpsilonRecurrentPolicy
from acme.agents.jax.r2d2.actor import make_behavior_policy
from acme.agents.jax.r2d2.builder import R2D2Builder
from acme.agents.jax.r2d2.config import R2D2Config
from acme.agents.jax.r2d2.learning import R2D2Learner
from acme.agents.jax.r2d2.learning import R2D2ReplaySample
from acme.agents.jax.r2d2.networks import make_atari_networks
from acme.agents.jax.r2d2.networks import R2D2Networks
|
acme-master
|
acme/agents/jax/r2d2/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""R2D2 Builder."""
from typing import Generic, Iterator, List, Optional
import acme
from acme import adders
from acme import core
from acme import specs
from acme.adders import reverb as adders_reverb
from acme.adders.reverb import base as reverb_base
from acme.adders.reverb import structured
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.agents.jax.r2d2 import actor as r2d2_actor
from acme.agents.jax.r2d2 import config as r2d2_config
from acme.agents.jax.r2d2 import learning as r2d2_learning
from acme.agents.jax.r2d2 import networks as r2d2_networks
from acme.datasets import reverb as datasets
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import jax
import optax
import reverb
from reverb import structured_writer as sw
import tensorflow as tf
import tree
# TODO(b/450949030): extract the private functions to a library once other
# agents reuse them.
# TODO(b/450949030): add support to add all the final subsequences of
# length < sequence_length at the end of the episode and pad them with zeros.
# We have to check if this requires moving _zero_pad to the adder.
def _build_sequence(length: int,
step_spec: reverb_base.Step) -> reverb_base.Trajectory:
"""Constructs the sequence using only the first value of core_state."""
step_dict = step_spec._asdict()
extras_dict = step_dict.pop('extras')
return reverb_base.Trajectory(
**tree.map_structure(lambda x: x[-length:], step_dict),
extras=tree.map_structure(lambda x: x[-length], extras_dict))
def _zero_pad(sequence_length: int) -> datasets.Transform:
"""Adds zero padding to the right so all samples have the same length."""
def _zero_pad_transform(sample: reverb.ReplaySample) -> reverb.ReplaySample:
trajectory: reverb_base.Trajectory = sample.data
# Split steps and extras data (the extras won't be padded as they only
# contain one element)
trajectory_steps = trajectory._asdict()
trajectory_extras = trajectory_steps.pop('extras')
unpadded_length = len(tree.flatten(trajectory_steps)[0])
# Do nothing if the sequence is already full.
if unpadded_length != sequence_length:
to_pad = sequence_length - unpadded_length
pad = lambda x: tf.pad(x, [[0, to_pad]] + [[0, 0]] * (len(x.shape) - 1))
trajectory_steps = tree.map_structure(pad, trajectory_steps)
# Set the shape to be statically known, and checks it at runtime.
def _ensure_shape(x):
shape = tf.TensorShape([sequence_length]).concatenate(x.shape[1:])
return tf.ensure_shape(x, shape)
trajectory_steps = tree.map_structure(_ensure_shape, trajectory_steps)
return reverb.ReplaySample(
info=sample.info,
data=reverb_base.Trajectory(
**trajectory_steps, extras=trajectory_extras))
return _zero_pad_transform
def _make_adder_config(step_spec: reverb_base.Step, seq_len: int,
seq_period: int) -> List[sw.Config]:
return structured.create_sequence_config(
step_spec=step_spec,
sequence_length=seq_len,
period=seq_period,
end_of_episode_behavior=adders_reverb.EndBehavior.TRUNCATE,
sequence_pattern=_build_sequence)
class R2D2Builder(Generic[actor_core_lib.RecurrentState],
builders.ActorLearnerBuilder[r2d2_networks.R2D2Networks,
r2d2_actor.R2D2Policy,
r2d2_learning.R2D2ReplaySample]):
"""R2D2 Builder.
  This constructs all of the components for Recurrent Experience Replay in
Distributed Reinforcement Learning (Kapturowski et al.)
https://openreview.net/pdf?id=r1lyTjAqYX.
"""
def __init__(self, config: r2d2_config.R2D2Config):
"""Creates a R2D2 learner, a behavior policy and an eval actor."""
self._config = config
self._sequence_length = (
self._config.burn_in_length + self._config.trace_length + 1)
@property
def _batch_size_per_device(self) -> int:
"""Splits batch size across all learner devices evenly."""
# TODO(bshahr): Using jax.device_count will not be valid when colocating
# learning and inference.
return self._config.batch_size // jax.device_count()
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: r2d2_networks.R2D2Networks,
dataset: Iterator[r2d2_learning.R2D2ReplaySample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
del environment_spec
# The learner updates the parameters (and initializes them).
return r2d2_learning.R2D2Learner(
networks=networks,
batch_size=self._batch_size_per_device,
random_key=random_key,
burn_in_length=self._config.burn_in_length,
discount=self._config.discount,
importance_sampling_exponent=(
self._config.importance_sampling_exponent),
max_priority_weight=self._config.max_priority_weight,
target_update_period=self._config.target_update_period,
iterator=dataset,
optimizer=optax.adam(self._config.learning_rate),
bootstrap_n=self._config.bootstrap_n,
tx_pair=self._config.tx_pair,
clip_rewards=self._config.clip_rewards,
replay_client=replay_client,
counter=counter,
logger=logger_fn('learner'))
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: r2d2_actor.R2D2Policy,
) -> List[reverb.Table]:
"""Create tables to insert data into."""
dummy_actor_state = policy.init(jax.random.PRNGKey(0))
extras_spec = policy.get_extras(dummy_actor_state)
step_spec = structured.create_step_spec(
environment_spec=environment_spec, extras_spec=extras_spec)
if self._config.samples_per_insert:
samples_per_insert_tolerance = (
self._config.samples_per_insert_tolerance_rate *
self._config.samples_per_insert)
error_buffer = self._config.min_replay_size * samples_per_insert_tolerance
limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._config.min_replay_size,
samples_per_insert=self._config.samples_per_insert,
error_buffer=error_buffer)
else:
limiter = reverb.rate_limiters.MinSize(self._config.min_replay_size)
return [
reverb.Table(
name=self._config.replay_table_name,
sampler=reverb.selectors.Prioritized(
self._config.priority_exponent),
remover=reverb.selectors.Fifo(),
max_size=self._config.max_replay_size,
rate_limiter=limiter,
signature=sw.infer_signature(
configs=_make_adder_config(step_spec, self._sequence_length,
self._config.sequence_period),
step_spec=step_spec))
]
def make_dataset_iterator(
self,
replay_client: reverb.Client) -> Iterator[r2d2_learning.R2D2ReplaySample]:
"""Create a dataset iterator to use for learning/updating the agent."""
batch_size_per_learner = self._config.batch_size // jax.process_count()
dataset = datasets.make_reverb_dataset(
table=self._config.replay_table_name,
server_address=replay_client.server_address,
batch_size=self._batch_size_per_device,
num_parallel_calls=None,
max_in_flight_samples_per_worker=2 * batch_size_per_learner,
postprocess=_zero_pad(self._sequence_length),
)
return utils.multi_device_put(
dataset.as_numpy_iterator(),
devices=jax.local_devices(),
split_fn=utils.keep_key_on_host)
def make_adder(
self, replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[r2d2_actor.R2D2Policy]) -> Optional[adders.Adder]:
"""Create an adder which records data generated by the actor/environment."""
if environment_spec is None or policy is None:
raise ValueError('`environment_spec` and `policy` cannot be None.')
dummy_actor_state = policy.init(jax.random.PRNGKey(0))
extras_spec = policy.get_extras(dummy_actor_state)
step_spec = structured.create_step_spec(
environment_spec=environment_spec, extras_spec=extras_spec)
return structured.StructuredAdder(
client=replay_client,
max_in_flight_items=5,
configs=_make_adder_config(step_spec, self._sequence_length,
self._config.sequence_period),
step_spec=step_spec)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: r2d2_actor.R2D2Policy,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> acme.Actor:
del environment_spec
# Create variable client.
variable_client = variable_utils.VariableClient(
variable_source,
key='actor_variables',
update_period=self._config.variable_update_period)
return actors.GenericActor(
policy, random_key, variable_client, adder, backend='cpu')
def make_policy(self,
networks: r2d2_networks.R2D2Networks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> r2d2_actor.R2D2Policy:
if evaluation:
return r2d2_actor.get_actor_core(
networks,
num_epsilons=None,
evaluation_epsilon=self._config.evaluation_epsilon)
else:
return r2d2_actor.get_actor_core(networks, self._config.num_epsilons)
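# Illustrative sketch, not part of the original module: a hedged example of how
# the builder's pieces could be wired together, assuming an `environment_spec`
# for an Atari-like environment is already available and that the R2D2Config
# defaults are acceptable. The helper name below is hypothetical.
def _example_build_r2d2_components(environment_spec: specs.EnvironmentSpec):
  """Shows the typical construction order: config -> builder -> policy/tables."""
  config = r2d2_config.R2D2Config()
  builder = R2D2Builder(config)
  networks = r2d2_networks.make_atari_networks(environment_spec)
  # The behavior policy also provides the extras spec (recurrent core state)
  # used when inferring the replay table signature.
  policy = builder.make_policy(networks, environment_spec, evaluation=False)
  replay_tables = builder.make_replay_tables(environment_spec, policy)
  return builder, policy, replay_tables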
|
acme-master
|
acme/agents/jax/r2d2/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""R2D2 Networks."""
from acme import specs
from acme.jax import networks as networks_lib
R2D2Networks = networks_lib.UnrollableNetwork
def make_atari_networks(env_spec: specs.EnvironmentSpec) -> R2D2Networks:
"""Builds default R2D2 networks for Atari games."""
def make_core_module() -> networks_lib.R2D2AtariNetwork:
return networks_lib.R2D2AtariNetwork(env_spec.actions.num_values)
return networks_lib.make_unrollable_network(env_spec, make_core_module)
|
acme-master
|
acme/agents/jax/r2d2/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""R2D2 learner implementation."""
import functools
import time
from typing import Dict, Iterator, List, NamedTuple, Optional, Tuple
from absl import logging
import acme
from acme.adders import reverb as adders
from acme.agents.jax.r2d2 import networks as r2d2_networks
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.utils import async_utils
from acme.utils import counting
from acme.utils import loggers
import jax
import jax.numpy as jnp
import optax
import reverb
import rlax
import tree
_PMAP_AXIS_NAME = 'data'
# This type allows splitting a sample between the host and device, which avoids
# putting item keys (uint64) on device for the purposes of priority updating.
R2D2ReplaySample = utils.PrefetchingSplit
class TrainingState(NamedTuple):
"""Holds the agent's training state."""
params: networks_lib.Params
target_params: networks_lib.Params
opt_state: optax.OptState
steps: int
random_key: networks_lib.PRNGKey
class R2D2Learner(acme.Learner):
"""R2D2 learner."""
def __init__(self,
networks: r2d2_networks.R2D2Networks,
batch_size: int,
random_key: networks_lib.PRNGKey,
burn_in_length: int,
discount: float,
importance_sampling_exponent: float,
max_priority_weight: float,
target_update_period: int,
iterator: Iterator[R2D2ReplaySample],
optimizer: optax.GradientTransformation,
bootstrap_n: int = 5,
tx_pair: rlax.TxPair = rlax.SIGNED_HYPERBOLIC_PAIR,
clip_rewards: bool = False,
max_abs_reward: float = 1.,
use_core_state: bool = True,
prefetch_size: int = 2,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None):
"""Initializes the learner."""
def loss(
params: networks_lib.Params,
target_params: networks_lib.Params,
key_grad: networks_lib.PRNGKey,
sample: reverb.ReplaySample,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Computes mean transformed N-step loss for a batch of sequences."""
# Get core state & warm it up on observations for a burn-in period.
if use_core_state:
# Replay core state.
# NOTE: We may need to recover the type of the hk.LSTMState if the user
# specifies a dynamically unrolled RNN as it will strictly enforce the
# match between input/output state types.
online_state = utils.maybe_recover_lstm_type(
sample.data.extras.get('core_state'))
else:
key_grad, initial_state_rng = jax.random.split(key_grad)
online_state = networks.init_recurrent_state(initial_state_rng,
batch_size)
target_state = online_state
# Convert sample data to sequence-major format [T, B, ...].
data = utils.batch_to_sequence(sample.data)
# Maybe burn the core state in.
if burn_in_length:
burn_obs = jax.tree_map(lambda x: x[:burn_in_length], data.observation)
key_grad, key1, key2 = jax.random.split(key_grad, 3)
_, online_state = networks.unroll(params, key1, burn_obs, online_state)
_, target_state = networks.unroll(target_params, key2, burn_obs,
target_state)
# Only get data to learn on from after the end of the burn in period.
data = jax.tree_map(lambda seq: seq[burn_in_length:], data)
# Unroll on sequences to get online and target Q-Values.
key1, key2 = jax.random.split(key_grad)
online_q, _ = networks.unroll(params, key1, data.observation,
online_state)
target_q, _ = networks.unroll(target_params, key2, data.observation,
target_state)
# Get value-selector actions from online Q-values for double Q-learning.
selector_actions = jnp.argmax(online_q, axis=-1)
# Preprocess discounts & rewards.
discounts = (data.discount * discount).astype(online_q.dtype)
rewards = data.reward
if clip_rewards:
rewards = jnp.clip(rewards, -max_abs_reward, max_abs_reward)
rewards = rewards.astype(online_q.dtype)
# Get N-step transformed TD error and loss.
batch_td_error_fn = jax.vmap(
functools.partial(
rlax.transformed_n_step_q_learning,
n=bootstrap_n,
tx_pair=tx_pair),
in_axes=1,
out_axes=1)
batch_td_error = batch_td_error_fn(
online_q[:-1],
data.action[:-1],
target_q[1:],
selector_actions[1:],
rewards[:-1],
discounts[:-1])
batch_loss = 0.5 * jnp.square(batch_td_error).sum(axis=0)
# Importance weighting.
probs = sample.info.probability
importance_weights = (1. / (probs + 1e-6)).astype(online_q.dtype)
importance_weights **= importance_sampling_exponent
importance_weights /= jnp.max(importance_weights)
mean_loss = jnp.mean(importance_weights * batch_loss)
# Calculate priorities as a mixture of max and mean sequence errors.
abs_td_error = jnp.abs(batch_td_error).astype(online_q.dtype)
max_priority = max_priority_weight * jnp.max(abs_td_error, axis=0)
mean_priority = (1 - max_priority_weight) * jnp.mean(abs_td_error, axis=0)
priorities = (max_priority + mean_priority)
return mean_loss, priorities
def sgd_step(
state: TrainingState,
samples: reverb.ReplaySample
) -> Tuple[TrainingState, jnp.ndarray, Dict[str, jnp.ndarray]]:
"""Performs an update step, averaging over pmap replicas."""
# Compute loss and gradients.
grad_fn = jax.value_and_grad(loss, has_aux=True)
key, key_grad = jax.random.split(state.random_key)
(loss_value, priorities), gradients = grad_fn(state.params,
state.target_params,
key_grad,
samples)
# Average gradients over pmap replicas before optimizer update.
gradients = jax.lax.pmean(gradients, _PMAP_AXIS_NAME)
# Apply optimizer updates.
updates, new_opt_state = optimizer.update(gradients, state.opt_state)
new_params = optax.apply_updates(state.params, updates)
# Periodically update target networks.
steps = state.steps + 1
target_params = optax.periodic_update(new_params, state.target_params, # pytype: disable=wrong-arg-types # numpy-scalars
steps, self._target_update_period)
new_state = TrainingState(
params=new_params,
target_params=target_params,
opt_state=new_opt_state,
steps=steps,
random_key=key)
return new_state, priorities, {'loss': loss_value}
def update_priorities(
keys_and_priorities: Tuple[jnp.ndarray, jnp.ndarray]):
keys, priorities = keys_and_priorities
keys, priorities = tree.map_structure(
# Fetch array and combine device and batch dimensions.
lambda x: utils.fetch_devicearray(x).reshape((-1,) + x.shape[2:]),
(keys, priorities))
replay_client.mutate_priorities( # pytype: disable=attribute-error
table=adders.DEFAULT_PRIORITY_TABLE,
updates=dict(zip(keys, priorities)))
# Internalise components, hyperparameters, logger, counter, and methods.
self._iterator = iterator
self._replay_client = replay_client
self._target_update_period = target_update_period
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger(
'learner',
asynchronous=True,
serialize_fn=utils.fetch_devicearray,
time_delta=1.,
steps_key=self._counter.get_steps_key())
self._sgd_step = jax.pmap(sgd_step, axis_name=_PMAP_AXIS_NAME)
self._async_priority_updater = async_utils.AsyncExecutor(update_priorities)
# Initialise and internalise training state (parameters/optimiser state).
random_key, key_init = jax.random.split(random_key)
initial_params = networks.init(key_init)
opt_state = optimizer.init(initial_params)
# Log how many parameters the network has.
sizes = tree.map_structure(jnp.size, initial_params)
logging.info('Total number of params: %d',
sum(tree.flatten(sizes.values())))
state = TrainingState(
params=initial_params,
target_params=initial_params,
opt_state=opt_state,
steps=jnp.array(0),
random_key=random_key)
# Replicate parameters.
self._state = utils.replicate_in_all_devices(state)
def step(self):
prefetching_split = next(self._iterator)
# The split_sample method passed to utils.sharded_prefetch specifies what
    # parts of the objects returned by the original iterator are kept on the
    # host and which parts are prefetched on-device.
# In this case the host property of the prefetching split contains only the
# replay keys and the device property is the prefetched full original
# sample.
keys = prefetching_split.host
samples: reverb.ReplaySample = prefetching_split.device
# Do a batch of SGD.
start = time.time()
self._state, priorities, metrics = self._sgd_step(self._state, samples)
# Take metrics from first replica.
metrics = utils.get_from_first_device(metrics)
# Update our counts and record it.
counts = self._counter.increment(steps=1, time_elapsed=time.time() - start)
# Update priorities in replay.
if self._replay_client:
self._async_priority_updater.put((keys, priorities))
# Attempt to write logs.
self._logger.write({**metrics, **counts})
def get_variables(self, names: List[str]) -> List[networks_lib.Params]:
del names # There's only one available set of params in this agent.
# Return first replica of parameters.
return utils.get_from_first_device([self._state.params])
def save(self) -> TrainingState:
# Serialize only the first replica of parameters and optimizer state.
return utils.get_from_first_device(self._state)
def restore(self, state: TrainingState):
self._state = utils.replicate_in_all_devices(state)
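# Illustrative sketch, not part of the original learner: a toy, hedged example
# of the sequence-priority computation used in `loss` above, mixing the max and
# mean absolute TD errors over the time axis. The helper name and the default
# weight are hypothetical.
def _example_sequence_priorities(batch_td_error: jnp.ndarray,
                                 max_priority_weight: float = 0.9
                                 ) -> jnp.ndarray:
  """Returns per-sequence priorities for a [T, B] array of TD errors."""
  abs_td_error = jnp.abs(batch_td_error)
  max_priority = max_priority_weight * jnp.max(abs_td_error, axis=0)
  mean_priority = (1 - max_priority_weight) * jnp.mean(abs_td_error, axis=0)
  return max_priority + mean_priority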
|
acme-master
|
acme/agents/jax/r2d2/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""R2D2 actor."""
from typing import Callable, Generic, Mapping, Optional, Tuple
from acme import types
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax.r2d2 import config as r2d2_config
from acme.agents.jax.r2d2 import networks as r2d2_networks
from acme.jax import networks as networks_lib
import chex
import jax
import jax.numpy as jnp
import numpy as np
import rlax
Epsilon = float
R2D2Extras = Mapping[str, jnp.ndarray]
EpsilonRecurrentPolicy = Callable[[
networks_lib.Params, networks_lib.PRNGKey, networks_lib
.Observation, actor_core_lib.RecurrentState, Epsilon
], Tuple[networks_lib.Action, actor_core_lib.RecurrentState]]
@chex.dataclass(frozen=True, mappable_dataclass=False)
class R2D2ActorState(Generic[actor_core_lib.RecurrentState]):
rng: networks_lib.PRNGKey
epsilon: jnp.ndarray
recurrent_state: actor_core_lib.RecurrentState
prev_recurrent_state: actor_core_lib.RecurrentState
R2D2Policy = actor_core_lib.ActorCore[
R2D2ActorState[actor_core_lib.RecurrentState], R2D2Extras]
def get_actor_core(
networks: r2d2_networks.R2D2Networks,
num_epsilons: Optional[int],
evaluation_epsilon: Optional[float] = None,
) -> R2D2Policy:
"""Returns ActorCore for R2D2."""
if (not num_epsilons and evaluation_epsilon is None) or (num_epsilons and
evaluation_epsilon):
raise ValueError(
'Exactly one of `num_epsilons` or `evaluation_epsilon` must be '
        f'specified. Received num_epsilons={num_epsilons} and '
f'evaluation_epsilon={evaluation_epsilon}.')
def select_action(params: networks_lib.Params,
observation: networks_lib.Observation,
state: R2D2ActorState[actor_core_lib.RecurrentState]):
rng, policy_rng = jax.random.split(state.rng)
q_values, recurrent_state = networks.apply(params, policy_rng, observation,
state.recurrent_state)
action = rlax.epsilon_greedy(state.epsilon).sample(policy_rng, q_values)
return action, R2D2ActorState(
rng=rng,
epsilon=state.epsilon,
recurrent_state=recurrent_state,
prev_recurrent_state=state.recurrent_state)
def init(
rng: networks_lib.PRNGKey
) -> R2D2ActorState[actor_core_lib.RecurrentState]:
rng, epsilon_rng, state_rng = jax.random.split(rng, 3)
if num_epsilons:
epsilon = jax.random.choice(epsilon_rng,
np.logspace(1, 3, num_epsilons, base=0.1))
else:
epsilon = evaluation_epsilon
initial_core_state = networks.init_recurrent_state(state_rng, None)
return R2D2ActorState(
rng=rng,
epsilon=epsilon,
recurrent_state=initial_core_state,
prev_recurrent_state=initial_core_state)
def get_extras(
state: R2D2ActorState[actor_core_lib.RecurrentState]) -> R2D2Extras:
return {'core_state': state.prev_recurrent_state}
return actor_core_lib.ActorCore(init=init, select_action=select_action,
get_extras=get_extras)
# TODO(bshahr): Deprecate this in favour of R2D2Builder.make_policy.
def make_behavior_policy(networks: r2d2_networks.R2D2Networks,
config: r2d2_config.R2D2Config,
evaluation: bool = False) -> EpsilonRecurrentPolicy:
"""Selects action according to the policy."""
def behavior_policy(params: networks_lib.Params, key: networks_lib.PRNGKey,
observation: types.NestedArray,
core_state: types.NestedArray, epsilon: float):
q_values, core_state = networks.apply(params, key, observation, core_state)
epsilon = config.evaluation_epsilon if evaluation else epsilon
return rlax.epsilon_greedy(epsilon).sample(key, q_values), core_state
return behavior_policy
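# Illustrative note, not part of the original module: the per-actor epsilon
# drawn in `init` above comes from a log-spaced grid between 0.1 and 0.001.
# A hedged, hypothetical helper making that grid explicit:
def _example_epsilon_grid(num_epsilons: int = 4) -> np.ndarray:
  """Returns the epsilon grid, e.g. approximately [0.1, 0.022, 0.005, 0.001]
  for num_epsilons=4."""
  return np.logspace(1, 3, num_epsilons, base=0.1)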
|
acme-master
|
acme/agents/jax/r2d2/actor.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ValueDice config."""
import dataclasses
from acme.adders import reverb as adders_reverb
@dataclasses.dataclass
class ValueDiceConfig:
"""Configuration options for ValueDice."""
policy_learning_rate: float = 1e-5
nu_learning_rate: float = 1e-3
discount: float = .99
batch_size: int = 256
alpha: float = 0.05
policy_reg_scale: float = 1e-4
nu_reg_scale: float = 10.0
# Replay options
replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
samples_per_insert: float = 256 * 4
  # Rate to be used for the SampleToInsertRatio rate limiter tolerance.
# See a formula in make_replay_tables for more details.
samples_per_insert_tolerance_rate: float = 0.1
min_replay_size: int = 1000
max_replay_size: int = 1000000
prefetch_size: int = 4
# How many gradient updates to perform per step.
num_sgd_steps_per_step: int = 1
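# Illustrative note, not part of the original config: how the tolerance rate
# above feeds into the SampleToInsertRatio error buffer in the builder (see
# make_replay_tables in builder.py). The helper below is hypothetical.
def _example_error_buffer(config: ValueDiceConfig) -> float:
  """With the defaults: 1000 * (0.1 * 1024) == 102400."""
  samples_per_insert_tolerance = (
      config.samples_per_insert_tolerance_rate * config.samples_per_insert)
  return config.min_replay_size * samples_per_insert_tolerance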
|
acme-master
|
acme/agents/jax/value_dice/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ValueDice agent."""
from acme.agents.jax.value_dice.builder import ValueDiceBuilder
from acme.agents.jax.value_dice.config import ValueDiceConfig
from acme.agents.jax.value_dice.learning import ValueDiceLearner
from acme.agents.jax.value_dice.networks import apply_policy_and_sample
from acme.agents.jax.value_dice.networks import make_networks
from acme.agents.jax.value_dice.networks import ValueDiceNetworks
|
acme-master
|
acme/agents/jax/value_dice/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ValueDice agent implementation, using JAX."""
from typing import Callable, Iterator, List, Optional
from acme import adders
from acme import core
from acme import specs
from acme import types
from acme.adders import reverb as adders_reverb
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.agents.jax.value_dice import config as value_dice_config
from acme.agents.jax.value_dice import learning
from acme.agents.jax.value_dice import networks as value_dice_networks
from acme.datasets import reverb as datasets
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import jax
import optax
import reverb
from reverb import rate_limiters
class ValueDiceBuilder(
builders.ActorLearnerBuilder[value_dice_networks.ValueDiceNetworks,
actor_core_lib.FeedForwardPolicy,
reverb.ReplaySample]):
"""ValueDice Builder.
  This builder is an entry point for the online version of ValueDice.
  For the offline version, please use the ValueDiceLearner directly.
"""
def __init__(self, config: value_dice_config.ValueDiceConfig,
make_demonstrations: Callable[[int],
Iterator[types.Transition]]):
self._make_demonstrations = make_demonstrations
self._config = config
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: value_dice_networks.ValueDiceNetworks,
dataset: Iterator[reverb.ReplaySample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
del environment_spec, replay_client
iterator_demonstration = self._make_demonstrations(
self._config.batch_size * self._config.num_sgd_steps_per_step)
policy_optimizer = optax.adam(
learning_rate=self._config.policy_learning_rate)
nu_optimizer = optax.adam(learning_rate=self._config.nu_learning_rate)
return learning.ValueDiceLearner(
networks=networks,
policy_optimizer=policy_optimizer,
nu_optimizer=nu_optimizer,
discount=self._config.discount,
rng=random_key,
alpha=self._config.alpha,
policy_reg_scale=self._config.policy_reg_scale,
nu_reg_scale=self._config.nu_reg_scale,
num_sgd_steps_per_step=self._config.num_sgd_steps_per_step,
iterator_replay=dataset,
iterator_demonstrations=iterator_demonstration,
logger=logger_fn('learner'),
counter=counter,
)
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: actor_core_lib.FeedForwardPolicy,
) -> List[reverb.Table]:
del policy
samples_per_insert_tolerance = (
self._config.samples_per_insert_tolerance_rate *
self._config.samples_per_insert)
error_buffer = self._config.min_replay_size * samples_per_insert_tolerance
limiter = rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._config.min_replay_size,
samples_per_insert=self._config.samples_per_insert,
error_buffer=error_buffer)
return [reverb.Table(
name=self._config.replay_table_name,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._config.max_replay_size,
rate_limiter=limiter,
signature=adders_reverb.NStepTransitionAdder.signature(
environment_spec))]
def make_dataset_iterator(
self, replay_client: reverb.Client) -> Iterator[reverb.ReplaySample]:
"""Creates a dataset iterator to use for learning."""
dataset = datasets.make_reverb_dataset(
table=self._config.replay_table_name,
server_address=replay_client.server_address,
batch_size=(
self._config.batch_size * self._config.num_sgd_steps_per_step),
prefetch_size=self._config.prefetch_size)
return utils.device_put(dataset.as_numpy_iterator(), jax.devices()[0])
def make_adder(
self, replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[actor_core_lib.FeedForwardPolicy]
) -> Optional[adders.Adder]:
del environment_spec, policy
return adders_reverb.NStepTransitionAdder(
priority_fns={self._config.replay_table_name: None},
client=replay_client,
n_step=1,
discount=self._config.discount)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: actor_core_lib.FeedForwardPolicy,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> core.Actor:
del environment_spec
assert variable_source is not None
actor_core = actor_core_lib.batched_feed_forward_to_actor_core(policy)
# Inference happens on CPU, so it's better to move variables there too.
variable_client = variable_utils.VariableClient(variable_source, 'policy',
device='cpu')
return actors.GenericActor(
actor_core, random_key, variable_client, adder, backend='cpu')
def make_policy(self,
networks: value_dice_networks.ValueDiceNetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> actor_core_lib.FeedForwardPolicy:
del environment_spec
return value_dice_networks.apply_policy_and_sample(
networks, eval_mode=evaluation)
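# Illustrative sketch, not part of the original module: a hedged example of
# constructing the builder, assuming a `make_demonstrations` callable that
# yields batches of demonstration transitions (e.g. read from a recorded
# dataset). The helper name and the batch size are hypothetical.
def _example_make_builder(
    make_demonstrations: Callable[[int], Iterator[types.Transition]]
) -> ValueDiceBuilder:
  config = value_dice_config.ValueDiceConfig(batch_size=64)
  return ValueDiceBuilder(config, make_demonstrations=make_demonstrations)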
|
acme-master
|
acme/agents/jax/value_dice/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ValueDice networks definition."""
import dataclasses
from typing import Callable, Optional, Tuple
from acme import specs
from acme.agents.jax import actor_core as actor_core_lib
from acme.jax import networks as networks_lib
from acme.jax import utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
@dataclasses.dataclass
class ValueDiceNetworks:
"""ValueDice networks."""
policy_network: networks_lib.FeedForwardNetwork
nu_network: networks_lib.FeedForwardNetwork
# Functions for actors and evaluators, resp., to sample actions.
sample: networks_lib.SampleFn
sample_eval: Optional[networks_lib.SampleFn] = None
# Function that transforms an action before a mixture is applied, typically
# the identity for continuous actions and one-hot encoding for discrete
# actions.
encode_action: Callable[[networks_lib.Action], jnp.ndarray] = lambda x: x
def apply_policy_and_sample(
networks: ValueDiceNetworks,
eval_mode: bool = False) -> actor_core_lib.FeedForwardPolicy:
"""Returns a function that computes actions."""
sample_fn = networks.sample if not eval_mode else networks.sample_eval
if not sample_fn:
raise ValueError('sample function is not provided')
def apply_and_sample(params, key, obs):
return sample_fn(networks.policy_network.apply(params, obs), key)
return apply_and_sample
def make_networks(
spec: specs.EnvironmentSpec,
hidden_layer_sizes: Tuple[int, ...] = (256, 256)) -> ValueDiceNetworks:
"""Creates networks used by the agent."""
num_dimensions = np.prod(spec.actions.shape, dtype=int)
def _actor_fn(obs):
network = hk.Sequential([
hk.nets.MLP(
list(hidden_layer_sizes),
w_init=hk.initializers.VarianceScaling(1.0, 'fan_in', 'uniform'),
activation=jax.nn.relu,
activate_final=True),
networks_lib.NormalTanhDistribution(num_dimensions),
])
return network(obs)
def _nu_fn(obs, action):
network = hk.Sequential([
hk.nets.MLP(
list(hidden_layer_sizes) + [1],
w_init=hk.initializers.VarianceScaling(1.0, 'fan_in', 'uniform'),
activation=jax.nn.relu),
])
return network(jnp.concatenate([obs, action], axis=-1))
policy = hk.without_apply_rng(hk.transform(_actor_fn))
nu = hk.without_apply_rng(hk.transform(_nu_fn))
# Create dummy observations and actions to create network parameters.
dummy_action = utils.zeros_like(spec.actions)
dummy_obs = utils.zeros_like(spec.observations)
dummy_action = utils.add_batch_dim(dummy_action)
dummy_obs = utils.add_batch_dim(dummy_obs)
return ValueDiceNetworks(
policy_network=networks_lib.FeedForwardNetwork(
lambda key: policy.init(key, dummy_obs), policy.apply),
nu_network=networks_lib.FeedForwardNetwork(
lambda key: nu.init(key, dummy_obs, dummy_action), nu.apply),
sample=lambda params, key: params.sample(seed=key),
sample_eval=lambda params, key: params.mode())
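# Illustrative note, not part of the original module: for discrete action
# spaces, the `encode_action` field documented above would typically be a
# one-hot encoding. A hedged, hypothetical example:
def _example_discrete_encode_action(num_actions: int):
  """Returns an encode_action function mapping integer actions to one-hot."""
  return lambda action: jax.nn.one_hot(action, num_actions)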
|
acme-master
|
acme/agents/jax/value_dice/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ValueDice learner implementation."""
import functools
import time
from typing import Any, Dict, Iterator, List, Mapping, NamedTuple, Optional, Tuple
import acme
from acme import types
from acme.agents.jax.value_dice import networks as value_dice_networks
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
import jax
import jax.numpy as jnp
import optax
import reverb
class TrainingState(NamedTuple):
"""Contains training state for the learner."""
policy_optimizer_state: optax.OptState
policy_params: networks_lib.Params
nu_optimizer_state: optax.OptState
nu_params: networks_lib.Params
key: jnp.ndarray
steps: int
def _orthogonal_regularization_loss(params: networks_lib.Params):
"""Orthogonal regularization.
See equation (3) in https://arxiv.org/abs/1809.11096.
Args:
    params: Dictionary of parameters to apply regularization for.
Returns:
A regularization loss term.
"""
reg_loss = 0
for key in params:
if isinstance(params[key], Mapping):
reg_loss += _orthogonal_regularization_loss(params[key])
continue
variable = params[key]
assert len(variable.shape) in [1, 2, 4]
if len(variable.shape) == 1:
# This is a bias so do not apply regularization.
continue
if len(variable.shape) == 4:
# CNN
variable = jnp.reshape(variable, (-1, variable.shape[-1]))
prod = jnp.matmul(jnp.transpose(variable), variable)
reg_loss += jnp.sum(jnp.square(prod * (1 - jnp.eye(prod.shape[0]))))
return reg_loss
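# Illustrative note, not part of the original module: for a single weight
# matrix W, the regularizer above sums the squared off-diagonal entries of
# W^T W, pushing the columns of W towards orthogonality. A hedged,
# hypothetical helper making that explicit for one matrix:
def _example_orthogonal_penalty(w: jnp.ndarray) -> jnp.ndarray:
  prod = jnp.matmul(jnp.transpose(w), w)
  off_diagonal = prod * (1 - jnp.eye(prod.shape[0]))
  return jnp.sum(jnp.square(off_diagonal))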
class ValueDiceLearner(acme.Learner):
"""ValueDice learner."""
_state: TrainingState
def __init__(self,
networks: value_dice_networks.ValueDiceNetworks,
policy_optimizer: optax.GradientTransformation,
nu_optimizer: optax.GradientTransformation,
discount: float,
rng: jnp.ndarray,
iterator_replay: Iterator[reverb.ReplaySample],
iterator_demonstrations: Iterator[types.Transition],
alpha: float = 0.05,
policy_reg_scale: float = 1e-4,
nu_reg_scale: float = 10.0,
num_sgd_steps_per_step: int = 1,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None):
rng, policy_key, nu_key = jax.random.split(rng, 3)
policy_init_params = networks.policy_network.init(policy_key)
policy_optimizer_state = policy_optimizer.init(policy_init_params)
nu_init_params = networks.nu_network.init(nu_key)
nu_optimizer_state = nu_optimizer.init(nu_init_params)
def compute_losses(
policy_params: networks_lib.Params,
nu_params: networks_lib.Params,
key: jnp.ndarray,
replay_o_tm1: types.NestedArray,
replay_a_tm1: types.NestedArray,
replay_o_t: types.NestedArray,
demo_o_tm1: types.NestedArray,
demo_a_tm1: types.NestedArray,
demo_o_t: types.NestedArray,
) -> jnp.ndarray:
      # TODO(damienv, hussenot): what to do with the discounts?
def policy(obs, key):
dist_params = networks.policy_network.apply(policy_params, obs)
return networks.sample(dist_params, key)
key1, key2, key3, key4 = jax.random.split(key, 4)
# Predicted actions.
demo_o_t0 = demo_o_tm1
policy_demo_a_t0 = policy(demo_o_t0, key1)
policy_demo_a_t = policy(demo_o_t, key2)
policy_replay_a_t = policy(replay_o_t, key3)
replay_a_tm1 = networks.encode_action(replay_a_tm1)
demo_a_tm1 = networks.encode_action(demo_a_tm1)
policy_demo_a_t0 = networks.encode_action(policy_demo_a_t0)
policy_demo_a_t = networks.encode_action(policy_demo_a_t)
policy_replay_a_t = networks.encode_action(policy_replay_a_t)
# "Value function" nu over the expert states.
nu_demo_t0 = networks.nu_network.apply(nu_params, demo_o_t0,
policy_demo_a_t0)
nu_demo_tm1 = networks.nu_network.apply(nu_params, demo_o_tm1, demo_a_tm1)
nu_demo_t = networks.nu_network.apply(nu_params, demo_o_t,
policy_demo_a_t)
nu_demo_diff = nu_demo_tm1 - discount * nu_demo_t
# "Value function" nu over the replay buffer states.
nu_replay_tm1 = networks.nu_network.apply(nu_params, replay_o_tm1,
replay_a_tm1)
nu_replay_t = networks.nu_network.apply(nu_params, replay_o_t,
policy_replay_a_t)
nu_replay_diff = nu_replay_tm1 - discount * nu_replay_t
# Linear part of the loss.
linear_loss_demo = jnp.mean(nu_demo_t0 * (1.0 - discount))
linear_loss_rb = jnp.mean(nu_replay_diff)
linear_loss = (linear_loss_demo * (1 - alpha) + linear_loss_rb * alpha)
      # Non-linear part of the loss.
nu_replay_demo_diff = jnp.concatenate([nu_demo_diff, nu_replay_diff],
axis=0)
replay_demo_weights = jnp.concatenate([
jnp.ones_like(nu_demo_diff) * (1 - alpha),
jnp.ones_like(nu_replay_diff) * alpha
],
axis=0)
replay_demo_weights /= jnp.mean(replay_demo_weights)
non_linear_loss = jnp.sum(
jax.lax.stop_gradient(
utils.weighted_softmax(nu_replay_demo_diff, replay_demo_weights,
axis=0)) *
nu_replay_demo_diff)
# Final loss.
loss = (non_linear_loss - linear_loss)
# Regularized policy loss.
if policy_reg_scale > 0.:
policy_reg = _orthogonal_regularization_loss(policy_params)
else:
policy_reg = 0.
      # Gradient penalty on nu.
if nu_reg_scale > 0.0:
batch_size = demo_o_tm1.shape[0]
c = jax.random.uniform(key4, shape=(batch_size,))
shape_o = [
dim if i == 0 else 1 for i, dim in enumerate(replay_o_tm1.shape)
]
shape_a = [
dim if i == 0 else 1 for i, dim in enumerate(replay_a_tm1.shape)
]
c_o = jnp.reshape(c, shape_o)
c_a = jnp.reshape(c, shape_a)
mixed_o_tm1 = c_o * demo_o_tm1 + (1 - c_o) * replay_o_tm1
mixed_a_tm1 = c_a * demo_a_tm1 + (1 - c_a) * replay_a_tm1
mixed_o_t = c_o * demo_o_t + (1 - c_o) * replay_o_t
mixed_policy_a_t = c_a * policy_demo_a_t + (1 - c_a) * policy_replay_a_t
mixed_o = jnp.concatenate([mixed_o_tm1, mixed_o_t], axis=0)
mixed_a = jnp.concatenate([mixed_a_tm1, mixed_policy_a_t], axis=0)
def sum_nu(o, a):
return jnp.sum(networks.nu_network.apply(nu_params, o, a))
nu_grad_o_fn = jax.grad(sum_nu, argnums=0)
nu_grad_a_fn = jax.grad(sum_nu, argnums=1)
nu_grad_o = nu_grad_o_fn(mixed_o, mixed_a)
nu_grad_a = nu_grad_a_fn(mixed_o, mixed_a)
nu_grad = jnp.concatenate([
jnp.reshape(nu_grad_o, [batch_size, -1]),
jnp.reshape(nu_grad_a, [batch_size, -1])], axis=-1)
        # TODO(damienv, hussenot): check whether the eps term is needed
        # (as in the original ValueDice code).
nu_grad_penalty = jnp.mean(
jnp.square(
jnp.linalg.norm(nu_grad + 1e-8, axis=-1, keepdims=True) - 1))
else:
nu_grad_penalty = 0.0
policy_loss = -loss + policy_reg_scale * policy_reg
nu_loss = loss + nu_reg_scale * nu_grad_penalty
return policy_loss, nu_loss # pytype: disable=bad-return-type # jax-ndarray
def sgd_step(
state: TrainingState,
data: Tuple[types.Transition, types.Transition]
) -> Tuple[TrainingState, Dict[str, jnp.ndarray]]:
replay_transitions, demo_transitions = data
key, key_loss = jax.random.split(state.key)
compute_losses_with_input = functools.partial(
compute_losses,
replay_o_tm1=replay_transitions.observation,
replay_a_tm1=replay_transitions.action,
replay_o_t=replay_transitions.next_observation,
demo_o_tm1=demo_transitions.observation,
demo_a_tm1=demo_transitions.action,
demo_o_t=demo_transitions.next_observation,
key=key_loss)
(policy_loss_value, nu_loss_value), vjpfun = jax.vjp(
compute_losses_with_input,
state.policy_params, state.nu_params)
policy_gradients, _ = vjpfun((1.0, 0.0))
_, nu_gradients = vjpfun((0.0, 1.0))
# Update optimizers.
policy_update, policy_optimizer_state = policy_optimizer.update(
policy_gradients, state.policy_optimizer_state)
policy_params = optax.apply_updates(state.policy_params, policy_update)
nu_update, nu_optimizer_state = nu_optimizer.update(
nu_gradients, state.nu_optimizer_state)
nu_params = optax.apply_updates(state.nu_params, nu_update)
new_state = TrainingState(
policy_optimizer_state=policy_optimizer_state,
policy_params=policy_params,
nu_optimizer_state=nu_optimizer_state,
nu_params=nu_params,
key=key,
steps=state.steps + 1,
)
metrics = {
'policy_loss': policy_loss_value,
'nu_loss': nu_loss_value,
}
return new_state, metrics
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger(
'learner',
asynchronous=True,
serialize_fn=utils.fetch_devicearray,
steps_key=self._counter.get_steps_key())
# Iterator on demonstration transitions.
self._iterator_demonstrations = iterator_demonstrations
self._iterator_replay = iterator_replay
self._sgd_step = jax.jit(utils.process_multiple_batches(
sgd_step, num_sgd_steps_per_step))
# Create initial state.
self._state = TrainingState(
policy_optimizer_state=policy_optimizer_state,
policy_params=policy_init_params,
nu_optimizer_state=nu_optimizer_state,
nu_params=nu_init_params,
key=rng,
steps=0,
)
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
def step(self):
# Get data from replay (dropping extras if any). Note there is no
# extra data here because we do not insert any into Reverb.
    # TODO(raveman): Add support for offline training, where we do not consume
# data from the replay buffer.
sample = next(self._iterator_replay)
replay_transitions = types.Transition(*sample.data)
# Get a batch of Transitions from the demonstration.
demonstration_transitions = next(self._iterator_demonstrations)
self._state, metrics = self._sgd_step(
self._state, (replay_transitions, demonstration_transitions))
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Increment counts and record the current time
counts = self._counter.increment(steps=1, walltime=elapsed_time)
# Attempts to write the logs.
self._logger.write({**metrics, **counts})
def get_variables(self, names: List[str]) -> List[Any]:
variables = {
'policy': self._state.policy_params,
'nu': self._state.nu_params,
}
return [variables[name] for name in names]
def save(self) -> TrainingState:
return self._state
def restore(self, state: TrainingState):
self._state = state
|
acme-master
|
acme/agents/jax/value_dice/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPO config."""
import dataclasses
from typing import Callable, Union, Optional
from acme import types
from acme.adders import reverb as adders_reverb
from acme.agents.jax.ppo import normalization
@dataclasses.dataclass
class PPOConfig:
"""Configuration options for PPO.
Attributes:
unroll_length: Length of sequences added to the replay buffer.
    num_minibatches: The number of minibatches to split an epoch into, i.e.
      minibatch size = batch_size * unroll_length / num_minibatches.
num_epochs: How many times to loop over the set of minibatches.
batch_size: Number of trajectory segments of length unroll_length to gather
for use in a call to the learner's step function.
replay_table_name: Replay table name.
ppo_clipping_epsilon: PPO clipping epsilon.
normalize_advantage: Whether to normalize the advantages in the batch.
normalize_value: Whether the critic should predict normalized values.
normalization_ema_tau: Float tau for the exponential moving average used to
maintain statistics for normalizing advantages and values.
clip_value: Whether to clip the values as described in "What Matters in
On-Policy Reinforcement Learning?".
value_clipping_epsilon: Epsilon for value clipping.
    max_abs_reward: If provided, clips the rewards in the trajectory to have
      absolute value less than or equal to max_abs_reward.
gae_lambda: Lambda parameter in Generalized Advantage Estimation.
discount: Discount factor.
learning_rate: Learning rate for updating the policy and critic networks.
adam_epsilon: Adam epsilon parameter.
entropy_cost: Weight of the entropy regularizer term in policy optimization.
value_cost: Weight of the value loss term in optimization.
max_gradient_norm: Threshold for clipping the gradient norm.
variable_update_period: Determines how frequently actors pull the parameters
from the learner.
log_global_norm_metrics: Whether to log global norm of gradients and
updates.
metrics_logging_period: How often metrics should be aggregated to host and
logged.
    pmap_axis_name: The name of the axis used for pmapping.
    obs_normalization_fns_factory: The factory used to create observation
      normalization functions. Setting to None (default) disables observation
      normalization.
"""
unroll_length: int = 8
num_minibatches: int = 8
num_epochs: int = 2
batch_size: int = 256
replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
ppo_clipping_epsilon: float = 0.2
normalize_advantage: bool = False
normalize_value: bool = False
normalization_ema_tau: float = 0.995
clip_value: bool = False
value_clipping_epsilon: float = 0.2
max_abs_reward: Optional[float] = None
gae_lambda: float = 0.95
discount: float = 0.99
learning_rate: Union[float, Callable[[int], float]] = 3e-4
adam_epsilon: float = 1e-7
entropy_cost: float = 3e-4
value_cost: float = 1.
max_gradient_norm: float = 0.5
variable_update_period: int = 1
log_global_norm_metrics: bool = False
metrics_logging_period: int = 100
pmap_axis_name: str = 'devices'
obs_normalization_fns_factory: Optional[Callable[
[types.NestedSpec], normalization.NormalizationFns]] = None
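# Illustrative note, not part of the original config: the minibatch size
# formula mentioned in the docstring, evaluated on the defaults above
# (256 * 8 / 8 == 256 transitions per minibatch). The helper is hypothetical.
def _example_minibatch_size(config: PPOConfig) -> float:
  return config.batch_size * config.unroll_length / config.num_minibatches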
|
acme-master
|
acme/agents/jax/ppo/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPO agent."""
from acme.agents.jax.ppo.builder import PPOBuilder
from acme.agents.jax.ppo.config import PPOConfig
from acme.agents.jax.ppo.learning import PPOLearner
from acme.agents.jax.ppo.networks import EntropyFn
from acme.agents.jax.ppo.networks import make_categorical_ppo_networks
from acme.agents.jax.ppo.networks import make_continuous_networks
from acme.agents.jax.ppo.networks import make_discrete_networks
from acme.agents.jax.ppo.networks import make_inference_fn
from acme.agents.jax.ppo.networks import make_mvn_diag_ppo_networks
from acme.agents.jax.ppo.networks import make_networks
from acme.agents.jax.ppo.networks import make_ppo_networks
from acme.agents.jax.ppo.networks import make_tanh_normal_ppo_networks
from acme.agents.jax.ppo.networks import PPONetworks
from acme.agents.jax.ppo.normalization import build_ema_mean_std_normalizer
from acme.agents.jax.ppo.normalization import build_mean_std_normalizer
from acme.agents.jax.ppo.normalization import NormalizationFns
from acme.agents.jax.ppo.normalization import NormalizedGenericActor
|
acme-master
|
acme/agents/jax/ppo/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPO Builder."""
from typing import Iterator, List, Optional
from acme import adders
from acme import core
from acme import specs
from acme.adders import reverb as adders_reverb
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.agents.jax.ppo import config as ppo_config
from acme.agents.jax.ppo import learning
from acme.agents.jax.ppo import networks as ppo_networks
from acme.agents.jax.ppo import normalization
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import jax
import numpy as np
import optax
import reverb
class PPOBuilder(
builders.ActorLearnerBuilder[ppo_networks.PPONetworks,
actor_core_lib.FeedForwardPolicyWithExtra,
reverb.ReplaySample]):
"""PPO Builder."""
def __init__(
self,
config: ppo_config.PPOConfig,
):
"""Creates PPO builder."""
self._config = config
# An extra step is used for bootstrapping when computing advantages.
self._sequence_length = config.unroll_length + 1
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: actor_core_lib.FeedForwardPolicyWithExtra,
) -> List[reverb.Table]:
"""Creates reverb tables for the algorithm."""
del policy
# params_num_sgd_steps is used to track how old the actor parameters are
extra_spec = {
'log_prob': np.ones(shape=(), dtype=np.float32),
'params_num_sgd_steps': np.ones(shape=(), dtype=np.float32),
}
signature = adders_reverb.SequenceAdder.signature(
environment_spec, extra_spec, sequence_length=self._sequence_length)
return [
reverb.Table.queue(
name=self._config.replay_table_name,
max_size=self._config.batch_size,
signature=signature)
]
def make_dataset_iterator(
self, replay_client: reverb.Client) -> Iterator[reverb.ReplaySample]:
"""Creates a dataset.
The iterator batch size is computed as follows:
Let:
B := learner batch size (config.batch_size)
H := number of hosts (jax.process_count())
D := number of local devices per host
The Reverb iterator will load batches of size B // (H * D). After wrapping
the iterator with utils.multi_device_put, this will result in an iterable
that provides B // H samples per item, with B // (H * D) samples placed on
each local device. In a multi-host setup, each host has its own learner
node and builds its own instance of the iterator. This will result
in a total batch size of H * (B // H) == B being consumed per learner
step (since the learner is pmapped across all devices). Note that
jax.device_count() returns the total number of devices across hosts,
i.e. H * D.
Args:
replay_client: the reverb replay client
Returns:
A replay buffer iterator to be used by the local devices.
"""
iterator_batch_size, ragged = divmod(self._config.batch_size,
jax.device_count())
if ragged:
raise ValueError(
'Learner batch size must be divisible by total number of devices!')
    # We don't use datasets.make_reverb_dataset() here to avoid interleaving
    # and prefetching, which don't play well with the can_sample() check on
    # update.
# NOTE: Value for max_in_flight_samples_per_worker comes from a
# recommendation here: https://git.io/JYzXB
dataset = reverb.TrajectoryDataset.from_table_signature(
server_address=replay_client.server_address,
table=self._config.replay_table_name,
max_in_flight_samples_per_worker=(
2 * self._config.batch_size // jax.process_count()
),
)
dataset = dataset.batch(iterator_batch_size, drop_remainder=True)
dataset = dataset.as_numpy_iterator()
return utils.multi_device_put(iterable=dataset, devices=jax.local_devices())
def make_adder(
self,
replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[actor_core_lib.FeedForwardPolicyWithExtra],
) -> Optional[adders.Adder]:
"""Creates an adder which handles observations."""
del environment_spec, policy
# Note that the last transition in the sequence is used for bootstrapping
# only and is ignored otherwise. So we need to make sure that sequences
# overlap on one transition, thus "-1" in the period length computation.
return adders_reverb.SequenceAdder(
client=replay_client,
priority_fns={self._config.replay_table_name: None},
period=self._sequence_length - 1,
sequence_length=self._sequence_length,
)
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: ppo_networks.PPONetworks,
dataset: Iterator[reverb.ReplaySample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
del replay_client
if callable(self._config.learning_rate):
optimizer = optax.chain(
optax.clip_by_global_norm(self._config.max_gradient_norm),
optax.scale_by_adam(eps=self._config.adam_epsilon),
optax.scale_by_schedule(self._config.learning_rate), optax.scale(-1)) # pytype: disable=wrong-arg-types # numpy-scalars
else:
optimizer = optax.chain(
optax.clip_by_global_norm(self._config.max_gradient_norm),
optax.scale_by_adam(eps=self._config.adam_epsilon),
optax.scale(-self._config.learning_rate))
obs_normalization_fns = None
if self._config.obs_normalization_fns_factory is not None:
obs_normalization_fns = self._config.obs_normalization_fns_factory(
environment_spec.observations)
return learning.PPOLearner(
ppo_networks=networks,
iterator=dataset,
discount=self._config.discount,
entropy_cost=self._config.entropy_cost,
value_cost=self._config.value_cost,
ppo_clipping_epsilon=self._config.ppo_clipping_epsilon,
normalize_advantage=self._config.normalize_advantage,
normalize_value=self._config.normalize_value,
normalization_ema_tau=self._config.normalization_ema_tau,
clip_value=self._config.clip_value,
value_clipping_epsilon=self._config.value_clipping_epsilon,
max_abs_reward=self._config.max_abs_reward,
gae_lambda=self._config.gae_lambda,
counter=counter,
random_key=random_key,
optimizer=optimizer,
num_epochs=self._config.num_epochs,
num_minibatches=self._config.num_minibatches,
logger=logger_fn('learner'),
log_global_norm_metrics=self._config.log_global_norm_metrics,
metrics_logging_period=self._config.metrics_logging_period,
pmap_axis_name=self._config.pmap_axis_name,
obs_normalization_fns=obs_normalization_fns,
)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: actor_core_lib.FeedForwardPolicyWithExtra,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> core.Actor:
assert variable_source is not None
actor_core = actor_core_lib.batched_feed_forward_with_extras_to_actor_core(
policy)
if self._config.obs_normalization_fns_factory is not None:
variable_client = variable_utils.VariableClient(
variable_source, ['params', 'obs_normalization_params'],
device='cpu',
update_period=self._config.variable_update_period)
obs_normalization_fns = self._config.obs_normalization_fns_factory(
environment_spec.observations)
actor = normalization.NormalizedGenericActor(
actor_core,
obs_normalization_fns,
random_key,
variable_client,
adder,
jit=True,
backend='cpu',
per_episode_update=False,
)
else:
variable_client = variable_utils.VariableClient(
variable_source,
'params',
device='cpu',
update_period=self._config.variable_update_period)
actor = actors.GenericActor(
actor_core, random_key, variable_client, adder, backend='cpu')
return actor
def make_policy(
self,
networks: ppo_networks.PPONetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> actor_core_lib.FeedForwardPolicyWithExtra:
del environment_spec
return ppo_networks.make_inference_fn(networks, evaluation)
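# Illustrative note, not part of the original module: the per-device batch
# arithmetic described in make_dataset_iterator's docstring. The helper and
# the example numbers in its docstring are hypothetical.
def _example_iterator_batch_size(batch_size: int = 256) -> int:
  """E.g. with one host and 8 local devices, 256 // 8 == 32 samples land on
  each device per learner step."""
  iterator_batch_size, ragged = divmod(batch_size, jax.device_count())
  if ragged:
    raise ValueError(
        'Learner batch size must be divisible by total number of devices!')
  return iterator_batch_size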
|
acme-master
|
acme/agents/jax/ppo/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPO network definitions."""
import dataclasses
from typing import Callable, NamedTuple, Optional, Sequence
from acme import specs
from acme.agents.jax import actor_core as actor_core_lib
from acme.jax import networks as networks_lib
from acme.jax import utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow_probability
tfp = tensorflow_probability.substrates.jax
tfd = tfp.distributions
EntropyFn = Callable[
[networks_lib.Params, networks_lib.PRNGKey], networks_lib.Entropy
]
class MVNDiagParams(NamedTuple):
"""Parameters for a diagonal multi-variate normal distribution."""
loc: jnp.ndarray
scale_diag: jnp.ndarray
class TanhNormalParams(NamedTuple):
"""Parameters for a tanh squashed diagonal MVN distribution."""
loc: jnp.ndarray
scale: jnp.ndarray
class CategoricalParams(NamedTuple):
"""Parameters for a categorical distribution."""
logits: jnp.ndarray
class PPOParams(NamedTuple):
model_params: networks_lib.Params
  # Using float32 as it covers a larger range than int32. If using int64 we
  # would need to enable jax_enable_x64.
num_sgd_steps: jnp.float32
@dataclasses.dataclass
class PPONetworks:
"""Network and pure functions for the PPO agent.
If 'network' returns tfd.Distribution, you can use make_ppo_networks() to
create this object properly.
  If one is building this object manually, one is free to make the 'network'
  object return anything that is later passed as input to the
  log_prob/entropy/sample functions to perform the corresponding computations.
  An example scenario where you would want to do this is when tfd.Distribution
  does not play nicely with jax.vmap. Please refer to make_continuous_networks()
  for an example where the network does not return a tfd.Distribution object.
"""
network: networks_lib.FeedForwardNetwork
log_prob: networks_lib.LogProbFn
entropy: EntropyFn
sample: networks_lib.SampleFn
sample_eval: Optional[networks_lib.SampleFn] = None
def make_inference_fn(
ppo_networks: PPONetworks,
evaluation: bool = False) -> actor_core_lib.FeedForwardPolicyWithExtra:
"""Returns a function to be used for inference by a PPO actor."""
def inference(
params: networks_lib.Params,
key: networks_lib.PRNGKey,
observations: networks_lib.Observation,
):
dist_params, _ = ppo_networks.network.apply(params.model_params,
observations)
if evaluation and ppo_networks.sample_eval:
actions = ppo_networks.sample_eval(dist_params, key)
else:
actions = ppo_networks.sample(dist_params, key)
if evaluation:
return actions, {}
log_prob = ppo_networks.log_prob(dist_params, actions)
extras = {
'log_prob': log_prob,
# Add batch dimension.
'params_num_sgd_steps': params.num_sgd_steps[None, ...]
}
return actions, extras
return inference
def make_networks(
spec: specs.EnvironmentSpec, hidden_layer_sizes: Sequence[int] = (256, 256)
) -> PPONetworks:
if isinstance(spec.actions, specs.DiscreteArray):
return make_discrete_networks(spec, hidden_layer_sizes)
else:
return make_continuous_networks(
spec,
policy_layer_sizes=hidden_layer_sizes,
value_layer_sizes=hidden_layer_sizes)
def make_ppo_networks(network: networks_lib.FeedForwardNetwork) -> PPONetworks:
"""Constructs a PPONetworks instance from the given FeedForwardNetwork.
This method assumes that the network returns a tfd.Distribution. Sometimes it
may be preferable to have networks that do not return tfd.Distribution
  objects, for example, because tfd.Distribution does not play nicely with
  jax.vmap. Please refer to make_continuous_networks() for an example where the
network does not return a tfd.Distribution object.
Args:
network: a transformed Haiku network that takes in observations and returns
the action distribution and value.
Returns:
A PPONetworks instance with pure functions wrapping the input network.
"""
return PPONetworks(
network=network,
log_prob=lambda distribution, action: distribution.log_prob(action),
entropy=lambda distribution, key=None: distribution.entropy(),
sample=lambda distribution, key: distribution.sample(seed=key),
sample_eval=lambda distribution, key: distribution.mode())
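# Illustrative sketch (not part of the original module): wiring a Haiku
# network whose apply function returns (tfd.Distribution, value) into
# make_ppo_networks. The layer sizes and the discrete-action assumption are
# made up purely for this example.
def _example_make_ppo_networks_from_distribution(
    spec: specs.EnvironmentSpec) -> PPONetworks:
  """Builds PPONetworks around a network returning a tfd.Categorical."""
  num_actions = spec.actions.num_values
  def forward_fn(inputs):
    h = utils.batch_concat(inputs)
    h = hk.nets.MLP([64, 64], activate_final=True)(h)
    distribution = tfd.Categorical(logits=hk.Linear(num_actions)(h))
    value = jnp.squeeze(hk.Linear(1)(h), axis=-1)
    return distribution, value
  forward = hk.without_apply_rng(hk.transform(forward_fn))
  dummy_obs = utils.add_batch_dim(utils.zeros_like(spec.observations))
  network = networks_lib.FeedForwardNetwork(
      lambda rng: forward.init(rng, dummy_obs), forward.apply)
  return make_ppo_networks(network)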
def make_mvn_diag_ppo_networks(
network: networks_lib.FeedForwardNetwork) -> PPONetworks:
"""Constructs a PPONetworks for MVN Diag policy from the FeedForwardNetwork.
Args:
network: a transformed Haiku network (or equivalent in other libraries) that
takes in observations and returns the action distribution and value.
Returns:
A PPONetworks instance with pure functions wrapping the input network.
"""
def log_prob(params: MVNDiagParams, action):
return tfd.MultivariateNormalDiag(
loc=params.loc, scale_diag=params.scale_diag).log_prob(action)
def entropy(
params: MVNDiagParams, key: networks_lib.PRNGKey
) -> networks_lib.Entropy:
del key
return tfd.MultivariateNormalDiag(
loc=params.loc, scale_diag=params.scale_diag).entropy()
def sample(params: MVNDiagParams, key: networks_lib.PRNGKey):
return tfd.MultivariateNormalDiag(
loc=params.loc, scale_diag=params.scale_diag).sample(seed=key)
def sample_eval(params: MVNDiagParams, key: networks_lib.PRNGKey):
del key
return tfd.MultivariateNormalDiag(
loc=params.loc, scale_diag=params.scale_diag).mode()
return PPONetworks(
network=network,
log_prob=log_prob,
entropy=entropy,
sample=sample,
sample_eval=sample_eval)
def make_tanh_normal_ppo_networks(
network: networks_lib.FeedForwardNetwork) -> PPONetworks:
"""Constructs a PPONetworks for Tanh MVN Diag policy from the FeedForwardNetwork.
Args:
network: a transformed Haiku network (or equivalent in other libraries) that
takes in observations and returns the action distribution and value.
Returns:
A PPONetworks instance with pure functions wrapping the input network.
"""
def build_distribution(params: TanhNormalParams):
distribution = tfd.Normal(loc=params.loc, scale=params.scale)
distribution = tfd.Independent(
networks_lib.TanhTransformedDistribution(distribution),
reinterpreted_batch_ndims=1)
return distribution
def log_prob(params: TanhNormalParams, action):
distribution = build_distribution(params)
return distribution.log_prob(action)
def entropy(
params: TanhNormalParams, key: networks_lib.PRNGKey
) -> networks_lib.Entropy:
distribution = build_distribution(params)
return distribution.entropy(seed=key)
def sample(params: TanhNormalParams, key: networks_lib.PRNGKey):
distribution = build_distribution(params)
return distribution.sample(seed=key)
def sample_eval(params: TanhNormalParams, key: networks_lib.PRNGKey):
del key
distribution = build_distribution(params)
return distribution.mode()
return PPONetworks(
network=network,
log_prob=log_prob,
entropy=entropy,
sample=sample,
sample_eval=sample_eval)
def make_discrete_networks(
environment_spec: specs.EnvironmentSpec,
hidden_layer_sizes: Sequence[int] = (512,),
use_conv: bool = True,
) -> PPONetworks:
"""Creates networks used by the agent for discrete action environments.
Args:
environment_spec: Environment spec used to define number of actions.
    hidden_layer_sizes: Sizes of the hidden layers of the MLP torso.
    use_conv: Whether to prepend a convolutional (AtariTorso) feature extractor
      to the MLP torso.
Returns:
PPONetworks
"""
num_actions = environment_spec.actions.num_values
def forward_fn(inputs):
layers = []
if use_conv:
layers.extend([networks_lib.AtariTorso()])
layers.extend([hk.nets.MLP(hidden_layer_sizes, activate_final=True)])
trunk = hk.Sequential(layers)
h = utils.batch_concat(inputs)
h = trunk(h)
logits = hk.Linear(num_actions)(h)
values = hk.Linear(1)(h)
values = jnp.squeeze(values, axis=-1)
return (CategoricalParams(logits=logits), values)
forward_fn = hk.without_apply_rng(hk.transform(forward_fn))
dummy_obs = utils.zeros_like(environment_spec.observations)
dummy_obs = utils.add_batch_dim(dummy_obs) # Dummy 'sequence' dim.
network = networks_lib.FeedForwardNetwork(
lambda rng: forward_fn.init(rng, dummy_obs), forward_fn.apply)
# Create PPONetworks to add functionality required by the agent.
return make_categorical_ppo_networks(network) # pylint:disable=undefined-variable
def make_categorical_ppo_networks(
network: networks_lib.FeedForwardNetwork) -> PPONetworks:
"""Constructs a PPONetworks for Categorical Policy from FeedForwardNetwork.
Args:
network: a transformed Haiku network (or equivalent in other libraries) that
takes in observations and returns the action distribution and value.
Returns:
A PPONetworks instance with pure functions wrapping the input network.
"""
def log_prob(params: CategoricalParams, action):
return tfd.Categorical(logits=params.logits).log_prob(action)
def entropy(
params: CategoricalParams, key: networks_lib.PRNGKey
) -> networks_lib.Entropy:
del key
return tfd.Categorical(logits=params.logits).entropy()
def sample(params: CategoricalParams, key: networks_lib.PRNGKey):
return tfd.Categorical(logits=params.logits).sample(seed=key)
def sample_eval(params: CategoricalParams, key: networks_lib.PRNGKey):
del key
return tfd.Categorical(logits=params.logits).mode()
return PPONetworks(
network=network,
log_prob=log_prob,
entropy=entropy,
sample=sample,
sample_eval=sample_eval)
def make_continuous_networks(
environment_spec: specs.EnvironmentSpec,
policy_layer_sizes: Sequence[int] = (64, 64),
value_layer_sizes: Sequence[int] = (64, 64),
use_tanh_gaussian_policy: bool = True,
) -> PPONetworks:
"""Creates PPONetworks to be used for continuous action environments."""
# Get total number of action dimensions from action spec.
num_dimensions = np.prod(environment_spec.actions.shape, dtype=int)
def forward_fn(inputs: networks_lib.Observation):
def _policy_network(obs: networks_lib.Observation):
h = utils.batch_concat(obs)
h = hk.nets.MLP(policy_layer_sizes, activate_final=True)(h)
      # tfd distributions do not always interact well with jax.vmap, so the
      # safer implementation in general is for the policy network to output the
      # distribution parameters, and for the distribution to be constructed in
      # wrappers such as make_mvn_diag_ppo_networks or
      # make_tanh_normal_ppo_networks above.
if not use_tanh_gaussian_policy:
# Following networks_lib.MultivariateNormalDiagHead
init_scale = 0.3
min_scale = 1e-6
w_init = hk.initializers.VarianceScaling(1e-4)
b_init = hk.initializers.Constant(0.)
loc_layer = hk.Linear(num_dimensions, w_init=w_init, b_init=b_init)
scale_layer = hk.Linear(num_dimensions, w_init=w_init, b_init=b_init)
loc = loc_layer(h)
scale = jax.nn.softplus(scale_layer(h))
scale *= init_scale / jax.nn.softplus(0.)
scale += min_scale
return MVNDiagParams(loc=loc, scale_diag=scale)
# Following networks_lib.NormalTanhDistribution
min_scale = 1e-3
w_init = hk.initializers.VarianceScaling(1.0, 'fan_in', 'uniform')
b_init = hk.initializers.Constant(0.)
loc_layer = hk.Linear(num_dimensions, w_init=w_init, b_init=b_init)
scale_layer = hk.Linear(num_dimensions, w_init=w_init, b_init=b_init)
loc = loc_layer(h)
scale = scale_layer(h)
scale = jax.nn.softplus(scale) + min_scale
return TanhNormalParams(loc=loc, scale=scale)
value_network = hk.Sequential([
utils.batch_concat,
hk.nets.MLP(value_layer_sizes, activate_final=True),
hk.Linear(1),
lambda x: jnp.squeeze(x, axis=-1)
])
policy_output = _policy_network(inputs)
value = value_network(inputs)
return (policy_output, value)
# Transform into pure functions.
forward_fn = hk.without_apply_rng(hk.transform(forward_fn))
dummy_obs = utils.zeros_like(environment_spec.observations)
dummy_obs = utils.add_batch_dim(dummy_obs) # Dummy 'sequence' dim.
network = networks_lib.FeedForwardNetwork(
lambda rng: forward_fn.init(rng, dummy_obs), forward_fn.apply)
# Create PPONetworks to add functionality required by the agent.
if not use_tanh_gaussian_policy:
return make_mvn_diag_ppo_networks(network)
return make_tanh_normal_ppo_networks(network)
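# Illustrative end-to-end sketch (not part of the original module): build the
# continuous-action networks and derive the actor-side inference function.
# `environment_spec` is assumed to describe a bounded continuous action space;
# the layer sizes are arbitrary choices for the example.
def _example_continuous_inference_fn(
    environment_spec: specs.EnvironmentSpec
) -> actor_core_lib.FeedForwardPolicyWithExtra:
  """Returns an inference function for a tanh-Gaussian PPO policy."""
  ppo_networks = make_continuous_networks(
      environment_spec,
      policy_layer_sizes=(64, 64),
      value_layer_sizes=(64, 64),
      use_tanh_gaussian_policy=True)
  return make_inference_fn(ppo_networks, evaluation=False)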
|
acme-master
|
acme/agents/jax/ppo/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learner for the PPO agent."""
from typing import Dict, Iterator, List, NamedTuple, Optional, Tuple
import acme
from acme import types
from acme.agents.jax.ppo import networks
from acme.agents.jax.ppo import normalization
from acme.jax import networks as networks_lib
from acme.jax.utils import get_from_first_device
from acme.utils import counting
from acme.utils import loggers
import jax
import jax.numpy as jnp
import optax
import reverb
import rlax
PPOParams = networks.PPOParams
class Batch(NamedTuple):
"""A batch of data; all shapes are expected to be [B, ...]."""
observations: types.NestedArray
actions: jnp.ndarray
advantages: jnp.ndarray
# Target value estimate used to bootstrap the value function.
target_values: jnp.ndarray
# Value estimate and action log-prob at behavior time.
behavior_values: jnp.ndarray
behavior_log_probs: jnp.ndarray
class TrainingState(NamedTuple):
"""Training state for the PPO learner."""
params: PPOParams
opt_state: optax.OptState
random_key: networks_lib.PRNGKey
# Optional counter used for exponential moving average zero debiasing
# Using float32 as it covers a larger range than int32. If using int64 we
# would need to do jax_enable_x64.
ema_counter: Optional[jnp.float32] = None
# Optional parameter for maintaining a running estimate of the scale of
# advantage estimates
biased_advantage_scale: Optional[networks_lib.Params] = None
advantage_scale: Optional[networks_lib.Params] = None
# Optional parameter for maintaining a running estimate of the mean and
# standard deviation of value estimates
biased_value_first_moment: Optional[networks_lib.Params] = None
biased_value_second_moment: Optional[networks_lib.Params] = None
value_mean: Optional[networks_lib.Params] = None
value_std: Optional[networks_lib.Params] = None
# Optional parameters for observation normalization
obs_normalization_params: Optional[normalization.NormalizationParams] = None
class PPOLearner(acme.Learner):
"""Learner for PPO."""
def __init__(
self,
ppo_networks: networks.PPONetworks,
iterator: Iterator[reverb.ReplaySample],
optimizer: optax.GradientTransformation,
random_key: networks_lib.PRNGKey,
ppo_clipping_epsilon: float = 0.2,
normalize_advantage: bool = True,
normalize_value: bool = False,
normalization_ema_tau: float = 0.995,
clip_value: bool = False,
value_clipping_epsilon: float = 0.2,
max_abs_reward: Optional[float] = None,
gae_lambda: float = 0.95,
discount: float = 0.99,
entropy_cost: float = 0.,
value_cost: float = 1.,
num_epochs: int = 4,
num_minibatches: int = 1,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
log_global_norm_metrics: bool = False,
metrics_logging_period: int = 100,
pmap_axis_name: str = 'devices',
obs_normalization_fns: Optional[normalization.NormalizationFns] = None,
):
self.local_learner_devices = jax.local_devices()
self.num_local_learner_devices = jax.local_device_count()
self.learner_devices = jax.devices()
self.num_epochs = num_epochs
self.num_minibatches = num_minibatches
self.metrics_logging_period = metrics_logging_period
self._num_full_update_steps = 0
self._iterator = iterator
normalize_obs = obs_normalization_fns is not None
if normalize_obs:
assert obs_normalization_fns is not None
# Set up logging/counting.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger('learner')
def ppo_loss(
params: networks_lib.Params,
observations: networks_lib.Observation,
actions: networks_lib.Action,
advantages: jnp.ndarray,
target_values: networks_lib.Value,
behavior_values: networks_lib.Value,
behavior_log_probs: networks_lib.LogProb,
value_mean: jnp.ndarray,
value_std: jnp.ndarray,
key: networks_lib.PRNGKey,
) -> Tuple[jnp.ndarray, Dict[str, jnp.ndarray]]:
"""PPO loss for the policy and the critic."""
distribution_params, values = ppo_networks.network.apply(
params, observations)
if normalize_value:
# values = values * jnp.fmax(value_std, 1e-6) + value_mean
target_values = (target_values - value_mean) / jnp.fmax(value_std, 1e-6)
policy_log_probs = ppo_networks.log_prob(distribution_params, actions)
key, sub_key = jax.random.split(key)
policy_entropies = ppo_networks.entropy(distribution_params, sub_key)
# Compute the policy losses
rhos = jnp.exp(policy_log_probs - behavior_log_probs)
clipped_ppo_policy_loss = rlax.clipped_surrogate_pg_loss(
rhos, advantages, ppo_clipping_epsilon)
policy_entropy_loss = -jnp.mean(policy_entropies)
total_policy_loss = (
clipped_ppo_policy_loss + entropy_cost * policy_entropy_loss)
# Compute the critic losses
unclipped_value_loss = (values - target_values)**2
if clip_value:
        # Clip values to reduce variability during critic training.
clipped_values = behavior_values + jnp.clip(values - behavior_values,
-value_clipping_epsilon,
value_clipping_epsilon)
clipped_value_error = target_values - clipped_values
clipped_value_loss = clipped_value_error ** 2
value_loss = jnp.mean(jnp.fmax(unclipped_value_loss,
clipped_value_loss))
else:
# For Mujoco envs clipping hurts a lot. Evidenced by Figure 43 in
# https://arxiv.org/pdf/2006.05990.pdf
value_loss = jnp.mean(unclipped_value_loss)
total_ppo_loss = total_policy_loss + value_cost * value_loss
return total_ppo_loss, { # pytype: disable=bad-return-type # numpy-scalars
'loss_total': total_ppo_loss,
'loss_policy_total': total_policy_loss,
'loss_policy_pg': clipped_ppo_policy_loss,
'loss_policy_entropy': policy_entropy_loss,
'loss_critic': value_loss,
}
ppo_loss_grad = jax.grad(ppo_loss, has_aux=True)
def sgd_step(state: TrainingState, minibatch: Batch):
observations = minibatch.observations
actions = minibatch.actions
advantages = minibatch.advantages
target_values = minibatch.target_values
behavior_values = minibatch.behavior_values
behavior_log_probs = minibatch.behavior_log_probs
key, sub_key = jax.random.split(state.random_key)
loss_grad, metrics = ppo_loss_grad(
state.params.model_params,
observations,
actions,
advantages,
target_values,
behavior_values,
behavior_log_probs,
state.value_mean,
state.value_std,
sub_key,
)
# Apply updates
loss_grad = jax.lax.pmean(loss_grad, axis_name=pmap_axis_name)
updates, opt_state = optimizer.update(loss_grad, state.opt_state)
model_params = optax.apply_updates(state.params.model_params, updates)
params = PPOParams(
model_params=model_params,
num_sgd_steps=state.params.num_sgd_steps + 1)
if log_global_norm_metrics:
metrics['norm_grad'] = optax.global_norm(loss_grad)
metrics['norm_updates'] = optax.global_norm(updates)
state = state._replace(params=params, opt_state=opt_state, random_key=key)
return state, metrics
def epoch_update(
carry: Tuple[TrainingState, Batch],
unused_t: Tuple[()],
):
state, carry_batch = carry
# Shuffling into minibatches
batch_size = carry_batch.advantages.shape[0]
key, sub_key = jax.random.split(state.random_key)
      # TODO(kamyar): For efficiency, could use the same permutation for all epochs.
permuted_batch = jax.tree_util.tree_map(
lambda x: jax.random.permutation( # pylint: disable=g-long-lambda
sub_key,
x,
axis=0,
independent=False),
carry_batch)
state = state._replace(random_key=key)
minibatches = jax.tree_util.tree_map(
lambda x: jnp.reshape( # pylint: disable=g-long-lambda
x,
[ # pylint: disable=g-long-lambda
num_minibatches, batch_size // num_minibatches
] + list(x.shape[1:])),
permuted_batch)
# Scan over the minibatches
state, metrics = jax.lax.scan(
sgd_step, state, minibatches, length=num_minibatches)
metrics = jax.tree_util.tree_map(jnp.mean, metrics)
return (state, carry_batch), metrics
vmapped_network_apply = jax.vmap(
ppo_networks.network.apply, in_axes=(None, 0), out_axes=0)
def single_device_update(
state: TrainingState,
trajectories: types.NestedArray,
):
params_num_sgd_steps_before_update = state.params.num_sgd_steps
# Update the EMA counter and obtain the zero debiasing multiplier
if normalize_advantage or normalize_value:
ema_counter = state.ema_counter + 1
state = state._replace(ema_counter=ema_counter)
zero_debias = 1. / (1. - jnp.power(normalization_ema_tau, ema_counter))
# Extract the data.
data = trajectories.data
observations, actions, rewards, termination, extra = (data.observation,
data.action,
data.reward,
data.discount,
data.extras)
if normalize_obs:
obs_norm_params = obs_normalization_fns.update(
state.obs_normalization_params, observations, pmap_axis_name)
state = state._replace(obs_normalization_params=obs_norm_params)
observations = obs_normalization_fns.normalize(
observations, state.obs_normalization_params)
if max_abs_reward is not None:
# Apply reward clipping.
rewards = jnp.clip(rewards, -1. * max_abs_reward, max_abs_reward)
discounts = termination * discount
behavior_log_probs = extra['log_prob']
_, behavior_values = vmapped_network_apply(state.params.model_params,
observations)
if normalize_value:
batch_value_first_moment = jnp.mean(behavior_values)
batch_value_second_moment = jnp.mean(behavior_values**2)
batch_value_first_moment, batch_value_second_moment = jax.lax.pmean(
(batch_value_first_moment, batch_value_second_moment),
axis_name=pmap_axis_name)
biased_value_first_moment = (
normalization_ema_tau * state.biased_value_first_moment +
(1. - normalization_ema_tau) * batch_value_first_moment)
biased_value_second_moment = (
normalization_ema_tau * state.biased_value_second_moment +
(1. - normalization_ema_tau) * batch_value_second_moment)
value_mean = biased_value_first_moment * zero_debias
value_second_moment = biased_value_second_moment * zero_debias
value_std = jnp.sqrt(jax.nn.relu(value_second_moment - value_mean**2))
state = state._replace(
biased_value_first_moment=biased_value_first_moment,
biased_value_second_moment=biased_value_second_moment,
value_mean=value_mean,
value_std=value_std,
)
behavior_values = behavior_values * jnp.fmax(state.value_std,
1e-6) + state.value_mean
behavior_values = jax.lax.stop_gradient(behavior_values)
# Compute GAE using rlax
vmapped_rlax_truncated_generalized_advantage_estimation = jax.vmap(
rlax.truncated_generalized_advantage_estimation,
in_axes=(0, 0, None, 0))
advantages = vmapped_rlax_truncated_generalized_advantage_estimation(
rewards[:, :-1], discounts[:, :-1], gae_lambda, behavior_values)
advantages = jax.lax.stop_gradient(advantages)
target_values = behavior_values[:, :-1] + advantages
target_values = jax.lax.stop_gradient(target_values)
# Exclude the last step - it was only used for bootstrapping.
# The shape is [num_sequences, num_steps, ..]
(observations, actions, behavior_log_probs, behavior_values) = (
jax.tree_util.tree_map(
lambda x: x[:, :-1],
(observations, actions, behavior_log_probs, behavior_values),
)
)
# Shuffle the data and break into minibatches
batch_size = advantages.shape[0] * advantages.shape[1]
batch = Batch(
observations=observations,
actions=actions,
advantages=advantages,
target_values=target_values,
behavior_values=behavior_values,
behavior_log_probs=behavior_log_probs)
batch = jax.tree_util.tree_map(
lambda x: jnp.reshape(x, [batch_size] + list(x.shape[2:])), batch)
if normalize_advantage:
batch_advantage_scale = jnp.mean(jnp.abs(batch.advantages))
batch_advantage_scale = jax.lax.pmean(batch_advantage_scale,
pmap_axis_name)
# update the running statistics
biased_advantage_scale = (
normalization_ema_tau * state.biased_advantage_scale +
(1. - normalization_ema_tau) * batch_advantage_scale)
advantage_scale = biased_advantage_scale * zero_debias
state = state._replace(
biased_advantage_scale=biased_advantage_scale,
advantage_scale=advantage_scale)
# scale the advantages
scaled_advantages = batch.advantages / jnp.fmax(state.advantage_scale,
1e-6)
batch = batch._replace(advantages=scaled_advantages)
# Scan desired number of epoch updates
(state, _), metrics = jax.lax.scan(
epoch_update, (state, batch), (), length=num_epochs)
metrics = jax.tree_util.tree_map(jnp.mean, metrics)
if normalize_advantage:
metrics['advantage_scale'] = state.advantage_scale
if normalize_value:
metrics['value_mean'] = value_mean
metrics['value_std'] = value_std
delta_params_sgd_steps = (
data.extras['params_num_sgd_steps'][:, 0] -
params_num_sgd_steps_before_update)
metrics['delta_params_sgd_steps_min'] = jnp.min(delta_params_sgd_steps)
metrics['delta_params_sgd_steps_max'] = jnp.max(delta_params_sgd_steps)
metrics['delta_params_sgd_steps_mean'] = jnp.mean(delta_params_sgd_steps)
metrics['delta_params_sgd_steps_std'] = jnp.std(delta_params_sgd_steps)
return state, metrics
pmapped_update_step = jax.pmap(
single_device_update,
axis_name=pmap_axis_name,
devices=self.learner_devices)
def full_update_step(
state: TrainingState,
trajectories: types.NestedArray,
):
state, metrics = pmapped_update_step(state, trajectories)
return state, metrics
self._full_update_step = full_update_step
def make_initial_state(key: networks_lib.PRNGKey) -> TrainingState:
"""Initialises the training state (parameters and optimiser state)."""
all_keys = jax.random.split(key, num=self.num_local_learner_devices + 1)
key_init, key_state = all_keys[0], all_keys[1:]
key_state = [key_state[i] for i in range(self.num_local_learner_devices)]
key_state = jax.device_put_sharded(key_state, self.local_learner_devices)
initial_params = ppo_networks.network.init(key_init)
initial_opt_state = optimizer.init(initial_params)
# Using float32 as it covers a larger range than int32. If using int64 we
# would need to do jax_enable_x64.
params_num_sgd_steps = jnp.zeros(shape=(), dtype=jnp.float32)
initial_params = jax.device_put_replicated(initial_params,
self.local_learner_devices)
initial_opt_state = jax.device_put_replicated(initial_opt_state,
self.local_learner_devices)
params_num_sgd_steps = jax.device_put_replicated(
params_num_sgd_steps, self.local_learner_devices)
ema_counter = jnp.float32(0)
ema_counter = jax.device_put_replicated(ema_counter,
self.local_learner_devices)
init_state = TrainingState(
params=PPOParams(
model_params=initial_params, num_sgd_steps=params_num_sgd_steps),
opt_state=initial_opt_state,
random_key=key_state,
ema_counter=ema_counter,
)
if normalize_advantage:
biased_advantage_scale = jax.device_put_replicated(
jnp.zeros([]), self.local_learner_devices)
advantage_scale = jax.device_put_replicated(
jnp.zeros([]), self.local_learner_devices)
init_state = init_state._replace(
biased_advantage_scale=biased_advantage_scale,
advantage_scale=advantage_scale)
if normalize_value:
biased_value_first_moment = jax.device_put_replicated(
jnp.zeros([]), self.local_learner_devices)
value_mean = biased_value_first_moment
biased_value_second_moment = jax.device_put_replicated(
jnp.zeros([]), self.local_learner_devices)
value_second_moment = biased_value_second_moment
value_std = jnp.sqrt(jax.nn.relu(value_second_moment - value_mean**2))
init_state = init_state._replace(
biased_value_first_moment=biased_value_first_moment,
biased_value_second_moment=biased_value_second_moment,
value_mean=value_mean,
value_std=value_std)
if normalize_obs:
obs_norm_params = obs_normalization_fns.init() # pytype: disable=attribute-error
obs_norm_params = jax.device_put_replicated(obs_norm_params,
self.local_learner_devices)
init_state = init_state._replace(
obs_normalization_params=obs_norm_params)
return init_state
# Initialise training state (parameters and optimizer state).
self._state = make_initial_state(random_key)
self._cached_state = get_from_first_device(self._state, as_numpy=True)
def step(self):
"""Does a learner step and logs the results.
One learner step consists of (possibly multiple) epochs of PPO updates on
a batch of NxT steps collected by the actors.
"""
sample = next(self._iterator)
self._state, results = self._full_update_step(self._state, sample)
self._cached_state = get_from_first_device(self._state, as_numpy=True)
# Update our counts and record it.
counts = self._counter.increment(steps=self.num_epochs *
self.num_minibatches)
# Snapshot and attempt to write logs.
if self._num_full_update_steps % self.metrics_logging_period == 0:
results = jax.tree_util.tree_map(jnp.mean, results)
self._logger.write({**results, **counts})
self._num_full_update_steps += 1
def get_variables(self, names: List[str]) -> List[networks_lib.Params]:
variables = self._cached_state
return [getattr(variables, name) for name in names]
def save(self) -> TrainingState:
return self._cached_state
def restore(self, state: TrainingState):
# TODO(kamyar) Should the random_key come from self._state instead?
random_key = state.random_key
random_key = jax.random.split(
random_key, num=self.num_local_learner_devices)
random_key = jax.device_put_sharded(
[random_key[i] for i in range(self.num_local_learner_devices)],
self.local_learner_devices)
state = jax.device_put_replicated(state, self.local_learner_devices)
state = state._replace(random_key=random_key)
self._state = state
self._cached_state = get_from_first_device(self._state, as_numpy=True)
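# Illustrative construction sketch (not part of the original module). The
# networks, iterator and hyperparameter values below are assumptions made for
# the example; in practice they are supplied by the PPO builder.
def _example_make_learner(
    ppo_networks: networks.PPONetworks,
    iterator: Iterator[reverb.ReplaySample],
    random_key: networks_lib.PRNGKey) -> PPOLearner:
  """Builds a PPOLearner with an Adam optimizer and common settings."""
  return PPOLearner(
      ppo_networks=ppo_networks,
      iterator=iterator,
      optimizer=optax.adam(3e-4),
      random_key=random_key,
      num_epochs=4,
      num_minibatches=1)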
|
acme-master
|
acme/agents/jax/ppo/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for normalization."""
import dataclasses
from typing import Any, Callable, Generic, NamedTuple, Optional
from acme import adders
from acme import types
from acme.agents.jax import actor_core
from acme.agents.jax import actors
from acme.jax import networks as network_lib
from acme.jax import running_statistics
from acme.jax import utils
from acme.jax import variable_utils
import jax
import jax.numpy as jnp
NormalizationParams = Any
RunningStatisticsState = running_statistics.RunningStatisticsState
@dataclasses.dataclass
class NormalizationFns:
"""Holds pure functions for normalization.
Attributes:
init: A pure function: ``params = init()``
normalize: A pure function: ``norm_x = normalize(x, params)``
update: A pure function: ``params = update(params, x, pmap_axis_name)``
"""
# Returns the initial parameters for the normalization utility.
init: Callable[[], NormalizationParams]
# Returns the normalized input nested array.
normalize: Callable[[types.NestedArray, NormalizationParams],
types.NestedArray]
  # Returns updated normalization parameters.
update: Callable[[NormalizationParams, types.NestedArray, Optional[str]],
NormalizationParams]
class NormalizedGenericActor(actors.GenericActor[actor_core.State,
actor_core.Extras],
Generic[actor_core.State, actor_core.Extras]):
"""A GenericActor that uses observation normalization."""
def __init__(self,
actor: actor_core.ActorCore[actor_core.State, actor_core.Extras],
normalization_fns: NormalizationFns,
random_key: network_lib.PRNGKey,
variable_client: Optional[variable_utils.VariableClient],
adder: Optional[adders.Adder] = None,
jit: bool = True,
backend: Optional[str] = 'cpu',
per_episode_update: bool = False):
"""Initializes a feed forward actor.
Args:
actor: actor core.
      normalization_fns: Functions used for normalizing observations.
random_key: Random key.
variable_client: The variable client to get policy and observation
normalization parameters from. The variable client should be defined to
provide [policy_params, obs_norm_params].
adder: An adder to add experiences to.
jit: Whether or not to jit the ActorCore and normalization functions.
backend: Which backend to use when jitting.
      per_episode_update: If True, updates variable client params once at the
        beginning of each episode.
"""
super().__init__(actor, random_key, variable_client, adder, jit, backend,
per_episode_update)
if jit:
self._apply_normalization = jax.jit(
normalization_fns.normalize, backend=backend)
else:
self._apply_normalization = normalization_fns.normalize
def select_action(self,
observation: network_lib.Observation) -> types.NestedArray:
policy_params, obs_norm_params = tuple(self._params)
observation = self._apply_normalization(observation, obs_norm_params)
action, self._state = self._policy(policy_params, observation, self._state)
return utils.to_numpy(action)
class EMAMeanStdNormalizerParams(NamedTuple):
"""Using technique form Adam optimizer paper for computing running stats."""
ema_counter: jnp.int32
biased_first_moment: types.NestedArray
biased_second_moment: types.NestedArray
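# Note on zero debiasing (added for clarity; mirrors the Adam paper): after t
# updates with decay `tau`, a biased EMA statistic m_t underestimates the true
# running average by a factor of (1 - tau**t), so the debiased estimate used
# in `_normalize_leaf` below is m_t / (1 - tau**t).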
def build_ema_mean_std_normalizer(
nested_spec: types.NestedSpec,
tau: float = 0.995,
epsilon: float = 1e-6,) -> NormalizationFns:
"""Builds pure functions used for normalizing based on EMA mean and std.
The built normalizer functions can be used to normalize nested arrays that
have a structure corresponding to nested_spec. Currently only supports
  nested_spec where all leaves have float dtype.
Arguments:
nested_spec: A nested spec where all leaves have float dtype
tau: tau parameter for exponential moving average
epsilon: epsilon for avoiding division by zero std
Returns:
NormalizationFns to be used for normalization
"""
nested_dims = jax.tree_util.tree_map(lambda x: len(x.shape), nested_spec)
def init() -> EMAMeanStdNormalizerParams:
first_moment = utils.zeros_like(nested_spec)
second_moment = utils.zeros_like(nested_spec)
return EMAMeanStdNormalizerParams(
ema_counter=jnp.int32(0),
biased_first_moment=first_moment,
biased_second_moment=second_moment,
)
def _normalize_leaf(
x: jnp.ndarray,
ema_counter: jnp.int32,
biased_first_moment: jnp.ndarray,
biased_second_moment: jnp.ndarray,
) -> jnp.ndarray:
zero_debias = 1. / (1. - jnp.power(tau, ema_counter))
mean = biased_first_moment * zero_debias
second_moment = biased_second_moment * zero_debias
std = jnp.sqrt(jax.nn.relu(second_moment - mean**2))
mean = jnp.broadcast_to(mean, x.shape)
std = jnp.broadcast_to(std, x.shape)
return (x - mean) / jnp.fmax(std, epsilon)
def _normalize(nested_array: types.NestedArray,
params: EMAMeanStdNormalizerParams) -> types.NestedArray:
ema_counter = params.ema_counter
normalized_nested_array = jax.tree_util.tree_map(
lambda x, f, s: _normalize_leaf(x, ema_counter, f, s),
nested_array,
params.biased_first_moment,
params.biased_second_moment)
return normalized_nested_array
def normalize(nested_array: types.NestedArray,
params: EMAMeanStdNormalizerParams) -> types.NestedArray:
ema_counter = params.ema_counter
norm_obs = jax.lax.cond(
ema_counter > 0,
_normalize,
lambda o, p: o,
nested_array, params)
return norm_obs
def _compute_first_moment(x: jnp.ndarray, ndim: int):
reduce_axes = tuple(range(len(x.shape) - ndim))
first_moment = jnp.mean(x, axis=reduce_axes)
return first_moment
def _compute_second_moment(x: jnp.ndarray, ndim: int):
reduce_axes = tuple(range(len(x.shape) - ndim))
second_moment = jnp.mean(x**2, axis=reduce_axes)
return second_moment
def update(
params: EMAMeanStdNormalizerParams,
nested_array: types.NestedArray,
pmap_axis_name: Optional[str] = None) -> EMAMeanStdNormalizerParams:
# compute the stats
first_moment = jax.tree_util.tree_map(
_compute_first_moment, nested_array, nested_dims)
second_moment = jax.tree_util.tree_map(
_compute_second_moment, nested_array, nested_dims)
# propagate across devices
if pmap_axis_name is not None:
first_moment, second_moment = jax.lax.pmean(
(first_moment, second_moment), axis_name=pmap_axis_name)
# update running statistics
new_first_moment = jax.tree_util.tree_map(
lambda x, y: tau * x + # pylint: disable=g-long-lambda
(1. - tau) * y,
params.biased_first_moment,
first_moment)
new_second_moment = jax.tree_util.tree_map(
lambda x, y: tau * x + # pylint: disable=g-long-lambda
(1. - tau) * y,
params.biased_second_moment,
second_moment)
# update ema_counter and return updated params
new_params = EMAMeanStdNormalizerParams(
ema_counter=params.ema_counter + 1,
biased_first_moment=new_first_moment,
biased_second_moment=new_second_moment,
)
return new_params
return NormalizationFns(
init=init,
normalize=normalize,
update=update,
)
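# Illustrative sketch (not part of the original module) of the
# init -> update -> normalize cycle exposed by NormalizationFns. The spec and
# batch arguments are assumptions made purely for this example.
def _example_ema_normalization_cycle(
    nested_spec: types.NestedSpec,
    batch: types.NestedArray) -> types.NestedArray:
  """Updates EMA statistics on `batch` and returns the normalized batch."""
  fns = build_ema_mean_std_normalizer(nested_spec, tau=0.995)
  params = fns.init()
  # pmap_axis_name=None: statistics are not averaged across devices.
  params = fns.update(params, batch, None)
  return fns.normalize(batch, params)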
def build_mean_std_normalizer(
nested_spec: types.NestedSpec,
max_abs_value: Optional[float] = None) -> NormalizationFns:
"""Builds pure functions used for normalizing based on mean and std.
Arguments:
nested_spec: A nested spec where all leaves have float dtype
max_abs_value: Normalized nested arrays will be clipped so that all values
will be between -max_abs_value and +max_abs_value. Setting to None
(default) does not perform this clipping.
Returns:
NormalizationFns to be used for normalization
"""
def init() -> RunningStatisticsState:
return running_statistics.init_state(nested_spec)
def normalize(
nested_array: types.NestedArray,
params: RunningStatisticsState) -> types.NestedArray:
return running_statistics.normalize(
nested_array, params, max_abs_value=max_abs_value)
def update(
params: RunningStatisticsState,
nested_array: types.NestedArray,
pmap_axis_name: Optional[str]) -> RunningStatisticsState:
return running_statistics.update(
params, nested_array, pmap_axis_name=pmap_axis_name)
return NormalizationFns(
init=init,
normalize=normalize,
update=update)
|
acme-master
|
acme/agents/jax/ppo/normalization.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config classes for CRR."""
import dataclasses
@dataclasses.dataclass
class CRRConfig:
"""Configuration options for CRR.
Attributes:
learning_rate: Learning rate.
discount: discount to use for TD updates.
target_update_period: period to update target's parameters.
use_sarsa_target: compute on-policy target using iterator's actions rather
than sampled actions.
Useful for 1-step offline RL (https://arxiv.org/pdf/2106.08909.pdf).
"""
learning_rate: float = 3e-4
discount: float = 0.99
target_update_period: int = 100
use_sarsa_target: bool = False
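# Illustrative usage sketch (hyperparameter values are assumptions, not
# recommendations): a config for 1-step offline RL with an on-policy (SARSA)
# critic target, as described in the `use_sarsa_target` docstring above.
_EXAMPLE_ONE_STEP_CONFIG = CRRConfig(
    learning_rate=1e-4,
    discount=0.99,
    target_update_period=100,
    use_sarsa_target=True)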
|
acme-master
|
acme/agents/jax/crr/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the Critic Regularized Regression (CRR) agent."""
from acme.agents.jax.crr.builder import CRRBuilder
from acme.agents.jax.crr.config import CRRConfig
from acme.agents.jax.crr.learning import CRRLearner
from acme.agents.jax.crr.losses import policy_loss_coeff_advantage_exp
from acme.agents.jax.crr.losses import policy_loss_coeff_advantage_indicator
from acme.agents.jax.crr.losses import policy_loss_coeff_constant
from acme.agents.jax.crr.losses import PolicyLossCoeff
from acme.agents.jax.crr.networks import CRRNetworks
from acme.agents.jax.crr.networks import make_networks
|
acme-master
|
acme/agents/jax/crr/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CRR Builder."""
from typing import Iterator, Optional
from acme import core
from acme import specs
from acme import types
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.agents.jax.crr import config as crr_config
from acme.agents.jax.crr import learning
from acme.agents.jax.crr import losses
from acme.agents.jax.crr import networks as crr_networks
from acme.jax import networks as networks_lib
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import optax
class CRRBuilder(builders.OfflineBuilder[crr_networks.CRRNetworks,
actor_core_lib.FeedForwardPolicy,
types.Transition]):
"""CRR Builder."""
def __init__(
self,
config: crr_config.CRRConfig,
policy_loss_coeff_fn: losses.PolicyLossCoeff,
):
"""Creates a CRR learner, an evaluation policy and an eval actor.
Args:
config: a config with CRR hps.
policy_loss_coeff_fn: set the loss function for the policy.
"""
self._config = config
self._policy_loss_coeff_fn = policy_loss_coeff_fn
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: crr_networks.CRRNetworks,
dataset: Iterator[types.Transition],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
*,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
del environment_spec
return learning.CRRLearner(
networks=networks,
random_key=random_key,
discount=self._config.discount,
target_update_period=self._config.target_update_period,
iterator=dataset,
policy_loss_coeff_fn=self._policy_loss_coeff_fn,
policy_optimizer=optax.adam(self._config.learning_rate),
critic_optimizer=optax.adam(self._config.learning_rate),
use_sarsa_target=self._config.use_sarsa_target,
logger=logger_fn('learner'),
counter=counter)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: actor_core_lib.FeedForwardPolicy,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
) -> core.Actor:
del environment_spec
assert variable_source is not None
actor_core = actor_core_lib.batched_feed_forward_to_actor_core(policy)
variable_client = variable_utils.VariableClient(
variable_source, 'policy', device='cpu')
return actors.GenericActor(
actor_core, random_key, variable_client, backend='cpu')
def make_policy(self, networks: crr_networks.CRRNetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool) -> actor_core_lib.FeedForwardPolicy:
"""Construct the policy."""
del environment_spec, evaluation
def evaluation_policy(
params: networks_lib.Params, key: networks_lib.PRNGKey,
observation: networks_lib.Observation) -> networks_lib.Action:
dist_params = networks.policy_network.apply(params, observation)
return networks.sample_eval(dist_params, key)
return evaluation_policy
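# Illustrative wiring sketch (not part of the original module): constructing
# the builder with the exponential advantage weighting from `losses`. The
# config values are assumptions made for the example.
def _example_make_builder() -> CRRBuilder:
  """Returns a CRRBuilder using exponential advantage-weighted regression."""
  config = crr_config.CRRConfig(
      learning_rate=3e-4, discount=0.99, target_update_period=100)
  return CRRBuilder(
      config=config,
      policy_loss_coeff_fn=losses.policy_loss_coeff_advantage_exp)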
|
acme-master
|
acme/agents/jax/crr/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks definition for CRR."""
import dataclasses
from typing import Callable, Tuple
from acme import specs
from acme.jax import networks as networks_lib
from acme.jax import utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
@dataclasses.dataclass
class CRRNetworks:
"""Network and pure functions for the CRR agent.."""
policy_network: networks_lib.FeedForwardNetwork
critic_network: networks_lib.FeedForwardNetwork
log_prob: networks_lib.LogProbFn
sample: networks_lib.SampleFn
sample_eval: networks_lib.SampleFn
def make_networks(
spec: specs.EnvironmentSpec,
policy_layer_sizes: Tuple[int, ...] = (256, 256),
critic_layer_sizes: Tuple[int, ...] = (256, 256),
activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.relu,
) -> CRRNetworks:
"""Creates networks used by the agent."""
num_actions = np.prod(spec.actions.shape, dtype=int)
# Create dummy observations and actions to create network parameters.
dummy_action = utils.add_batch_dim(utils.zeros_like(spec.actions))
dummy_obs = utils.add_batch_dim(utils.zeros_like(spec.observations))
def _policy_fn(obs: jnp.ndarray) -> jnp.ndarray:
network = hk.Sequential([
hk.nets.MLP(
list(policy_layer_sizes),
w_init=hk.initializers.VarianceScaling(1.0, 'fan_in', 'uniform'),
activation=activation,
activate_final=True),
networks_lib.NormalTanhDistribution(num_actions),
])
return network(obs)
policy = hk.without_apply_rng(hk.transform(_policy_fn))
policy_network = networks_lib.FeedForwardNetwork(
lambda key: policy.init(key, dummy_obs), policy.apply)
def _critic_fn(obs, action):
network = hk.Sequential([
hk.nets.MLP(
list(critic_layer_sizes) + [1],
w_init=hk.initializers.VarianceScaling(1.0, 'fan_in', 'uniform'),
activation=activation),
])
data = jnp.concatenate([obs, action], axis=-1)
return network(data)
critic = hk.without_apply_rng(hk.transform(_critic_fn))
critic_network = networks_lib.FeedForwardNetwork(
lambda key: critic.init(key, dummy_obs, dummy_action), critic.apply)
return CRRNetworks(
policy_network=policy_network,
critic_network=critic_network,
log_prob=lambda params, actions: params.log_prob(actions),
sample=lambda params, key: params.sample(seed=key),
sample_eval=lambda params, key: params.mode())
|
acme-master
|
acme/agents/jax/crr/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the CRR agent."""
from acme import specs
from acme.agents.jax import crr
from acme.testing import fakes
import jax
import optax
from absl.testing import absltest
from absl.testing import parameterized
class CRRTest(parameterized.TestCase):
@parameterized.named_parameters(
('exp', crr.policy_loss_coeff_advantage_exp),
('indicator', crr.policy_loss_coeff_advantage_indicator),
('all', crr.policy_loss_coeff_constant))
def test_train(self, policy_loss_coeff_fn):
seed = 0
num_iterations = 5
batch_size = 64
grad_updates_per_batch = 1
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(
episode_length=10, bounded=True, action_dim=6)
spec = specs.make_environment_spec(environment)
# Construct the learner.
networks = crr.make_networks(
spec, policy_layer_sizes=(8, 8), critic_layer_sizes=(8, 8))
key = jax.random.PRNGKey(seed)
dataset = fakes.transition_iterator(environment)
learner = crr.CRRLearner(
networks,
key,
discount=0.95,
target_update_period=2,
policy_loss_coeff_fn=policy_loss_coeff_fn,
iterator=dataset(batch_size * grad_updates_per_batch),
policy_optimizer=optax.adam(1e-4),
critic_optimizer=optax.adam(1e-4),
grad_updates_per_batch=grad_updates_per_batch)
# Train the learner.
for _ in range(num_iterations):
learner.step()
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/jax/crr/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss (weight) functions for CRR."""
from typing import Callable
from acme import types
from acme.agents.jax.crr.networks import CRRNetworks
from acme.jax import networks as networks_lib
import jax.numpy as jnp
PolicyLossCoeff = Callable[[
CRRNetworks,
networks_lib.Params,
networks_lib.Params,
types.Transition,
networks_lib.PRNGKey,
], jnp.ndarray]
def _compute_advantage(networks: CRRNetworks,
policy_params: networks_lib.Params,
critic_params: networks_lib.Params,
transition: types.Transition,
key: networks_lib.PRNGKey,
num_action_samples: int = 4) -> jnp.ndarray:
"""Returns the advantage for the transition."""
  # Sample num_action_samples actions from the policy.
replicated_observation = jnp.broadcast_to(transition.observation,
(num_action_samples,) +
transition.observation.shape)
dist_params = networks.policy_network.apply(policy_params,
replicated_observation)
actions = networks.sample(dist_params, key)
# Compute the state-action values for the sampled actions.
q_actions = networks.critic_network.apply(critic_params,
replicated_observation, actions)
# Take the mean as the state-value estimate. It is also possible to take the
# maximum, aka CRR(max); see table 1 in CRR paper.
q_estimate = jnp.mean(q_actions, axis=0)
# Compute the advantage.
q = networks.critic_network.apply(critic_params, transition.observation,
transition.action)
return q - q_estimate
def policy_loss_coeff_advantage_exp(
networks: CRRNetworks,
policy_params: networks_lib.Params,
critic_params: networks_lib.Params,
transition: types.Transition,
key: networks_lib.PRNGKey,
num_action_samples: int = 4,
beta: float = 1.0,
ratio_upper_bound: float = 20.0) -> jnp.ndarray:
"""Exponential advantage weigting; see equation (4) in CRR paper."""
advantage = _compute_advantage(networks, policy_params, critic_params,
transition, key, num_action_samples)
return jnp.minimum(jnp.exp(advantage / beta), ratio_upper_bound)
def policy_loss_coeff_advantage_indicator(
networks: CRRNetworks,
policy_params: networks_lib.Params,
critic_params: networks_lib.Params,
transition: types.Transition,
key: networks_lib.PRNGKey,
num_action_samples: int = 4) -> jnp.ndarray:
"""Indicator advantage weighting; see equation (3) in CRR paper."""
advantage = _compute_advantage(networks, policy_params, critic_params,
transition, key, num_action_samples)
return jnp.heaviside(advantage, 0.)
def policy_loss_coeff_constant(networks: CRRNetworks,
policy_params: networks_lib.Params,
critic_params: networks_lib.Params,
transition: types.Transition,
key: networks_lib.PRNGKey,
value: float = 1.0) -> jnp.ndarray:
"""Constant weights."""
del networks
del policy_params
del critic_params
del transition
del key
return value # pytype: disable=bad-return-type # jax-ndarray
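# Illustrative sketch (not part of the original module) of a user-defined
# PolicyLossCoeff: an exponential advantage weighting clipped from below as
# well as above. The bounds and temperature are assumptions for the example.
def _example_policy_loss_coeff_clipped_exp(
    networks: CRRNetworks,
    policy_params: networks_lib.Params,
    critic_params: networks_lib.Params,
    transition: types.Transition,
    key: networks_lib.PRNGKey) -> jnp.ndarray:
  """Clipped exponential advantage weights in [1e-2, 20]."""
  advantage = _compute_advantage(networks, policy_params, critic_params,
                                 transition, key)
  return jnp.clip(jnp.exp(advantage / 1.0), 1e-2, 20.0)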
|
acme-master
|
acme/agents/jax/crr/losses.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CRR learner implementation."""
import time
from typing import Dict, Iterator, List, NamedTuple, Optional, Tuple
import acme
from acme import types
from acme.agents.jax.crr.losses import PolicyLossCoeff
from acme.agents.jax.crr.networks import CRRNetworks
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
import jax
import jax.numpy as jnp
import optax
class TrainingState(NamedTuple):
"""Contains training state for the learner."""
policy_params: networks_lib.Params
target_policy_params: networks_lib.Params
critic_params: networks_lib.Params
target_critic_params: networks_lib.Params
policy_opt_state: optax.OptState
critic_opt_state: optax.OptState
steps: int
key: networks_lib.PRNGKey
class CRRLearner(acme.Learner):
"""Critic Regularized Regression (CRR) learner.
This is the learning component of a CRR agent as described in
https://arxiv.org/abs/2006.15134.
"""
_state: TrainingState
def __init__(self,
networks: CRRNetworks,
random_key: networks_lib.PRNGKey,
discount: float,
target_update_period: int,
policy_loss_coeff_fn: PolicyLossCoeff,
iterator: Iterator[types.Transition],
policy_optimizer: optax.GradientTransformation,
critic_optimizer: optax.GradientTransformation,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
grad_updates_per_batch: int = 1,
use_sarsa_target: bool = False):
"""Initializes the CRR learner.
Args:
networks: CRR networks.
random_key: a key for random number generation.
discount: discount to use for TD updates.
target_update_period: period to update target's parameters.
policy_loss_coeff_fn: set the loss function for the policy.
iterator: an iterator over training data.
policy_optimizer: the policy optimizer.
critic_optimizer: the Q-function optimizer.
counter: counter object used to keep track of steps.
logger: logger object to be used by learner.
grad_updates_per_batch: how many gradient updates given a sampled batch.
use_sarsa_target: compute on-policy target using iterator's actions rather
than sampled actions.
Useful for 1-step offline RL (https://arxiv.org/pdf/2106.08909.pdf).
When set to `True`, `target_policy_params` are unused.
"""
critic_network = networks.critic_network
policy_network = networks.policy_network
def policy_loss(
policy_params: networks_lib.Params,
critic_params: networks_lib.Params,
transition: types.Transition,
key: networks_lib.PRNGKey,
) -> jnp.ndarray:
# Compute the loss coefficients.
coeff = policy_loss_coeff_fn(networks, policy_params, critic_params,
transition, key)
coeff = jax.lax.stop_gradient(coeff)
# Return the weighted loss.
dist_params = policy_network.apply(policy_params, transition.observation)
logp_action = networks.log_prob(dist_params, transition.action)
# Make sure there is no broadcasting.
logp_action *= coeff.flatten()
assert len(logp_action.shape) == 1
return -jnp.mean(logp_action)
def critic_loss(
critic_params: networks_lib.Params,
target_policy_params: networks_lib.Params,
target_critic_params: networks_lib.Params,
transition: types.Transition,
key: networks_lib.PRNGKey,
):
# Sample the next action.
if use_sarsa_target:
# TODO(b/222674779): use N-steps Trajectories to get the next actions.
assert 'next_action' in transition.extras, (
'next actions should be given as extras for one step RL.')
next_action = transition.extras['next_action']
else:
next_dist_params = policy_network.apply(target_policy_params,
transition.next_observation)
next_action = networks.sample(next_dist_params, key)
# Calculate the value of the next state and action.
next_q = critic_network.apply(target_critic_params,
transition.next_observation, next_action)
target_q = transition.reward + transition.discount * discount * next_q
target_q = jax.lax.stop_gradient(target_q)
q = critic_network.apply(critic_params, transition.observation,
transition.action)
q_error = q - target_q
# Loss is MSE scaled by 0.5, so the gradient is equal to the TD error.
# TODO(sertan): Replace with a distributional critic. CRR paper states
# that this may perform better.
return 0.5 * jnp.mean(jnp.square(q_error))
policy_loss_and_grad = jax.value_and_grad(policy_loss)
critic_loss_and_grad = jax.value_and_grad(critic_loss)
def sgd_step(
state: TrainingState,
transitions: types.Transition,
) -> Tuple[TrainingState, Dict[str, jnp.ndarray]]:
key, key_policy, key_critic = jax.random.split(state.key, 3)
# Compute losses and their gradients.
policy_loss_value, policy_gradients = policy_loss_and_grad(
state.policy_params, state.critic_params, transitions, key_policy)
critic_loss_value, critic_gradients = critic_loss_and_grad(
state.critic_params, state.target_policy_params,
state.target_critic_params, transitions, key_critic)
# Get optimizer updates and state.
policy_updates, policy_opt_state = policy_optimizer.update(
policy_gradients, state.policy_opt_state)
critic_updates, critic_opt_state = critic_optimizer.update(
critic_gradients, state.critic_opt_state)
# Apply optimizer updates to parameters.
policy_params = optax.apply_updates(state.policy_params, policy_updates)
critic_params = optax.apply_updates(state.critic_params, critic_updates)
steps = state.steps + 1
# Periodically update target networks.
target_policy_params, target_critic_params = optax.periodic_update( # pytype: disable=wrong-arg-types # numpy-scalars
(policy_params, critic_params),
(state.target_policy_params, state.target_critic_params), steps,
target_update_period)
new_state = TrainingState(
policy_params=policy_params,
target_policy_params=target_policy_params,
critic_params=critic_params,
target_critic_params=target_critic_params,
policy_opt_state=policy_opt_state,
critic_opt_state=critic_opt_state,
steps=steps,
key=key,
)
metrics = {
'policy_loss': policy_loss_value,
'critic_loss': critic_loss_value,
}
return new_state, metrics
sgd_step = utils.process_multiple_batches(sgd_step, grad_updates_per_batch)
self._sgd_step = jax.jit(sgd_step)
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger(
'learner',
asynchronous=True,
serialize_fn=utils.fetch_devicearray,
steps_key=self._counter.get_steps_key())
# Create prefetching dataset iterator.
self._iterator = iterator
# Create the network parameters and copy into the target network parameters.
key, key_policy, key_critic = jax.random.split(random_key, 3)
initial_policy_params = policy_network.init(key_policy)
initial_critic_params = critic_network.init(key_critic)
initial_target_policy_params = initial_policy_params
initial_target_critic_params = initial_critic_params
# Initialize optimizers.
initial_policy_opt_state = policy_optimizer.init(initial_policy_params)
initial_critic_opt_state = critic_optimizer.init(initial_critic_params)
# Create initial state.
self._state = TrainingState(
policy_params=initial_policy_params,
target_policy_params=initial_target_policy_params,
critic_params=initial_critic_params,
target_critic_params=initial_target_critic_params,
policy_opt_state=initial_policy_opt_state,
critic_opt_state=initial_critic_opt_state,
steps=0,
key=key,
)
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
def step(self):
transitions = next(self._iterator)
self._state, metrics = self._sgd_step(self._state, transitions)
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Increment counts and record the current time
counts = self._counter.increment(steps=1, walltime=elapsed_time)
# Attempts to write the logs.
self._logger.write({**metrics, **counts})
def get_variables(self, names: List[str]) -> List[networks_lib.Params]:
# We only expose the variables for the learned policy and critic. The target
# policy and critic are internal details.
variables = {
'policy': self._state.target_policy_params,
'critic': self._state.target_critic_params,
}
return [variables[name] for name in names]
def save(self) -> TrainingState:
return self._state
def restore(self, state: TrainingState):
self._state = state
|
acme-master
|
acme/agents/jax/crr/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AIL config."""
import dataclasses
from typing import Optional
import optax
@dataclasses.dataclass
class AILConfig:
"""Configuration options for AIL.
Attributes:
direct_rl_batch_size: Batch size of a direct rl algorithm (measured in
transitions).
is_sequence_based: If True, a direct rl algorithm is using SequenceAdder
data format. Otherwise the learner assumes that the direct rl algorithm is
using NStepTransitionAdder.
share_iterator: If True, AIL will use the same iterator for the
discriminator network training as the direct rl algorithm.
num_sgd_steps_per_step: Only used if 'share_iterator' is False. Denotes how
many gradient updates to perform per learner step.
discriminator_batch_size: Batch size for training the discriminator.
policy_variable_name: The name of the policy variable to retrieve direct_rl
policy parameters.
discriminator_optimizer: Optimizer for the discriminator. If not specified
it is set to Adam with learning rate of 1e-5.
replay_table_name: The name of the reverb replay table to use.
prefetch_size: How many batches to prefetch.
discount: Discount to use for TD updates.
min_replay_size: Minimal size of the replay buffer.
max_replay_size: Maximal size of the replay buffer.
policy_to_expert_data_ratio: If not None, the direct RL learner will receive
expert transitions mixed in at the given policy-to-expert ratio.
policy_to_expert_data_ratio + 1 must divide the direct RL batch size.
"""
direct_rl_batch_size: int
is_sequence_based: bool = False
share_iterator: bool = True
num_sgd_steps_per_step: int = 1
discriminator_batch_size: int = 256
policy_variable_name: Optional[str] = None
discriminator_optimizer: Optional[optax.GradientTransformation] = None
replay_table_name: str = 'ail_table'
prefetch_size: int = 4
discount: float = 0.99
min_replay_size: int = 1000
max_replay_size: int = int(1e6)
policy_to_expert_data_ratio: Optional[int] = None
def __post_init__(self):
assert self.direct_rl_batch_size % self.discriminator_batch_size == 0
def get_per_learner_step_batch_size(config: AILConfig) -> int:
"""Returns how many transitions should be sampled per direct learner step."""
# If the iterators are tied, the discriminator learning batch size has to
# match the direct RL one.
if config.share_iterator:
assert (config.direct_rl_batch_size % config.discriminator_batch_size) == 0
return config.direct_rl_batch_size
# Otherwise each learner iteration samples a discriminator batch which is then
# split into num_sgd_steps_per_step sub-batches, each of size
# discriminator_batch_size.
return config.discriminator_batch_size * config.num_sgd_steps_per_step
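# Illustrative helper added for clarity; it is not part of the original acme
# API and the concrete numbers are arbitrary. It shows how the per-learner-step
# sample size changes with `share_iterator`.
def _example_per_learner_step_batch_sizes():
  """Returns the per-step sample sizes for a shared and a split iterator."""
  shared = AILConfig(direct_rl_batch_size=256)  # share_iterator=True (default).
  split = AILConfig(
      direct_rl_batch_size=256,
      share_iterator=False,
      discriminator_batch_size=64,
      num_sgd_steps_per_step=2)
  # Shared: the discriminator reuses the full direct RL batch -> 256.
  # Split: 64 * 2 = 128 transitions are sampled and consumed as two SGD steps.
  return (get_per_learner_step_batch_size(shared),
          get_per_learner_step_batch_size(split))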
|
acme-master
|
acme/agents/jax/ail/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builder for DAC.
https://arxiv.org/pdf/1809.02925.pdf
"""
import dataclasses
from typing import Callable, Iterator
from acme import types
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import td3
from acme.agents.jax.ail import builder
from acme.agents.jax.ail import config as ail_config
from acme.agents.jax.ail import losses
@dataclasses.dataclass
class DACConfig:
"""Configuration options specific to DAC.
Attributes:
ail_config: AIL config.
td3_config: TD3 config.
entropy_coefficient: Entropy coefficient of the discriminator loss.
gradient_penalty_coefficient: Coefficient for the gradient penalty term in
the discriminator loss.
"""
ail_config: ail_config.AILConfig
td3_config: td3.TD3Config
entropy_coefficient: float = 1e-3
gradient_penalty_coefficient: float = 10.
class DACBuilder(builder.AILBuilder[td3.TD3Networks,
actor_core_lib.FeedForwardPolicy]):
"""DAC Builder."""
def __init__(self, config: DACConfig,
make_demonstrations: Callable[[int],
Iterator[types.Transition]]):
td3_builder = td3.TD3Builder(config.td3_config)
dac_loss = losses.add_gradient_penalty(
losses.gail_loss(entropy_coefficient=config.entropy_coefficient),
gradient_penalty_coefficient=config.gradient_penalty_coefficient,
gradient_penalty_target=1.)
super().__init__(
td3_builder,
config=config.ail_config,
discriminator_loss=dac_loss,
make_demonstrations=make_demonstrations)
|
acme-master
|
acme/agents/jax/ail/dac.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the AIL learner."""
import functools
from acme import specs
from acme import types
from acme.agents.jax.ail import learning as ail_learning
from acme.agents.jax.ail import losses
from acme.agents.jax.ail import networks as ail_networks
from acme.jax import networks as networks_lib
from acme.jax import utils
import haiku as hk
import jax
import numpy as np
import optax
from absl.testing import absltest
def _make_discriminator(spec):
def discriminator(*args, **kwargs) -> networks_lib.Logits:
return ail_networks.DiscriminatorModule(
environment_spec=spec,
use_action=False,
use_next_obs=False,
network_core=ail_networks.DiscriminatorMLP([]))(*args, **kwargs)
discriminator_transformed = hk.without_apply_rng(
hk.transform_with_state(discriminator))
return ail_networks.make_discriminator(
environment_spec=spec,
discriminator_transformed=discriminator_transformed)
class AilLearnerTest(absltest.TestCase):
def test_step(self):
simple_spec = specs.Array(shape=(), dtype=float)
spec = specs.EnvironmentSpec(simple_spec, simple_spec, simple_spec,
simple_spec)
discriminator = _make_discriminator(spec)
ail_network = ail_networks.AILNetworks(
discriminator, imitation_reward_fn=lambda x: x, direct_rl_networks=None)
loss = losses.gail_loss()
optimizer = optax.adam(.01)
step = jax.jit(functools.partial(
ail_learning.ail_update_step,
optimizer=optimizer,
ail_network=ail_network,
loss_fn=loss))
zero_transition = types.Transition(
np.array([0.]), np.array([0.]), 0., 0., np.array([0.]))
zero_transition = utils.add_batch_dim(zero_transition)
one_transition = types.Transition(
np.array([1.]), np.array([0.]), 0., 0., np.array([0.]))
one_transition = utils.add_batch_dim(one_transition)
key = jax.random.PRNGKey(0)
discriminator_params, discriminator_state = discriminator.init(key)
state = ail_learning.DiscriminatorTrainingState(
optimizer_state=optimizer.init(discriminator_params),
discriminator_params=discriminator_params,
discriminator_state=discriminator_state,
policy_params=None,
key=key,
steps=0,
)
expected_loss = [1.062, 1.057, 1.052]
for i in range(3):
state, loss = step(state, (one_transition, zero_transition))
self.assertAlmostEqual(loss['total_loss'], expected_loss[i], places=3)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/jax/ail/learning_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of a AIL agent."""
from acme.agents.jax.ail import losses
from acme.agents.jax.ail import rewards
from acme.agents.jax.ail.builder import AILBuilder
from acme.agents.jax.ail.config import AILConfig
from acme.agents.jax.ail.dac import DACBuilder
from acme.agents.jax.ail.dac import DACConfig
from acme.agents.jax.ail.gail import GAILBuilder
from acme.agents.jax.ail.gail import GAILConfig
from acme.agents.jax.ail.learning import AILLearner
from acme.agents.jax.ail.networks import AILNetworks
from acme.agents.jax.ail.networks import AIRLModule
from acme.agents.jax.ail.networks import compute_ail_reward
from acme.agents.jax.ail.networks import DiscriminatorMLP
from acme.agents.jax.ail.networks import DiscriminatorModule
from acme.agents.jax.ail.networks import make_discriminator
|
acme-master
|
acme/agents/jax/ail/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adversarial Imitation Learning (AIL) Builder."""
import functools
import itertools
from typing import Callable, Generic, Iterator, List, Optional, Tuple
from acme import adders
from acme import core
from acme import specs
from acme import types
from acme.adders import reverb as adders_reverb
from acme.agents.jax import builders
from acme.agents.jax.ail import config as ail_config
from acme.agents.jax.ail import learning
from acme.agents.jax.ail import losses
from acme.agents.jax.ail import networks as ail_networks
from acme.datasets import reverb as datasets
from acme.jax import types as jax_types
from acme.jax import utils
from acme.jax.imitation_learning_types import DirectPolicyNetwork
from acme.utils import counting
from acme.utils import loggers
from acme.utils import reverb_utils
import jax
import numpy as np
import optax
import reverb
from reverb import rate_limiters
import tree
def _split_transitions(
transitions: types.Transition,
index: int) -> Tuple[types.Transition, types.Transition]:
"""Splits the given transition on the first axis at the given index.
Args:
transitions: Transitions to split.
index: Splitting index.
Returns:
A pair of transitions: the first contains the elements before the index
(exclusive) and the second the elements from the index onwards (inclusive).
"""
return (tree.map_structure(lambda x: x[:index], transitions),
tree.map_structure(lambda x: x[index:], transitions))
def _rebatch(iterator: Iterator[types.Transition],
batch_size: int) -> Iterator[types.Transition]:
"""Rebatch the itererator with the given batch size.
Args:
iterator: Iterator to rebatch.
batch_size: New batch size.
Yields:
Transitions with the new batch size.
"""
data = next(iterator)
while True:
while len(data.reward) < batch_size:
# Ensure we can get enough demonstrations.
next_data = next(iterator)
data = tree.map_structure(lambda *args: np.concatenate(list(args)), data,
next_data)
output, data = _split_transitions(data, batch_size)
yield output
def _mix_arrays(
replay: np.ndarray,
demo: np.ndarray,
index: int,
seed: int) -> np.ndarray:
"""Mixes `replay` and `demo`.
Args:
replay: Replay data to mix. Only the first 'index' elements will be kept.
demo: Demonstration data to mix in.
index: Amount of replay data to include.
seed: RNG seed.
Returns:
An array with the replay elements up to 'index' and all the demos, randomly
permuted.
"""
# We're throwing away some replay data here. We have to if we want to make
# sure the output info field is correct.
output = np.concatenate((replay[:index], demo))
return np.random.default_rng(seed=seed).permutation(output)
def _generate_samples_with_demonstrations(
demonstration_iterator: Iterator[types.Transition],
replay_iterator: Iterator[reverb.ReplaySample],
policy_to_expert_data_ratio: int,
batch_size) -> Iterator[reverb.ReplaySample]:
"""Generator which creates the sample having demonstrations in them.
It takes the demonstration and replay iterators and generates batches of the
same size as the replay batches, such that each batch contains, on average,
the ratio of policy to expert data specified in policy_to_expert_data_ratio.
There are no constraints on how the demonstrations and replay samples should
be batched.
Args:
demonstration_iterator: Iterator of demonstrations.
replay_iterator: Replay buffer sample iterator.
policy_to_expert_data_ratio: Number of policy transitions per expert
transition.
batch_size: Output batch size, which should match the replay batch size.
Yields:
Samples having a mix of demonstrations and policy data. The info will match
the current replay sample info and the batch size will be the same as the
replay_iterator data batch size.
"""
count = 0
if batch_size % (policy_to_expert_data_ratio + 1) != 0:
raise ValueError(
'policy_to_expert_data_ratio + 1 must divide the batch size but '
f'{batch_size} % {policy_to_expert_data_ratio+1} !=0')
demo_insertion_size = batch_size // (policy_to_expert_data_ratio + 1)
policy_insertion_size = batch_size - demo_insertion_size
demonstration_iterator = _rebatch(demonstration_iterator, demo_insertion_size)
for sample, demos in zip(replay_iterator, demonstration_iterator):
output_transitions = tree.map_structure(
functools.partial(_mix_arrays,
index=policy_insertion_size,
seed=count),
sample.data, demos)
count += 1
yield reverb.ReplaySample(info=sample.info, data=output_transitions)
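# Worked example (added for clarity, mirroring builder_test.py): with
# batch_size=3 and policy_to_expert_data_ratio=2, demo_insertion_size is
# 3 // (2 + 1) = 1 and policy_insertion_size is 2, so every yielded batch mixes
# two replay transitions with one demonstration, shuffled by _mix_arrays.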
class AILBuilder(builders.ActorLearnerBuilder[ail_networks.AILNetworks,
DirectPolicyNetwork,
learning.AILSample],
Generic[ail_networks.DirectRLNetworks, DirectPolicyNetwork]):
"""AIL Builder."""
def __init__(
self,
rl_agent: builders.ActorLearnerBuilder[ail_networks.DirectRLNetworks,
DirectPolicyNetwork,
reverb.ReplaySample],
config: ail_config.AILConfig, discriminator_loss: losses.Loss,
make_demonstrations: Callable[[int], Iterator[types.Transition]]):
"""Implements a builder for AIL using rl_agent as forward RL algorithm.
Args:
rl_agent: The standard RL agent used by AIL to optimize the generator.
config: An AIL config.
discriminator_loss: The loss function for the discriminator to minimize.
make_demonstrations: A function that returns an iterator with
demonstrations to be imitated.
"""
self._rl_agent = rl_agent
self._config = config
self._discriminator_loss = discriminator_loss
self._make_demonstrations = make_demonstrations
def make_learner(self,
random_key: jax_types.PRNGKey,
networks: ail_networks.AILNetworks,
dataset: Iterator[learning.AILSample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None) -> core.Learner:
counter = counter or counting.Counter()
direct_rl_counter = counting.Counter(counter, 'direct_rl')
batch_size_per_learner_step = ail_config.get_per_learner_step_batch_size(
self._config)
direct_rl_learner_key, discriminator_key = jax.random.split(random_key)
direct_rl_learner = functools.partial(
self._rl_agent.make_learner,
direct_rl_learner_key,
networks.direct_rl_networks,
logger_fn=logger_fn,
environment_spec=environment_spec,
replay_client=replay_client,
counter=direct_rl_counter)
discriminator_optimizer = (
self._config.discriminator_optimizer or optax.adam(1e-5))
return learning.AILLearner(
counter,
direct_rl_learner_factory=direct_rl_learner,
loss_fn=self._discriminator_loss,
iterator=dataset,
discriminator_optimizer=discriminator_optimizer,
ail_network=networks,
discriminator_key=discriminator_key,
is_sequence_based=self._config.is_sequence_based,
num_sgd_steps_per_step=batch_size_per_learner_step //
self._config.discriminator_batch_size,
policy_variable_name=self._config.policy_variable_name,
logger=logger_fn('learner', steps_key=counter.get_steps_key()))
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: DirectPolicyNetwork,
) -> List[reverb.Table]:
replay_tables = self._rl_agent.make_replay_tables(environment_spec, policy)
if self._config.share_iterator:
return replay_tables
replay_tables.append(
reverb.Table(
name=self._config.replay_table_name,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._config.max_replay_size,
rate_limiter=rate_limiters.MinSize(self._config.min_replay_size),
signature=adders_reverb.NStepTransitionAdder.signature(
environment_spec)))
return replay_tables
# This function does not expose all the iterators used by the learner when
# share_iterator is False, making further wrapping impossible.
# TODO(eorsini): Expose all iterators.
# Currently GAIL uses 3 iterators; instead, we could make it use a single
# iterator and return that one here. The way to achieve this would be:
# * Create the 3 iterators here.
# * Zip them and return them here.
# * Unzip them in the learner (this step will not be necessary once we move to
# stateless learners).
# This should work fine as the 3 iterators are always iterated in parallel
# (i.e. at every step we call next once on each of them).
def make_dataset_iterator(
self, replay_client: reverb.Client) -> Iterator[learning.AILSample]:
batch_size_per_learner_step = ail_config.get_per_learner_step_batch_size(
self._config)
iterator_demonstration = self._make_demonstrations(
batch_size_per_learner_step)
direct_iterator = self._rl_agent.make_dataset_iterator(replay_client)
if self._config.share_iterator:
# In order to reuse the iterator return values and not lose a 2x factor on
# sample efficiency, we need to use itertools.tee().
discriminator_iterator, direct_iterator = itertools.tee(direct_iterator)
else:
discriminator_iterator = datasets.make_reverb_dataset(
table=self._config.replay_table_name,
server_address=replay_client.server_address,
batch_size=ail_config.get_per_learner_step_batch_size(self._config),
prefetch_size=self._config.prefetch_size).as_numpy_iterator()
if self._config.policy_to_expert_data_ratio is not None:
iterator_demonstration, iterator_demonstration2 = itertools.tee(
iterator_demonstration)
direct_iterator = _generate_samples_with_demonstrations(
iterator_demonstration2, direct_iterator,
self._config.policy_to_expert_data_ratio,
self._config.direct_rl_batch_size)
is_sequence_based = self._config.is_sequence_based
# Don't flatten the discriminator batch if the iterator is not shared.
process_discriminator_sample = functools.partial(
reverb_utils.replay_sample_to_sars_transition,
is_sequence=is_sequence_based and self._config.share_iterator,
flatten_batch=is_sequence_based and self._config.share_iterator,
strip_last_transition=is_sequence_based and self._config.share_iterator)
discriminator_iterator = (
# Remove the extras to have the same nested structure as demonstrations.
process_discriminator_sample(sample)._replace(extras=())
for sample in discriminator_iterator)
return utils.device_put((learning.AILSample(*sample) for sample in zip(
discriminator_iterator, direct_iterator, iterator_demonstration)),
jax.devices()[0])
def make_adder(
self, replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[DirectPolicyNetwork]) -> Optional[adders.Adder]:
direct_rl_adder = self._rl_agent.make_adder(replay_client, environment_spec,
policy)
if self._config.share_iterator:
return direct_rl_adder
ail_adder = adders_reverb.NStepTransitionAdder(
priority_fns={self._config.replay_table_name: None},
client=replay_client,
n_step=1,
discount=self._config.discount)
# Some direct RL algorithms (such as PPO) might be passing extra data
# which we won't be able to process here properly, so we need to ignore it.
return adders.ForkingAdder(
[adders.IgnoreExtrasAdder(ail_adder), direct_rl_adder])
def make_actor(
self,
random_key: jax_types.PRNGKey,
policy: DirectPolicyNetwork,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> core.Actor:
return self._rl_agent.make_actor(random_key, policy, environment_spec,
variable_source, adder)
def make_policy(self,
networks: ail_networks.AILNetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> DirectPolicyNetwork:
return self._rl_agent.make_policy(networks.direct_rl_networks,
environment_spec, evaluation)
|
acme-master
|
acme/agents/jax/ail/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks definitions for the BC agent.
AIRL network architecture follows https://arxiv.org/pdf/1710.11248.pdf.
"""
import dataclasses
import functools
from typing import Any, Callable, Generic, Iterable, Optional
from acme import specs
from acme import types
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.jax.imitation_learning_types import DirectRLNetworks
import haiku as hk
import jax
from jax import numpy as jnp
import numpy as np
# Function from discriminator logit to imitation reward.
ImitationRewardFn = Callable[[networks_lib.Logits], jnp.ndarray]
State = networks_lib.Params
@dataclasses.dataclass
class AILNetworks(Generic[DirectRLNetworks]):
"""AIL networks data class.
Attributes:
discriminator_network: Network which takes as input
(observations, actions, next_observations, direct_rl_params)
and returns the discriminator logit.
If the discriminator does not need direct_rl_params you can pass ().
imitation_reward_fn: Function from logit of the discriminator to imitation
reward.
direct_rl_networks: Networks of the direct RL algorithm.
"""
discriminator_network: networks_lib.FeedForwardNetwork
imitation_reward_fn: ImitationRewardFn
direct_rl_networks: DirectRLNetworks
def compute_ail_reward(discriminator_params: networks_lib.Params,
discriminator_state: State,
policy_params: Optional[networks_lib.Params],
transitions: types.Transition,
networks: AILNetworks) -> jnp.ndarray:
"""Computes the AIL reward for a given transition.
Args:
discriminator_params: Parameters of the discriminator network.
discriminator_state: State of the discriminator network.
policy_params: Parameters of the direct RL policy.
transitions: Transitions to compute the reward for.
networks: AIL networks.
Returns:
The rewards as an ndarray.
"""
logits, _ = networks.discriminator_network.apply(
discriminator_params,
policy_params,
discriminator_state,
transitions,
is_training=False,
rng=None)
return networks.imitation_reward_fn(logits)
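# Usage sketch (comment only, mirroring how AILLearner in learning.py calls
# this function): it is typically partially applied to the networks and jitted:
#   reward_fn = jax.jit(functools.partial(compute_ail_reward, networks=networks))
#   rewards = reward_fn(discriminator_params, discriminator_state,
#                       policy_params, transitions)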
class SpectralNormalizedLinear(hk.Module):
"""SpectralNormalizedLinear module.
This is a Linear layer with an upper-bounded Lipschitz constant. It is used in iResNet.
Reference:
Behrmann et al. Invertible Residual Networks. ICML 2019.
https://arxiv.org/pdf/1811.00995.pdf
"""
def __init__(
self,
output_size: int,
lipschitz_coeff: float,
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
name: Optional[str] = None,
):
"""Constructs the SpectralNormalizedLinear module.
Args:
output_size: Output dimensionality.
lipschitz_coeff: Spectral normalization coefficient.
with_bias: Whether to add a bias to the output.
w_init: Optional initializer for weights. By default, uses random values
from truncated normal, with stddev ``1 / sqrt(fan_in)``. See
https://arxiv.org/abs/1502.03167v3.
b_init: Optional initializer for bias. By default, zero.
name: Name of the module.
"""
super().__init__(name=name)
self.input_size = None
self.output_size = output_size
self.with_bias = with_bias
self.w_init = w_init
self.b_init = b_init or jnp.zeros
self.lipschitz_coeff = lipschitz_coeff
self.num_iterations = 100
self.eps = 1e-6
def get_normalized_weights(self,
weights: jnp.ndarray,
renormalize: bool = False) -> jnp.ndarray:
def _l2_normalize(x, axis=None, eps=1e-12):
return x * jax.lax.rsqrt((x * x).sum(axis=axis, keepdims=True) + eps)
output_size = self.output_size
dtype = weights.dtype
assert output_size == weights.shape[-1]
sigma = hk.get_state('sigma', (), init=jnp.ones)
if renormalize:
# Power iterations to compute spectral norm V*W*U^T.
u = hk.get_state(
'u', (1, output_size), dtype, init=hk.initializers.RandomNormal())
for _ in range(self.num_iterations):
v = _l2_normalize(jnp.matmul(u, weights.transpose()), eps=self.eps)
u = _l2_normalize(jnp.matmul(v, weights), eps=self.eps)
u = jax.lax.stop_gradient(u)
v = jax.lax.stop_gradient(v)
sigma = jnp.matmul(jnp.matmul(v, weights), jnp.transpose(u))[0, 0]
hk.set_state('u', u)
hk.set_state('v', v)
hk.set_state('sigma', sigma)
factor = jnp.maximum(1, sigma / self.lipschitz_coeff)
return weights / factor
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
"""Computes a linear transform of the input."""
if not inputs.shape:
raise ValueError('Input must not be scalar.')
input_size = self.input_size = inputs.shape[-1]
output_size = self.output_size
dtype = inputs.dtype
w_init = self.w_init
if w_init is None:
stddev = 1. / np.sqrt(self.input_size)
w_init = hk.initializers.TruncatedNormal(stddev=stddev)
w = hk.get_parameter('w', [input_size, output_size], dtype, init=w_init)
w = self.get_normalized_weights(w, renormalize=True)
out = jnp.dot(inputs, w)
if self.with_bias:
b = hk.get_parameter('b', [self.output_size], dtype, init=self.b_init)
b = jnp.broadcast_to(b, out.shape)
out = out + b
return out
class DiscriminatorMLP(hk.Module):
"""A multi-layer perceptron module."""
def __init__(
self,
hidden_layer_sizes: Iterable[int],
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
with_bias: bool = True,
activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.relu,
input_dropout_rate: float = 0.,
hidden_dropout_rate: float = 0.,
spectral_normalization_lipschitz_coeff: Optional[float] = None,
name: Optional[str] = None
):
"""Constructs an MLP.
Args:
hidden_layer_sizes: Hidden layer sizes.
w_init: Initializer for :class:`~haiku.Linear` weights.
b_init: Initializer for :class:`~haiku.Linear` bias. Must be ``None`` if
``with_bias=False``.
with_bias: Whether or not to apply a bias in each layer.
activation: Activation function to apply between :class:`~haiku.Linear`
layers. Defaults to ReLU.
input_dropout_rate: Dropout on the input.
hidden_dropout_rate: Dropout on the hidden layer outputs.
spectral_normalization_lipschitz_coeff: If not None, the network will have
spectral normalization with the given constant.
name: Optional name for this module.
Raises:
ValueError: If ``with_bias`` is ``False`` and ``b_init`` is not ``None``.
"""
if not with_bias and b_init is not None:
raise ValueError('When with_bias=False b_init must not be set.')
super().__init__(name=name)
self._activation = activation
self._input_dropout_rate = input_dropout_rate
self._hidden_dropout_rate = hidden_dropout_rate
layer_sizes = list(hidden_layer_sizes) + [1]
if spectral_normalization_lipschitz_coeff is not None:
layer_lipschitz_coeff = np.power(spectral_normalization_lipschitz_coeff,
1. / len(layer_sizes))
layer_module = functools.partial(
SpectralNormalizedLinear,
lipschitz_coeff=layer_lipschitz_coeff,
w_init=w_init,
b_init=b_init,
with_bias=with_bias)
else:
layer_module = functools.partial(
hk.Linear,
w_init=w_init,
b_init=b_init,
with_bias=with_bias)
layers = []
for index, output_size in enumerate(layer_sizes):
layers.append(
layer_module(output_size=output_size, name=f'linear_{index}'))
self._layers = tuple(layers)
def __call__(
self,
inputs: jnp.ndarray,
is_training: bool,
rng: Optional[networks_lib.PRNGKey],
) -> networks_lib.Logits:
rng = hk.PRNGSequence(rng) if rng is not None else None
out = inputs
for i, layer in enumerate(self._layers):
if is_training:
dropout_rate = (
self._input_dropout_rate if i == 0 else self._hidden_dropout_rate)
out = hk.dropout(next(rng), dropout_rate, out)
out = layer(out)
if i < len(self._layers) - 1:
out = self._activation(out)
return out
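# Construction sketch (illustrative, not from the original file): a small
# discriminator body whose overall Lipschitz constant is bounded by ~0.95;
# with two hidden layers plus the final logit layer, each of the three
# SpectralNormalizedLinear layers gets a coefficient of 0.95 ** (1 / 3):
#   core = DiscriminatorMLP([64, 64],
#                           spectral_normalization_lipschitz_coeff=0.95)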
class DiscriminatorModule(hk.Module):
"""Discriminator module that concatenates its inputs."""
def __init__(self,
environment_spec: specs.EnvironmentSpec,
use_action: bool,
use_next_obs: bool,
network_core: Callable[..., Any],
observation_embedding: Callable[[networks_lib.Observation],
jnp.ndarray] = lambda x: x,
name='discriminator'):
super().__init__(name=name)
self._use_action = use_action
self._environment_spec = environment_spec
self._use_next_obs = use_next_obs
self._network_core = network_core
self._observation_embedding = observation_embedding
def __call__(self, observations: networks_lib.Observation,
actions: networks_lib.Action,
next_observations: networks_lib.Observation, is_training: bool,
rng: networks_lib.PRNGKey) -> networks_lib.Logits:
observations = self._observation_embedding(observations)
if self._use_next_obs:
next_observations = self._observation_embedding(next_observations)
data = jnp.concatenate([observations, next_observations], axis=-1)
else:
data = observations
if self._use_action:
action_spec = self._environment_spec.actions
if isinstance(action_spec, specs.DiscreteArray):
actions = jax.nn.one_hot(actions,
action_spec.num_values)
data = jnp.concatenate([data, actions], axis=-1)
output = self._network_core(data, is_training, rng)
output = jnp.squeeze(output, axis=-1)
return output
class AIRLModule(hk.Module):
"""AIRL Module."""
def __init__(self,
environment_spec: specs.EnvironmentSpec,
use_action: bool,
use_next_obs: bool,
discount: float,
g_core: Callable[..., Any],
h_core: Callable[..., Any],
observation_embedding: Callable[[networks_lib.Observation],
jnp.ndarray] = lambda x: x,
name='airl'):
super().__init__(name=name)
self._environment_spec = environment_spec
self._use_action = use_action
self._use_next_obs = use_next_obs
self._discount = discount
self._g_core = g_core
self._h_core = h_core
self._observation_embedding = observation_embedding
def __call__(self, observations: networks_lib.Observation,
actions: networks_lib.Action,
next_observations: networks_lib.Observation,
is_training: bool,
rng: networks_lib.PRNGKey) -> networks_lib.Logits:
g_output = DiscriminatorModule(
environment_spec=self._environment_spec,
use_action=self._use_action,
use_next_obs=self._use_next_obs,
network_core=self._g_core,
observation_embedding=self._observation_embedding,
name='airl_g')(observations, actions, next_observations, is_training,
rng)
h_module = DiscriminatorModule(
environment_spec=self._environment_spec,
use_action=False,
use_next_obs=False,
network_core=self._h_core,
observation_embedding=self._observation_embedding,
name='airl_h')
return (g_output + self._discount * h_module(next_observations, (),
(), is_training, rng) -
h_module(observations, (), (), is_training, rng))
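# Note (added for clarity): the expression above implements the AIRL logit
# decomposition f(s, a, s') = g(s, a) + discount * h(s') - h(s) from
# https://arxiv.org/pdf/1710.11248.pdf, where g scores the transition and h
# acts as a learned shaping/potential term.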
# TODO(eorsini): Manipulate FeedForwardNetworks instead of transforms to
# increase compatibility with Flax.
def make_discriminator(
environment_spec: specs.EnvironmentSpec,
discriminator_transformed: hk.TransformedWithState,
logpi_fn: Optional[Callable[
[networks_lib.Params, networks_lib.Observation, networks_lib.Action],
jnp.ndarray]] = None
) -> networks_lib.FeedForwardNetwork:
"""Creates the discriminator network.
Args:
environment_spec: Environment spec
discriminator_transformed: Haiku transformed of the discriminator.
logpi_fn: If the policy logpi function is provided, its output will be
removed from the discriminator logit.
Returns:
The network.
"""
def apply_fn(params: hk.Params,
policy_params: networks_lib.Params,
state: hk.State,
transitions: types.Transition,
is_training: bool,
rng: networks_lib.PRNGKey) -> networks_lib.Logits:
output, state = discriminator_transformed.apply(
params, state, transitions.observation, transitions.action,
transitions.next_observation, is_training, rng)
if logpi_fn is not None:
logpi = logpi_fn(policy_params, transitions.observation,
transitions.action)
# Quick Maths:
# D = exp(output)/(exp(output) + pi(a|s))
# logit(D) = log(D/(1-D)) = log(exp(output)/pi(a|s))
# logit(D) = output - logpi
return output - logpi, state # pytype: disable=bad-return-type # jax-ndarray
return output, state # pytype: disable=bad-return-type # jax-ndarray
dummy_obs = utils.zeros_like(environment_spec.observations)
dummy_obs = utils.add_batch_dim(dummy_obs)
dummy_actions = utils.zeros_like(environment_spec.actions)
dummy_actions = utils.add_batch_dim(dummy_actions)
return networks_lib.FeedForwardNetwork(
# pylint: disable=g-long-lambda
init=lambda rng: discriminator_transformed.init(
rng, dummy_obs, dummy_actions, dummy_obs, False, rng),
apply=apply_fn)
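# Construction sketch (illustrative; learning_test.py contains a runnable
# variant): wrap a haiku discriminator module with hk.transform_with_state and
# pass it to make_discriminator.
#   def _discriminator_fn(*args, **kwargs):
#     return DiscriminatorModule(
#         environment_spec=spec,
#         use_action=True,
#         use_next_obs=True,
#         network_core=DiscriminatorMLP([64, 64]))(*args, **kwargs)
#   transformed = hk.without_apply_rng(hk.transform_with_state(_discriminator_fn))
#   discriminator = make_discriminator(spec, transformed)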
|
acme-master
|
acme/agents/jax/ail/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AIL logits to AIL reward."""
from typing import Optional
from acme.agents.jax.ail import networks as ail_networks
from acme.jax import networks as networks_lib
import jax
import jax.numpy as jnp
def fairl_reward(
max_reward_magnitude: Optional[float] = None
) -> ail_networks.ImitationRewardFn:
"""The FAIRL reward function (https://arxiv.org/pdf/1911.02256.pdf).
Args:
max_reward_magnitude: Clipping value for the reward.
Returns:
The function from logit to imitation reward.
"""
def imitation_reward(logits: networks_lib.Logits) -> float:
rewards = jnp.exp(jnp.clip(logits, a_max=20.)) * -logits
if max_reward_magnitude is not None:
# pylint: disable=invalid-unary-operand-type
rewards = jnp.clip(
rewards, a_min=-max_reward_magnitude, a_max=max_reward_magnitude)
return rewards # pytype: disable=bad-return-type # jax-types
return imitation_reward # pytype: disable=bad-return-type # jax-ndarray
def gail_reward(
reward_balance: float = .5,
max_reward_magnitude: Optional[float] = None
) -> ail_networks.ImitationRewardFn:
"""GAIL reward function (https://arxiv.org/pdf/1606.03476.pdf).
Args:
reward_balance: 1 means a log(D) reward, 0 means -log(1-D), and intermediate
values give the corresponding weighted average of the two.
max_reward_magnitude: Clipping value for the reward.
Returns:
The function from logit to imitation reward.
"""
def imitation_reward(logits: networks_lib.Logits) -> float:
# Quick Maths:
# logits = ln(D) - ln(1-D)
# -softplus(-logits) = ln(D)
# softplus(logits) = -ln(1-D)
rewards = (
reward_balance * -jax.nn.softplus(-logits) +
(1 - reward_balance) * jax.nn.softplus(logits))
if max_reward_magnitude is not None:
# pylint: disable=invalid-unary-operand-type
rewards = jnp.clip(
rewards, a_min=-max_reward_magnitude, a_max=max_reward_magnitude)
return rewards
return imitation_reward # pytype: disable=bad-return-type # jax-ndarray
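# Sanity check (added for clarity): at logits == 0 the discriminator is
# maximally uncertain (D == 0.5), so with the default reward_balance of .5 the
# two terms -softplus(-0) and softplus(0) cancel and the GAIL reward is 0;
# larger logits (more expert-like transitions) give larger rewards.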
|
acme-master
|
acme/agents/jax/ail/rewards.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AIL discriminator losses."""
import functools
from typing import Callable, Dict, Optional, Tuple
from acme import types
from acme.jax import networks as networks_lib
import jax
import jax.numpy as jnp
import tensorflow_probability as tfp
import tree
tfp = tfp.experimental.substrates.jax
tfd = tfp.distributions
# The loss is a function taking the discriminator, its state, the demo
# transitions and the replay buffer transitions.
# It returns the loss as a float, together with a metrics dictionary and the
# new state.
State = networks_lib.Params
DiscriminatorOutput = Tuple[networks_lib.Logits, State]
DiscriminatorFn = Callable[[State, types.Transition], DiscriminatorOutput]
Metrics = Dict[str, float]
LossOutput = Tuple[float, Tuple[Metrics, State]]
Loss = Callable[[
DiscriminatorFn, State, types.Transition, types.Transition, networks_lib
.PRNGKey
], LossOutput]
def _binary_cross_entropy_loss(logit: jnp.ndarray,
label: jnp.ndarray) -> jnp.ndarray:
return label * jax.nn.softplus(-logit) + (1 - label) * jax.nn.softplus(logit)
@jax.vmap
def _weighted_average(x: jnp.ndarray, y: jnp.ndarray,
lambdas: jnp.ndarray) -> jnp.ndarray:
return lambdas * x + (1. - lambdas) * y
def _label_data(
rb_transitions: types.Transition,
demonstration_transitions: types.Transition, mixup_alpha: Optional[float],
key: networks_lib.PRNGKey) -> Tuple[types.Transition, jnp.ndarray]:
"""Create a tuple data, labels by concatenating the rb and dem transitions."""
data = tree.map_structure(lambda x, y: jnp.concatenate([x, y]),
rb_transitions, demonstration_transitions)
labels = jnp.concatenate([
jnp.zeros(rb_transitions.reward.shape),
jnp.ones(demonstration_transitions.reward.shape)
])
if mixup_alpha is not None:
lambda_key, mixup_key = jax.random.split(key)
lambdas = tfd.Beta(mixup_alpha, mixup_alpha).sample(
len(labels), seed=lambda_key)
shuffled_data = tree.map_structure(
lambda x: jax.random.permutation(key=mixup_key, x=x), data)
shuffled_labels = jax.random.permutation(key=mixup_key, x=labels)
data = tree.map_structure(lambda x, y: _weighted_average(x, y, lambdas),
data, shuffled_data)
labels = _weighted_average(labels, shuffled_labels, lambdas)
return data, labels
def _logit_bernoulli_entropy(logits: networks_lib.Logits) -> jnp.ndarray:
return (1. - jax.nn.sigmoid(logits)) * logits - jax.nn.log_sigmoid(logits)
def gail_loss(entropy_coefficient: float = 0.,
mixup_alpha: Optional[float] = None) -> Loss:
"""Computes the standard GAIL loss."""
def loss_fn(
discriminator_fn: DiscriminatorFn,
discriminator_state: State,
demo_transitions: types.Transition, rb_transitions: types.Transition,
rng_key: networks_lib.PRNGKey) -> LossOutput:
data, labels = _label_data(
rb_transitions=rb_transitions,
demonstration_transitions=demo_transitions,
mixup_alpha=mixup_alpha,
key=rng_key)
logits, discriminator_state = discriminator_fn(discriminator_state, data)
classification_loss = jnp.mean(_binary_cross_entropy_loss(logits, labels))
entropy = jnp.mean(_logit_bernoulli_entropy(logits))
entropy_loss = -entropy_coefficient * entropy
total_loss = classification_loss + entropy_loss
metrics = {
'total_loss': total_loss,
'entropy_loss': entropy_loss,
'classification_loss': classification_loss
}
return total_loss, (metrics, discriminator_state)
return loss_fn
def pugail_loss(positive_class_prior: float,
entropy_coefficient: float,
pugail_beta: Optional[float] = None) -> Loss:
"""Computes the PUGAIL loss (https://arxiv.org/pdf/1911.00459.pdf)."""
def loss_fn(
discriminator_fn: DiscriminatorFn,
discriminator_state: State,
demo_transitions: types.Transition, rb_transitions: types.Transition,
rng_key: networks_lib.PRNGKey) -> LossOutput:
del rng_key
demo_logits, discriminator_state = discriminator_fn(discriminator_state,
demo_transitions)
rb_logits, discriminator_state = discriminator_fn(discriminator_state,
rb_transitions)
# Quick Maths:
# output = logit(D) = ln(D) - ln(1-D)
# -softplus(-output) = ln(D)
# softplus(output) = -ln(1-D)
# prior * -ln(D(expert))
positive_loss = positive_class_prior * -jax.nn.log_sigmoid(demo_logits)
# -ln(1 - D(policy)) - prior * -ln(1 - D(expert))
negative_loss = jax.nn.softplus(
rb_logits) - positive_class_prior * jax.nn.softplus(demo_logits)
if pugail_beta is not None:
negative_loss = jnp.clip(negative_loss, a_min=-1. * pugail_beta)
classification_loss = jnp.mean(positive_loss + negative_loss)
entropy = jnp.mean(
_logit_bernoulli_entropy(jnp.concatenate([demo_logits, rb_logits])))
entropy_loss = -entropy_coefficient * entropy
total_loss = classification_loss + entropy_loss
metrics = {
'total_loss': total_loss,
'positive_loss': jnp.mean(positive_loss),
'negative_loss': jnp.mean(negative_loss),
'demo_logits': jnp.mean(demo_logits),
'rb_logits': jnp.mean(rb_logits),
'entropy_loss': entropy_loss,
'classification_loss': classification_loss
}
return total_loss, (metrics, discriminator_state)
return loss_fn
def _make_gradient_penalty_data(rb_transitions: types.Transition,
demonstration_transitions: types.Transition,
key: networks_lib.PRNGKey) -> types.Transition:
lambdas = tfd.Uniform().sample(len(rb_transitions.reward), seed=key)
return tree.map_structure(lambda x, y: _weighted_average(x, y, lambdas),
rb_transitions, demonstration_transitions)
@functools.partial(jax.vmap, in_axes=(0, None, None))
def _compute_gradient_penalty(gradient_penalty_data: types.Transition,
discriminator_fn: Callable[[types.Transition],
float],
gradient_penalty_target: float) -> float:
"""Computes a penalty based on the gradient norm on the data."""
# The input should not be batched.
assert not gradient_penalty_data.reward.shape
discriminator_gradient_fn = jax.grad(discriminator_fn)
gradients = discriminator_gradient_fn(gradient_penalty_data)
gradients = tree.map_structure(lambda x: x.flatten(), gradients)
gradients = jnp.concatenate([gradients.observation, gradients.action,
gradients.next_observation])
gradient_norms = jnp.linalg.norm(gradients + 1e-8)
k = gradient_penalty_target * jnp.ones_like(gradient_norms)
return jnp.mean(jnp.square(gradient_norms - k))
def add_gradient_penalty(base_loss: Loss,
gradient_penalty_coefficient: float,
gradient_penalty_target: float) -> Loss:
"""Adds a gradient penalty to the base_loss."""
if not gradient_penalty_coefficient:
return base_loss
def loss_fn(discriminator_fn: DiscriminatorFn,
discriminator_state: State,
demo_transitions: types.Transition,
rb_transitions: types.Transition,
rng_key: networks_lib.PRNGKey) -> LossOutput:
super_key, gradient_penalty_key = jax.random.split(rng_key)
partial_loss, (losses, discriminator_state) = base_loss(
discriminator_fn, discriminator_state, demo_transitions, rb_transitions,
super_key)
gradient_penalty_data = _make_gradient_penalty_data(
rb_transitions=rb_transitions,
demonstration_transitions=demo_transitions,
key=gradient_penalty_key)
def apply_discriminator_fn(transitions: types.Transition) -> float:
logits, _ = discriminator_fn(discriminator_state, transitions)
return logits # pytype: disable=bad-return-type # jax-ndarray
gradient_penalty = gradient_penalty_coefficient * jnp.mean(
_compute_gradient_penalty(gradient_penalty_data, apply_discriminator_fn,
gradient_penalty_target))
losses['gradient_penalty'] = gradient_penalty
total_loss = partial_loss + gradient_penalty
losses['total_loss'] = total_loss
return total_loss, (losses, discriminator_state)
return loss_fn
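# Composition sketch (mirroring the DAC builder in dac.py): a GAIL loss with a
# small entropy bonus plus a gradient penalty that pushes the discriminator
# gradient norm towards 1.
#   dac_loss = add_gradient_penalty(
#       gail_loss(entropy_coefficient=1e-3),
#       gradient_penalty_coefficient=10.,
#       gradient_penalty_target=1.)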
|
acme-master
|
acme/agents/jax/ail/losses.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AIL learner implementation."""
import functools
import itertools
import time
from typing import Any, Callable, Iterator, List, NamedTuple, Optional, Tuple
import acme
from acme import types
from acme.agents.jax.ail import losses
from acme.agents.jax.ail import networks as ail_networks
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
from acme.utils import reverb_utils
import jax
import optax
import reverb
class DiscriminatorTrainingState(NamedTuple):
"""Contains training state for the discriminator."""
# State of the optimizer used to optimize the discriminator parameters.
optimizer_state: optax.OptState
# Parameters of the discriminator.
discriminator_params: networks_lib.Params
# State of the discriminator
discriminator_state: losses.State
# For AIRL variants, we need the policy params to compute the loss.
policy_params: Optional[networks_lib.Params]
# Key for random number generation.
key: networks_lib.PRNGKey
# Training step of the discriminator.
steps: int
class TrainingState(NamedTuple):
"""Contains training state of the AIL learner."""
rewarder_state: DiscriminatorTrainingState
learner_state: Any
def ail_update_step(
state: DiscriminatorTrainingState, data: Tuple[types.Transition,
types.Transition],
optimizer: optax.GradientTransformation,
ail_network: ail_networks.AILNetworks,
loss_fn: losses.Loss) -> Tuple[DiscriminatorTrainingState, losses.Metrics]:
"""Run an update steps on the given transitions.
Args:
state: The learner state.
data: Demo and rb transitions.
optimizer: Discriminator optimizer.
ail_network: AIL networks.
loss_fn: Discriminator loss to minimize.
Returns:
A new state and metrics.
"""
demo_transitions, rb_transitions = data
key, discriminator_key, loss_key = jax.random.split(state.key, 3)
def compute_loss(
discriminator_params: networks_lib.Params) -> losses.LossOutput:
discriminator_fn = functools.partial(
ail_network.discriminator_network.apply,
discriminator_params,
state.policy_params,
is_training=True,
rng=discriminator_key)
return loss_fn(discriminator_fn, state.discriminator_state,
demo_transitions, rb_transitions, loss_key)
loss_grad = jax.grad(compute_loss, has_aux=True)
grads, (loss, new_discriminator_state) = loss_grad(state.discriminator_params)
update, optimizer_state = optimizer.update(
grads,
state.optimizer_state,
params=state.discriminator_params)
discriminator_params = optax.apply_updates(state.discriminator_params, update)
new_state = DiscriminatorTrainingState(
optimizer_state=optimizer_state,
discriminator_params=discriminator_params,
discriminator_state=new_discriminator_state,
policy_params=state.policy_params, # Not modified.
key=key,
steps=state.steps + 1,
)
return new_state, loss
class AILSample(NamedTuple):
discriminator_sample: types.Transition
direct_sample: reverb.ReplaySample
demonstration_sample: types.Transition
class AILLearner(acme.Learner):
"""AIL learner."""
def __init__(
self,
counter: counting.Counter,
direct_rl_learner_factory: Callable[[Iterator[reverb.ReplaySample]],
acme.Learner],
loss_fn: losses.Loss,
iterator: Iterator[AILSample],
discriminator_optimizer: optax.GradientTransformation,
ail_network: ail_networks.AILNetworks,
discriminator_key: networks_lib.PRNGKey,
is_sequence_based: bool,
num_sgd_steps_per_step: int = 1,
policy_variable_name: Optional[str] = None,
logger: Optional[loggers.Logger] = None):
"""AIL Learner.
Args:
counter: Counter.
direct_rl_learner_factory: Function that creates the direct RL learner
when passed a replay sample iterator.
loss_fn: Discriminator loss.
iterator: Iterator that returns AILSamples.
discriminator_optimizer: Discriminator optax optimizer.
ail_network: AIL networks.
discriminator_key: RNG key.
is_sequence_based: If True, a direct rl algorithm is using SequenceAdder
data format. Otherwise the learner assumes that the direct rl algorithm
is using NStepTransitionAdder.
num_sgd_steps_per_step: Number of discriminator gradient updates per step.
policy_variable_name: The name of the policy variable to retrieve
direct_rl policy parameters.
logger: Logger.
"""
self._is_sequence_based = is_sequence_based
state_key, networks_key = jax.random.split(discriminator_key)
# Generator expression that works the same as an iterator.
# https://pymbook.readthedocs.io/en/latest/igd.html#generator-expressions
iterator, direct_rl_iterator = itertools.tee(iterator)
direct_rl_iterator = (
self._process_sample(sample.direct_sample)
for sample in direct_rl_iterator)
self._direct_rl_learner = direct_rl_learner_factory(direct_rl_iterator)
self._iterator = iterator
if policy_variable_name is not None:
def get_policy_params():
return self._direct_rl_learner.get_variables([policy_variable_name])[0]
self._get_policy_params = get_policy_params
else:
self._get_policy_params = lambda: None
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger(
'learner',
asynchronous=True,
serialize_fn=utils.fetch_devicearray,
steps_key=self._counter.get_steps_key())
# Use the JIT compiler.
self._update_step = functools.partial(
ail_update_step,
optimizer=discriminator_optimizer,
ail_network=ail_network,
loss_fn=loss_fn)
self._update_step = utils.process_multiple_batches(self._update_step,
num_sgd_steps_per_step)
self._update_step = jax.jit(self._update_step)
discriminator_params, discriminator_state = (
ail_network.discriminator_network.init(networks_key))
self._state = DiscriminatorTrainingState(
optimizer_state=discriminator_optimizer.init(discriminator_params),
discriminator_params=discriminator_params,
discriminator_state=discriminator_state,
policy_params=self._get_policy_params(),
key=state_key,
steps=0,
)
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
self._get_reward = jax.jit(
functools.partial(
ail_networks.compute_ail_reward, networks=ail_network))
def _process_sample(self, sample: reverb.ReplaySample) -> reverb.ReplaySample:
"""Updates the reward of the replay sample.
Args:
sample: Replay sample whose reward should be updated.
Returns:
The replay sample with an updated reward.
"""
transitions = reverb_utils.replay_sample_to_sars_transition(
sample, is_sequence=self._is_sequence_based)
rewards = self._get_reward(self._state.discriminator_params,
self._state.discriminator_state,
self._state.policy_params, transitions)
return sample._replace(data=sample.data._replace(reward=rewards))
def step(self):
sample = next(self._iterator)
rb_transitions = sample.discriminator_sample
demo_transitions = sample.demonstration_sample
if demo_transitions.reward.shape != rb_transitions.reward.shape:
raise ValueError(
'Different shapes for demo transitions and rb_transitions: '
f'{demo_transitions.reward.shape} != {rb_transitions.reward.shape}')
# Update the parameters of the policy before doing a gradient step.
state = self._state._replace(policy_params=self._get_policy_params())
self._state, metrics = self._update_step(state,
(demo_transitions, rb_transitions))
# The order is important for AIRL.
# In AIRL, the discriminator update depends on the logpi of the direct rl
# policy.
# When updating the discriminator, we want the logpi with which the
# transitions were generated, not an updated one.
# Get data from replay (dropping extras if any). Note there is no
# extra data here because we do not insert any into Reverb.
self._direct_rl_learner.step()
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Increment counts and record the current time.
counts = self._counter.increment(steps=1, walltime=elapsed_time)
# Attempts to write the logs.
self._logger.write({**metrics, **counts})
def get_variables(self, names: List[str]) -> List[Any]:
rewarder_dict = {'discriminator': self._state.discriminator_params}
learner_names = [name for name in names if name not in rewarder_dict]
learner_dict = {}
if learner_names:
learner_dict = dict(
zip(learner_names,
self._direct_rl_learner.get_variables(learner_names)))
variables = [
rewarder_dict.get(name, learner_dict.get(name, None)) for name in names
]
return variables
def save(self) -> TrainingState:
return TrainingState(
rewarder_state=self._state,
learner_state=self._direct_rl_learner.save())
def restore(self, state: TrainingState):
self._state = state.rewarder_state
self._direct_rl_learner.restore(state.learner_state)
|
acme-master
|
acme/agents/jax/ail/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builder for GAIL.
https://arxiv.org/pdf/1606.03476.pdf
"""
import dataclasses
from typing import Callable, Iterator
from acme import types
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import ppo
from acme.agents.jax.ail import builder
from acme.agents.jax.ail import config as ail_config
from acme.agents.jax.ail import losses
@dataclasses.dataclass
class GAILConfig:
"""Configuration options specific to GAIL."""
ail_config: ail_config.AILConfig
ppo_config: ppo.PPOConfig
class GAILBuilder(builder.AILBuilder[ppo.PPONetworks,
actor_core_lib.FeedForwardPolicyWithExtra]
):
"""GAIL Builder."""
def __init__(self, config: GAILConfig,
make_demonstrations: Callable[[int],
Iterator[types.Transition]]):
ppo_builder = ppo.PPOBuilder(config.ppo_config)
super().__init__(
ppo_builder,
config=config.ail_config,
discriminator_loss=losses.gail_loss(),
make_demonstrations=make_demonstrations)
|
acme-master
|
acme/agents/jax/ail/gail.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the AIL discriminator losses."""
from acme import types
from acme.agents.jax.ail import losses
from acme.jax import networks as networks_lib
import jax
import jax.numpy as jnp
import tree
from absl.testing import absltest
class AilLossTest(absltest.TestCase):
def test_gradient_penalty(self):
def dummy_discriminator(
transition: types.Transition) -> networks_lib.Logits:
return transition.observation + jnp.square(transition.action)
zero_transition = types.Transition(0., 0., 0., 0., 0.)
zero_transition = tree.map_structure(lambda x: jnp.expand_dims(x, axis=0),
zero_transition)
self.assertEqual(
losses._compute_gradient_penalty(zero_transition, dummy_discriminator,
0.), 1**2 + 0**2)
one_transition = types.Transition(1., 1., 0., 0., 0.)
one_transition = tree.map_structure(lambda x: jnp.expand_dims(x, axis=0),
one_transition)
self.assertEqual(
losses._compute_gradient_penalty(one_transition, dummy_discriminator,
0.), 1**2 + 2**2)
def test_pugail(self):
def dummy_discriminator(
state: losses.State,
transition: types.Transition) -> losses.DiscriminatorOutput:
return transition.observation, state
zero_transition = types.Transition(.1, 0., 0., 0., 0.)
zero_transition = tree.map_structure(lambda x: jnp.expand_dims(x, axis=0),
zero_transition)
one_transition = types.Transition(1., 0., 0., 0., 0.)
one_transition = tree.map_structure(lambda x: jnp.expand_dims(x, axis=0),
one_transition)
prior = .7
loss_fn = losses.pugail_loss(
positive_class_prior=prior, entropy_coefficient=0.)
loss, _ = loss_fn(dummy_discriminator, {}, one_transition,
zero_transition, ())
d_one = jax.nn.sigmoid(dummy_discriminator({}, one_transition)[0])
d_zero = jax.nn.sigmoid(dummy_discriminator({}, zero_transition)[0])
expected_loss = -prior * jnp.log(
d_one) + -jnp.log(1. - d_zero) - prior * -jnp.log(1 - d_one)
self.assertAlmostEqual(loss, expected_loss, places=6)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/jax/ail/losses_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the builder generator."""
from acme import types
from acme.agents.jax.ail import builder
import numpy as np
import reverb
from absl.testing import absltest
_REWARD = np.zeros((3,))
class BuilderTest(absltest.TestCase):
def test_weighted_generator(self):
data0 = types.Transition(np.array([[1], [2], [3]]), (), _REWARD, (), ())
it0 = iter([data0])
data1 = types.Transition(np.array([[4], [5], [6]]), (), _REWARD, (), ())
data2 = types.Transition(np.array([[7], [8], [9]]), (), _REWARD, (), ())
it1 = iter([
reverb.ReplaySample(
info=reverb.SampleInfo(
*[() for _ in reverb.SampleInfo.tf_dtypes()]),
data=data1),
reverb.ReplaySample(
info=reverb.SampleInfo(
*[() for _ in reverb.SampleInfo.tf_dtypes()]),
data=data2)
])
weighted_it = builder._generate_samples_with_demonstrations(
it0, it1, policy_to_expert_data_ratio=2, batch_size=3)
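    # With policy_to_expert_data_ratio=2 and batch_size=3, each yielded batch
    # mixes two policy (replay) transitions with one demonstration transition:
    # [1, 4, 5] is one demonstration plus two policy samples, and [7, 8, 2] is
    # two policy samples plus the next demonstration, as asserted below.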
np.testing.assert_array_equal(
next(weighted_it).data.observation, np.array([[1], [4], [5]]))
np.testing.assert_array_equal(
next(weighted_it).data.observation, np.array([[7], [8], [2]]))
self.assertRaises(StopIteration, lambda: next(weighted_it))
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/jax/ail/builder_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TD3 config."""
import dataclasses
from typing import Optional, Union
from acme.adders import reverb as adders_reverb
import optax
@dataclasses.dataclass
class TD3Config:
"""Configuration options for TD3."""
# Loss options
batch_size: int = 256
policy_learning_rate: Union[optax.Schedule, float] = 3e-4
critic_learning_rate: Union[optax.Schedule, float] = 3e-4
# Policy gradient clipping is not part of the original TD3 implementation,
# used e.g. in DAC https://arxiv.org/pdf/1809.02925.pdf
policy_gradient_clipping: Optional[float] = None
discount: float = 0.99
n_step: int = 1
# TD3 specific options (https://arxiv.org/pdf/1802.09477.pdf)
sigma: float = 0.1
delay: int = 2
target_sigma: float = 0.2
noise_clip: float = 0.5
tau: float = 0.005
# Replay options
min_replay_size: int = 1000
max_replay_size: int = 1000000
replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
prefetch_size: int = 4
samples_per_insert: float = 256
# Rate to be used for the SampleToInsertRatio rate limiter tolerance.
# See a formula in make_replay_tables for more details.
samples_per_insert_tolerance_rate: float = 0.1
# How many gradient updates to perform per step.
num_sgd_steps_per_step: int = 1
# Offline RL options
  # bc_alpha: if given, adds a BC regularization term to the policy loss
  # (https://arxiv.org/pdf/2106.06860.pdf), which is useful for offline training.
bc_alpha: Optional[float] = None
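# A minimal usage sketch, assuming only what is shown in this package: the
# config is consumed by TD3Builder (see builder.py); the overridden field
# values below are illustrative, not recommendations.
#
#   from acme.agents.jax import td3
#
#   config = td3.TD3Config(batch_size=128, num_sgd_steps_per_step=2)
#   builder = td3.TD3Builder(config)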
|
acme-master
|
acme/agents/jax/td3/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TD3 agent."""
from acme.agents.jax.td3.builder import TD3Builder
from acme.agents.jax.td3.config import TD3Config
from acme.agents.jax.td3.learning import TD3Learner
from acme.agents.jax.td3.networks import get_default_behavior_policy
from acme.agents.jax.td3.networks import make_networks
from acme.agents.jax.td3.networks import TD3Networks
|
acme-master
|
acme/agents/jax/td3/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TD3 Builder."""
from typing import Iterator, List, Optional
from acme import adders
from acme import core
from acme import specs
from acme.adders import reverb as adders_reverb
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.agents.jax.td3 import config as td3_config
from acme.agents.jax.td3 import learning
from acme.agents.jax.td3 import networks as td3_networks
from acme.datasets import reverb as datasets
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import jax
import optax
import reverb
from reverb import rate_limiters
class TD3Builder(builders.ActorLearnerBuilder[td3_networks.TD3Networks,
actor_core_lib.FeedForwardPolicy,
reverb.ReplaySample]):
"""TD3 Builder."""
def __init__(
self,
config: td3_config.TD3Config,
):
"""Creates a TD3 learner, a behavior policy and an eval actor.
Args:
      config: a TD3 config with the agent's hyperparameters.
"""
self._config = config
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: td3_networks.TD3Networks,
dataset: Iterator[reverb.ReplaySample],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
del environment_spec, replay_client
critic_optimizer = optax.adam(self._config.critic_learning_rate)
twin_critic_optimizer = optax.adam(self._config.critic_learning_rate)
policy_optimizer = optax.adam(self._config.policy_learning_rate)
if self._config.policy_gradient_clipping is not None:
policy_optimizer = optax.chain(
optax.clip_by_global_norm(self._config.policy_gradient_clipping),
policy_optimizer)
return learning.TD3Learner(
networks=networks,
random_key=random_key,
discount=self._config.discount,
target_sigma=self._config.target_sigma,
noise_clip=self._config.noise_clip,
policy_optimizer=policy_optimizer,
critic_optimizer=critic_optimizer,
twin_critic_optimizer=twin_critic_optimizer,
num_sgd_steps_per_step=self._config.num_sgd_steps_per_step,
bc_alpha=self._config.bc_alpha,
iterator=dataset,
logger=logger_fn('learner'),
counter=counter)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: actor_core_lib.FeedForwardPolicy,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> core.Actor:
del environment_spec
assert variable_source is not None
actor_core = actor_core_lib.batched_feed_forward_to_actor_core(policy)
# Inference happens on CPU, so it's better to move variables there too.
variable_client = variable_utils.VariableClient(variable_source, 'policy',
device='cpu')
return actors.GenericActor(
actor_core, random_key, variable_client, adder, backend='cpu')
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: actor_core_lib.FeedForwardPolicy,
) -> List[reverb.Table]:
"""Creates reverb tables for the algorithm."""
del policy
samples_per_insert_tolerance = (
self._config.samples_per_insert_tolerance_rate *
self._config.samples_per_insert)
error_buffer = self._config.min_replay_size * samples_per_insert_tolerance
limiter = rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._config.min_replay_size,
samples_per_insert=self._config.samples_per_insert,
error_buffer=error_buffer)
return [reverb.Table(
name=self._config.replay_table_name,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._config.max_replay_size,
rate_limiter=limiter,
signature=adders_reverb.NStepTransitionAdder.signature(
environment_spec))]
def make_dataset_iterator(
self, replay_client: reverb.Client) -> Iterator[reverb.ReplaySample]:
"""Creates a dataset iterator to use for learning."""
dataset = datasets.make_reverb_dataset(
table=self._config.replay_table_name,
server_address=replay_client.server_address,
batch_size=(
self._config.batch_size * self._config.num_sgd_steps_per_step),
prefetch_size=self._config.prefetch_size,
transition_adder=True)
return utils.device_put(dataset.as_numpy_iterator(), jax.devices()[0])
def make_adder(
self, replay_client: reverb.Client,
environment_spec: Optional[specs.EnvironmentSpec],
policy: Optional[actor_core_lib.FeedForwardPolicy]
) -> Optional[adders.Adder]:
"""Creates an adder which handles observations."""
del environment_spec, policy
return adders_reverb.NStepTransitionAdder(
priority_fns={self._config.replay_table_name: None},
client=replay_client,
n_step=self._config.n_step,
discount=self._config.discount)
def make_policy(self,
networks: td3_networks.TD3Networks,
environment_spec: specs.EnvironmentSpec,
evaluation: bool = False) -> actor_core_lib.FeedForwardPolicy:
"""Creates a policy."""
sigma = 0 if evaluation else self._config.sigma
return td3_networks.get_default_behavior_policy(
networks=networks, action_specs=environment_spec.actions, sigma=sigma)
|
acme-master
|
acme/agents/jax/td3/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TD3 networks definition."""
import dataclasses
from typing import Callable, Sequence
from acme import specs
from acme import types
from acme.agents.jax import actor_core as actor_core_lib
from acme.jax import networks as networks_lib
from acme.jax import utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
@dataclasses.dataclass
class TD3Networks:
"""Network and pure functions for the TD3 agent."""
policy_network: networks_lib.FeedForwardNetwork
critic_network: networks_lib.FeedForwardNetwork
twin_critic_network: networks_lib.FeedForwardNetwork
add_policy_noise: Callable[[types.NestedArray, networks_lib.PRNGKey,
float, float], types.NestedArray]
def get_default_behavior_policy(
networks: TD3Networks, action_specs: specs.BoundedArray,
sigma: float) -> actor_core_lib.FeedForwardPolicy:
"""Selects action according to the policy."""
def behavior_policy(params: networks_lib.Params, key: networks_lib.PRNGKey,
observation: types.NestedArray):
action = networks.policy_network.apply(params, observation)
noise = jax.random.normal(key, shape=action.shape) * sigma
noisy_action = jnp.clip(action + noise,
action_specs.minimum, action_specs.maximum)
return noisy_action
return behavior_policy
def make_networks(
spec: specs.EnvironmentSpec,
hidden_layer_sizes: Sequence[int] = (256, 256)) -> TD3Networks:
"""Creates networks used by the agent.
  The networks are based on LayerNormMLP rather than the plain MLP with ReLU
  activations described in the TD3 paper; the latter empirically performs worse.
Args:
spec: Environment specs
hidden_layer_sizes: list of sizes of hidden layers in actor/critic networks
Returns:
network: TD3Networks
"""
action_specs = spec.actions
num_dimensions = np.prod(action_specs.shape, dtype=int)
def add_policy_noise(action: types.NestedArray,
key: networks_lib.PRNGKey,
target_sigma: float,
noise_clip: float) -> types.NestedArray:
"""Adds action noise to bootstrapped Q-value estimate in critic loss."""
noise = jax.random.normal(key=key, shape=action_specs.shape) * target_sigma
noise = jnp.clip(noise, -noise_clip, noise_clip)
return jnp.clip(action + noise, action_specs.minimum, action_specs.maximum)
def _actor_fn(obs: types.NestedArray) -> types.NestedArray:
network = hk.Sequential([
networks_lib.LayerNormMLP(hidden_layer_sizes,
activate_final=True),
networks_lib.NearZeroInitializedLinear(num_dimensions),
networks_lib.TanhToSpec(spec.actions),
])
return network(obs)
def _critic_fn(obs: types.NestedArray,
action: types.NestedArray) -> types.NestedArray:
network1 = hk.Sequential([
networks_lib.LayerNormMLP(list(hidden_layer_sizes) + [1]),
])
input_ = jnp.concatenate([obs, action], axis=-1)
value = network1(input_)
return jnp.squeeze(value)
policy = hk.without_apply_rng(hk.transform(_actor_fn))
critic = hk.without_apply_rng(hk.transform(_critic_fn))
# Create dummy observations and actions to create network parameters.
dummy_action = utils.zeros_like(spec.actions)
dummy_obs = utils.zeros_like(spec.observations)
dummy_action = utils.add_batch_dim(dummy_action)
dummy_obs = utils.add_batch_dim(dummy_obs)
network = TD3Networks(
policy_network=networks_lib.FeedForwardNetwork(
lambda key: policy.init(key, dummy_obs), policy.apply),
critic_network=networks_lib.FeedForwardNetwork(
lambda key: critic.init(key, dummy_obs, dummy_action), critic.apply),
twin_critic_network=networks_lib.FeedForwardNetwork(
lambda key: critic.init(key, dummy_obs, dummy_action), critic.apply),
add_policy_noise=add_policy_noise)
return network
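# A minimal usage sketch, assuming an `environment_spec` of type
# specs.EnvironmentSpec with a bounded continuous action spec:
#
#   td3_networks = make_networks(environment_spec, hidden_layer_sizes=(256, 256))
#   behavior_policy = get_default_behavior_policy(
#       td3_networks, environment_spec.actions, sigma=0.1)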
|
acme-master
|
acme/agents/jax/td3/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TD3 learner implementation."""
import time
from typing import Dict, Iterator, List, NamedTuple, Optional, Tuple
import acme
from acme import types
from acme.agents.jax.td3 import networks as td3_networks
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
import jax
import jax.numpy as jnp
import optax
import reverb
import rlax
class TrainingState(NamedTuple):
"""Contains training state for the learner."""
policy_params: networks_lib.Params
target_policy_params: networks_lib.Params
critic_params: networks_lib.Params
target_critic_params: networks_lib.Params
twin_critic_params: networks_lib.Params
target_twin_critic_params: networks_lib.Params
policy_opt_state: optax.OptState
critic_opt_state: optax.OptState
twin_critic_opt_state: optax.OptState
steps: int
random_key: networks_lib.PRNGKey
class TD3Learner(acme.Learner):
"""TD3 learner."""
_state: TrainingState
def __init__(self,
networks: td3_networks.TD3Networks,
random_key: networks_lib.PRNGKey,
discount: float,
iterator: Iterator[reverb.ReplaySample],
policy_optimizer: optax.GradientTransformation,
critic_optimizer: optax.GradientTransformation,
twin_critic_optimizer: optax.GradientTransformation,
delay: int = 2,
target_sigma: float = 0.2,
noise_clip: float = 0.5,
tau: float = 0.005,
use_sarsa_target: bool = False,
bc_alpha: Optional[float] = None,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
num_sgd_steps_per_step: int = 1):
"""Initializes the TD3 learner.
Args:
networks: TD3 networks.
random_key: a key for random number generation.
discount: discount to use for TD updates
iterator: an iterator over training data.
policy_optimizer: the policy optimizer.
critic_optimizer: the Q-function optimizer.
twin_critic_optimizer: the twin Q-function optimizer.
delay: ratio of policy updates for critic updates (see TD3),
delay=2 means 2 updates of the critic for 1 policy update.
target_sigma: std of zero mean Gaussian added to the action of
the next_state, for critic evaluation (reducing overestimation bias).
noise_clip: hard constraint on target noise.
tau: target parameters smoothing coefficient.
      use_sarsa_target: compute the on-policy target using the iterator's
        actions rather than actions sampled from the target policy.
        Useful for 1-step offline RL (https://arxiv.org/pdf/2106.08909.pdf).
        When set to `True`, `target_policy_params` are unused.
        This only works when the learner is used as an offline algorithm, i.e.
        TD3Builder does not support adding the SARSA target to the replay
        buffer.
      bc_alpha: if given, implements the TD3+BC regularization.
See comments in TD3Config.bc_alpha for details.
counter: counter object used to keep track of steps.
logger: logger object to be used by learner.
num_sgd_steps_per_step: number of sgd steps to perform per learner 'step'.
"""
def policy_loss(
policy_params: networks_lib.Params,
critic_params: networks_lib.Params,
transition: types.NestedArray,
) -> jnp.ndarray:
      # Computes the deterministic policy gradient (DPG) loss.
action = networks.policy_network.apply(
policy_params, transition.observation)
grad_critic = jax.vmap(
jax.grad(networks.critic_network.apply, argnums=2),
in_axes=(None, 0, 0))
dq_da = grad_critic(critic_params, transition.observation, action)
batch_dpg_learning = jax.vmap(rlax.dpg_loss, in_axes=(0, 0))
loss = jnp.mean(batch_dpg_learning(action, dq_da))
if bc_alpha is not None:
# BC regularization for offline RL
q_sa = networks.critic_network.apply(critic_params,
transition.observation, action)
bc_factor = jax.lax.stop_gradient(bc_alpha / jnp.mean(jnp.abs(q_sa)))
loss += jnp.mean(jnp.square(action - transition.action)) / bc_factor
return loss
def critic_loss(
critic_params: networks_lib.Params,
state: TrainingState,
transition: types.Transition,
random_key: jnp.ndarray,
):
# Computes the critic loss.
q_tm1 = networks.critic_network.apply(
critic_params, transition.observation, transition.action)
if use_sarsa_target:
# TODO(b/222674779): use N-steps Trajectories to get the next actions.
assert 'next_action' in transition.extras, (
'next actions should be given as extras for one step RL.')
action = transition.extras['next_action']
else:
action = networks.policy_network.apply(state.target_policy_params,
transition.next_observation)
action = networks.add_policy_noise(action, random_key,
target_sigma, noise_clip)
q_t = networks.critic_network.apply(
state.target_critic_params,
transition.next_observation,
action)
twin_q_t = networks.twin_critic_network.apply(
state.target_twin_critic_params,
transition.next_observation,
action)
q_t = jnp.minimum(q_t, twin_q_t)
target_q_tm1 = transition.reward + discount * transition.discount * q_t
td_error = jax.lax.stop_gradient(target_q_tm1) - q_tm1
return jnp.mean(jnp.square(td_error))
def update_step(
state: TrainingState,
transitions: types.Transition,
) -> Tuple[TrainingState, Dict[str, jnp.ndarray]]:
random_key, key_critic, key_twin = jax.random.split(state.random_key, 3)
# Updates on the critic: compute the gradients, and update using
# Polyak averaging.
critic_loss_and_grad = jax.value_and_grad(critic_loss)
critic_loss_value, critic_gradients = critic_loss_and_grad(
state.critic_params, state, transitions, key_critic)
critic_updates, critic_opt_state = critic_optimizer.update(
critic_gradients, state.critic_opt_state)
critic_params = optax.apply_updates(state.critic_params, critic_updates)
# In the original authors' implementation the critic target update is
# delayed similarly to the policy update which we found empirically to
# perform slightly worse.
target_critic_params = optax.incremental_update(
new_tensors=critic_params,
old_tensors=state.target_critic_params,
step_size=tau)
# Updates on the twin critic: compute the gradients, and update using
# Polyak averaging.
twin_critic_loss_value, twin_critic_gradients = critic_loss_and_grad(
state.twin_critic_params, state, transitions, key_twin)
twin_critic_updates, twin_critic_opt_state = twin_critic_optimizer.update(
twin_critic_gradients, state.twin_critic_opt_state)
twin_critic_params = optax.apply_updates(state.twin_critic_params,
twin_critic_updates)
# In the original authors' implementation the twin critic target update is
# delayed similarly to the policy update which we found empirically to
# perform slightly worse.
target_twin_critic_params = optax.incremental_update(
new_tensors=twin_critic_params,
old_tensors=state.target_twin_critic_params,
step_size=tau)
# Updates on the policy: compute the gradients, and update using
# Polyak averaging (if delay enabled, the update might not be applied).
policy_loss_and_grad = jax.value_and_grad(policy_loss)
policy_loss_value, policy_gradients = policy_loss_and_grad(
state.policy_params, state.critic_params, transitions)
def update_policy_step():
policy_updates, policy_opt_state = policy_optimizer.update(
policy_gradients, state.policy_opt_state)
policy_params = optax.apply_updates(state.policy_params, policy_updates)
target_policy_params = optax.incremental_update(
new_tensors=policy_params,
old_tensors=state.target_policy_params,
step_size=tau)
return policy_params, target_policy_params, policy_opt_state
# The update on the policy is applied every `delay` steps.
current_policy_state = (state.policy_params, state.target_policy_params,
state.policy_opt_state)
policy_params, target_policy_params, policy_opt_state = jax.lax.cond(
state.steps % delay == 0,
lambda _: update_policy_step(),
lambda _: current_policy_state,
operand=None)
steps = state.steps + 1
new_state = TrainingState(
policy_params=policy_params,
critic_params=critic_params,
twin_critic_params=twin_critic_params,
target_policy_params=target_policy_params,
target_critic_params=target_critic_params,
target_twin_critic_params=target_twin_critic_params,
policy_opt_state=policy_opt_state,
critic_opt_state=critic_opt_state,
twin_critic_opt_state=twin_critic_opt_state,
steps=steps,
random_key=random_key,
)
metrics = {
'policy_loss': policy_loss_value,
'critic_loss': critic_loss_value,
'twin_critic_loss': twin_critic_loss_value,
}
return new_state, metrics
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger(
'learner',
asynchronous=True,
serialize_fn=utils.fetch_devicearray,
steps_key=self._counter.get_steps_key())
# Create prefetching dataset iterator.
self._iterator = iterator
# Faster sgd step
update_step = utils.process_multiple_batches(update_step,
num_sgd_steps_per_step)
# Use the JIT compiler.
self._update_step = jax.jit(update_step)
    (key_init_policy, key_init_critic, key_init_twin, key_state
     ) = jax.random.split(random_key, 4)
    # Create the network parameters and copy into the target network parameters.
    initial_policy_params = networks.policy_network.init(key_init_policy)
    initial_critic_params = networks.critic_network.init(key_init_critic)
    initial_twin_critic_params = networks.twin_critic_network.init(
        key_init_twin)
initial_target_policy_params = initial_policy_params
initial_target_critic_params = initial_critic_params
initial_target_twin_critic_params = initial_twin_critic_params
# Initialize optimizers.
initial_policy_opt_state = policy_optimizer.init(initial_policy_params)
initial_critic_opt_state = critic_optimizer.init(initial_critic_params)
initial_twin_critic_opt_state = twin_critic_optimizer.init(
initial_twin_critic_params)
# Create initial state.
self._state = TrainingState(
policy_params=initial_policy_params,
target_policy_params=initial_target_policy_params,
critic_params=initial_critic_params,
twin_critic_params=initial_twin_critic_params,
target_critic_params=initial_target_critic_params,
target_twin_critic_params=initial_target_twin_critic_params,
policy_opt_state=initial_policy_opt_state,
critic_opt_state=initial_critic_opt_state,
twin_critic_opt_state=initial_twin_critic_opt_state,
steps=0,
random_key=key_state
)
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
def step(self):
# Get data from replay (dropping extras if any). Note there is no
# extra data here because we do not insert any into Reverb.
sample = next(self._iterator)
transitions = types.Transition(*sample.data)
self._state, metrics = self._update_step(self._state, transitions)
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Increment counts and record the current time
counts = self._counter.increment(steps=1, walltime=elapsed_time)
# Attempts to write the logs.
self._logger.write({**metrics, **counts})
def get_variables(self, names: List[str]) -> List[networks_lib.Params]:
variables = {
'policy': self._state.policy_params,
'critic': self._state.critic_params,
'twin_critic': self._state.twin_critic_params,
}
return [variables[name] for name in names]
def save(self) -> TrainingState:
return self._state
def restore(self, state: TrainingState):
self._state = state
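# Reference summary of the update rule implemented above:
#   * Clipped double-Q target:
#       a' = clip(pi_target(s') + clip(eps, -noise_clip, noise_clip), a_min, a_max)
#       y  = r + discount * d * min(Q1_target(s', a'), Q2_target(s', a'))
#   * Both critics minimize the squared TD error (y - Q(s, a))^2.
#   * The policy is updated only every `delay` steps via the deterministic
#     policy gradient, and every target network tracks its online network with
#     Polyak averaging of rate `tau`.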
|
acme-master
|
acme/agents/jax/td3/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BVE config."""
import dataclasses
from typing import Callable, Sequence, Union
import numpy as np
@dataclasses.dataclass
class BVEConfig:
"""Configuration options for BVE agent.
Attributes:
epsilon: for use by epsilon-greedy policies. If multiple, the epsilons are
alternated randomly per-episode.
seed: Random seed.
learning_rate: Learning rate for Adam optimizer. Could be a number or a
function defining a schedule.
adam_eps: Epsilon for Adam optimizer.
discount: Discount rate applied to value per timestep.
target_update_period: Update target network every period.
max_gradient_norm: For gradient clipping.
max_abs_reward: Maximum absolute reward.
huber_loss_parameter: The delta parameter of the huber loss.
batch_size: Number of transitions per batch.
prefetch_size: Prefetch size for reverb replay performance.
num_sgd_steps_per_step: How many gradient updates to perform per learner
step.
"""
epsilon: Union[float, Sequence[float]] = 0.05
# TODO(b/191706065): update all clients and remove this field.
seed: int = 1
# Learning rule
learning_rate: Union[float, Callable[[int], float]] = 3e-4
adam_eps: float = 1e-8 # Eps for Adam optimizer.
discount: float = 0.99 # Discount rate applied to value per timestep.
target_update_period: int = 2500 # Update target network every period.
max_gradient_norm: float = np.inf # For gradient clipping.
max_abs_reward: float = 1. # Maximum absolute value to clip the rewards.
huber_loss_parameter: float = 1. # Huber loss delta parameter.
batch_size: int = 256 # Minibatch size.
  prefetch_size: int = 500  # The amount of data to prefetch into the memory.
num_sgd_steps_per_step: int = 1
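# A minimal usage sketch, assuming only what is shown in this package: the
# config is consumed by BVEBuilder (see builder.py); the overridden field
# values below are illustrative.
#
#   from acme.agents.jax import bve
#
#   config = bve.BVEConfig(learning_rate=1e-4, batch_size=128)
#   builder = bve.BVEBuilder(config)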
|
acme-master
|
acme/agents/jax/bve/config.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of a behavior value estimation (BVE)."""
from acme.agents.jax.bve.builder import BVEBuilder
from acme.agents.jax.bve.config import BVEConfig
from acme.agents.jax.bve.losses import BVELoss
from acme.agents.jax.bve.networks import BVENetworks
|
acme-master
|
acme/agents/jax/bve/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BVE Builder."""
from typing import Iterator, Optional
from acme import core
from acme import specs
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import builders
from acme.agents.jax.bve import losses
from acme.agents.jax.bve import networks as bve_networks
from acme.agents.jax.dqn import learning_lib
from acme.jax import networks as networks_lib
from acme.jax import types as jax_types
from acme.jax import utils
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
import haiku as hk
import optax
class BVEBuilder(builders.OfflineBuilder[bve_networks.BVENetworks,
actor_core_lib.ActorCore,
utils.PrefetchingSplit]):
"""BVE Builder."""
def __init__(self, config):
"""Build a BVE agent.
Args:
config: The config of the BVE agent.
"""
self._config = config
def make_learner(self,
random_key: jax_types.PRNGKey,
networks: bve_networks.BVENetworks,
dataset: Iterator[utils.PrefetchingSplit],
logger_fn: loggers.LoggerFactory,
environment_spec: specs.EnvironmentSpec,
counter: Optional[counting.Counter] = None) -> core.Learner:
del environment_spec
loss_fn = losses.BVELoss(
discount=self._config.discount,
max_abs_reward=self._config.max_abs_reward,
huber_loss_parameter=self._config.huber_loss_parameter,
)
return learning_lib.SGDLearner(
network=networks.policy_network,
random_key=random_key,
optimizer=optax.adam(
self._config.learning_rate, eps=self._config.adam_eps),
target_update_period=self._config.target_update_period,
data_iterator=dataset,
loss_fn=loss_fn,
counter=counter,
num_sgd_steps_per_step=self._config.num_sgd_steps_per_step,
logger=logger_fn('learner'))
def make_actor(
self,
random_key: jax_types.PRNGKey,
policy: actor_core_lib.ActorCore,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None) -> core.Actor:
"""Create the actor for the BVE to perform online evals.
Args:
random_key: prng key.
policy: The DQN policy.
environment_spec: The environment spec.
variable_source: The source of where the variables are coming from.
Returns:
      The actor used for evaluation.
"""
del environment_spec
variable_client = variable_utils.VariableClient(
variable_source, 'policy', device='cpu')
return actors.GenericActor(policy, random_key, variable_client)
def make_policy(
self,
networks: bve_networks.BVENetworks,
environment_spec: specs.EnvironmentSpec,
evaluation: Optional[bool] = False) -> actor_core_lib.ActorCore:
"""Creates a policy."""
del environment_spec, evaluation
def behavior_policy(
params: hk.Params, key: jax_types.PRNGKey,
observation: networks_lib.Observation) -> networks_lib.Action:
network_output = networks.policy_network.apply(
params, observation, is_training=False)
return networks.sample_fn(network_output, key)
return actor_core_lib.batched_feed_forward_to_actor_core(behavior_policy)
|
acme-master
|
acme/agents/jax/bve/builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network definitions for BVE."""
import dataclasses
from typing import Optional
from acme.jax import networks as networks_lib
@dataclasses.dataclass
class BVENetworks:
"""The network and pure functions for the BVE agent.
Attributes:
policy_network: The policy network.
sample_fn: A pure function. Samples an action based on the network output.
log_prob: A pure function. Computes log-probability for an action.
"""
policy_network: networks_lib.TypedFeedForwardNetwork
sample_fn: networks_lib.SampleFn
log_prob: Optional[networks_lib.LogProbFn] = None
|
acme-master
|
acme/agents/jax/bve/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Behavior Value Estimation loss."""
import dataclasses
from typing import Tuple
from acme import types
from acme.agents.jax import dqn
from acme.jax import networks as networks_lib
import jax
import jax.numpy as jnp
import reverb
import rlax
@dataclasses.dataclass
class BVELoss(dqn.LossFn):
"""This loss implements TD-loss to estimate behavior value.
This loss function uses the next action to learn with the SARSA tuples.
It is intended to be used with dqn.SGDLearner. The method was proposed
in "Regularized Behavior Value Estimation" by Gulcehre et al to overcome
the extrapolation error in offline RL setting:
https://arxiv.org/abs/2103.09575
"""
discount: float = 0.99
max_abs_reward: float = 1.
huber_loss_parameter: float = 1.
def __call__(
self,
network: networks_lib.TypedFeedForwardNetwork,
params: networks_lib.Params,
target_params: networks_lib.Params,
batch: reverb.ReplaySample,
key: networks_lib.PRNGKey,
) -> Tuple[jax.Array, dqn.LossExtra]:
"""Calculate a loss on a single batch of data."""
transitions: types.Transition = batch.data
# Forward pass.
key1, key2 = jax.random.split(key)
q_tm1 = network.apply(
params, transitions.observation, is_training=True, key=key1)
q_t_value = network.apply(
target_params, transitions.next_observation, is_training=True, key=key2)
# Cast and clip rewards.
d_t = (transitions.discount * self.discount).astype(jnp.float32)
r_t = jnp.clip(transitions.reward, -self.max_abs_reward,
self.max_abs_reward).astype(jnp.float32)
    # Compute the one-step SARSA TD-error.
batch_error = jax.vmap(rlax.sarsa)
next_action = transitions.extras['next_action']
td_error = batch_error(q_tm1, transitions.action, r_t, d_t, q_t_value,
next_action)
batch_loss = rlax.huber_loss(td_error, self.huber_loss_parameter)
# Average:
loss = jnp.mean(batch_loss) # []
metrics = {'td_error': td_error, 'batch_loss': batch_loss}
return loss, dqn.LossExtra(
metrics=metrics,
reverb_priorities=jnp.abs(td_error).astype(jnp.float64))
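# Note: the TD error above is the one-step SARSA error
#   td = r_t + discount_t * Q_target(s_{t+1}, a_{t+1}) - Q(s_t, a_t),
# where a_{t+1} is the logged next action (transitions.extras['next_action']),
# so the loss estimates the behavior policy's value rather than the greedy one.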
|
acme-master
|
acme/agents/jax/bve/losses.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
acme-master
|
acme/agents/tf/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic actor implementation, using TensorFlow and Sonnet."""
from typing import Optional, Tuple
from acme import adders
from acme import core
from acme import types
# Internal imports.
from acme.tf import utils as tf2_utils
from acme.tf import variable_utils as tf2_variable_utils
import dm_env
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
class FeedForwardActor(core.Actor):
"""A feed-forward actor.
An actor based on a feed-forward policy which takes non-batched observations
and outputs non-batched actions. It also allows adding experiences to replay
and updating the weights from the policy on the learner.
"""
def __init__(
self,
policy_network: snt.Module,
adder: Optional[adders.Adder] = None,
variable_client: Optional[tf2_variable_utils.VariableClient] = None,
):
"""Initializes the actor.
Args:
policy_network: the policy to run.
      adder: the adder object which allows adding experiences to a
        dataset/replay buffer.
      variable_client: object which allows copying weights from the learner copy
        of the policy to the actor copy (in case they are separate).
"""
# Store these for later use.
self._adder = adder
self._variable_client = variable_client
self._policy_network = policy_network
@tf.function
def _policy(self, observation: types.NestedTensor) -> types.NestedTensor:
# Add a dummy batch dimension and as a side effect convert numpy to TF.
batched_observation = tf2_utils.add_batch_dim(observation)
# Compute the policy, conditioned on the observation.
policy = self._policy_network(batched_observation)
# Sample from the policy if it is stochastic.
action = policy.sample() if isinstance(policy, tfd.Distribution) else policy
return action
def select_action(self, observation: types.NestedArray) -> types.NestedArray:
# Pass the observation through the policy network.
action = self._policy(observation)
# Return a numpy array with squeezed out batch dimension.
return tf2_utils.to_numpy_squeeze(action)
def observe_first(self, timestep: dm_env.TimeStep):
if self._adder:
self._adder.add_first(timestep)
def observe(self, action: types.NestedArray, next_timestep: dm_env.TimeStep):
if self._adder:
self._adder.add(action, next_timestep)
def update(self, wait: bool = False):
if self._variable_client:
self._variable_client.update(wait)
class RecurrentActor(core.Actor):
"""A recurrent actor.
An actor based on a recurrent policy which takes non-batched observations and
outputs non-batched actions, and keeps track of the recurrent state inside. It
also allows adding experiences to replay and updating the weights from the
policy on the learner.
"""
def __init__(
self,
policy_network: snt.RNNCore,
adder: Optional[adders.Adder] = None,
variable_client: Optional[tf2_variable_utils.VariableClient] = None,
store_recurrent_state: bool = True,
):
"""Initializes the actor.
Args:
policy_network: the (recurrent) policy to run.
      adder: the adder object which allows adding experiences to a
        dataset/replay buffer.
      variable_client: object which allows copying weights from the learner copy
        of the policy to the actor copy (in case they are separate).
store_recurrent_state: Whether to pass the recurrent state to the adder.
"""
# Store these for later use.
self._adder = adder
self._variable_client = variable_client
self._network = policy_network
self._state = None
self._prev_state = None
self._store_recurrent_state = store_recurrent_state
@tf.function
def _policy(
self,
observation: types.NestedTensor,
state: types.NestedTensor,
) -> Tuple[types.NestedTensor, types.NestedTensor]:
# Add a dummy batch dimension and as a side effect convert numpy to TF.
batched_observation = tf2_utils.add_batch_dim(observation)
# Compute the policy, conditioned on the observation.
policy, new_state = self._network(batched_observation, state)
# Sample from the policy if it is stochastic.
action = policy.sample() if isinstance(policy, tfd.Distribution) else policy
return action, new_state
def select_action(self, observation: types.NestedArray) -> types.NestedArray:
# Initialize the RNN state if necessary.
if self._state is None:
self._state = self._network.initial_state(1)
# Step the recurrent policy forward given the current observation and state.
policy_output, new_state = self._policy(observation, self._state)
# Bookkeeping of recurrent states for the observe method.
self._prev_state = self._state
self._state = new_state
# Return a numpy array with squeezed out batch dimension.
return tf2_utils.to_numpy_squeeze(policy_output)
def observe_first(self, timestep: dm_env.TimeStep):
if self._adder:
self._adder.add_first(timestep)
# Set the state to None so that we re-initialize at the next policy call.
self._state = None
def observe(self, action: types.NestedArray, next_timestep: dm_env.TimeStep):
if not self._adder:
return
if not self._store_recurrent_state:
self._adder.add(action, next_timestep)
return
numpy_state = tf2_utils.to_numpy_squeeze(self._prev_state)
self._adder.add(action, next_timestep, extras=(numpy_state,))
def update(self, wait: bool = False):
if self._variable_client:
self._variable_client.update(wait)
# Internal class 1.
# Internal class 2.
|
acme-master
|
acme/agents/tf/actors.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for actors_tf2."""
from acme import environment_loop
from acme import specs
from acme.agents.tf import actors
from acme.testing import fakes
import dm_env
import numpy as np
import sonnet as snt
import tensorflow as tf
from absl.testing import absltest
def _make_fake_env() -> dm_env.Environment:
env_spec = specs.EnvironmentSpec(
observations=specs.Array(shape=(10, 5), dtype=np.float32),
actions=specs.DiscreteArray(num_values=3),
rewards=specs.Array(shape=(), dtype=np.float32),
discounts=specs.BoundedArray(
shape=(), dtype=np.float32, minimum=0., maximum=1.),
)
return fakes.Environment(env_spec, episode_length=10)
class ActorTest(absltest.TestCase):
def test_feedforward(self):
environment = _make_fake_env()
env_spec = specs.make_environment_spec(environment)
network = snt.Sequential([
snt.Flatten(),
snt.Linear(env_spec.actions.num_values),
lambda x: tf.argmax(x, axis=-1, output_type=env_spec.actions.dtype),
])
actor = actors.FeedForwardActor(network)
loop = environment_loop.EnvironmentLoop(environment, actor)
loop.run(20)
def test_recurrent(self):
environment = _make_fake_env()
env_spec = specs.make_environment_spec(environment)
network = snt.DeepRNN([
snt.Flatten(),
snt.Linear(env_spec.actions.num_values),
lambda x: tf.argmax(x, axis=-1, output_type=env_spec.actions.dtype),
])
actor = actors.RecurrentActor(network)
loop = environment_loop.EnvironmentLoop(environment, actor)
loop.run(20)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/actors_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the (MoG) distributional MPO distributed agent class."""
from typing import Callable, Dict, Optional
import acme
from acme import datasets
from acme import specs
from acme.adders import reverb as adders
from acme.agents.tf import actors
from acme.agents.tf.mog_mpo import learning
from acme.tf import networks
from acme.tf import savers as tf2_savers
from acme.tf import variable_utils as tf2_variable_utils
from acme.utils import counting
from acme.utils import loggers
from acme.utils import lp_utils
import dm_env
import launchpad as lp
import reverb
import sonnet as snt
class DistributedMoGMPO:
"""Program definition for distributional (MoG) MPO."""
def __init__(
self,
environment_factory: Callable[[bool], dm_env.Environment],
network_factory: Callable[[specs.EnvironmentSpec], Dict[str, snt.Module]],
num_actors: int = 1,
num_caches: int = 0,
environment_spec: Optional[specs.EnvironmentSpec] = None,
batch_size: int = 256,
prefetch_size: int = 4,
min_replay_size: int = 1_000,
max_replay_size: int = 1_000_000,
samples_per_insert: Optional[float] = 32.0,
n_step: int = 5,
num_samples: int = 20,
policy_evaluation_config: Optional[
learning.PolicyEvaluationConfig] = None,
additional_discount: float = 0.99,
target_policy_update_period: int = 100,
target_critic_update_period: int = 100,
policy_loss_factory: Optional[Callable[[], snt.Module]] = None,
max_actor_steps: Optional[int] = None,
log_every: float = 10.0,
):
if environment_spec is None:
environment_spec = specs.make_environment_spec(environment_factory(False))
self._environment_factory = environment_factory
self._network_factory = network_factory
self._policy_loss_factory = policy_loss_factory
self._environment_spec = environment_spec
self._num_actors = num_actors
self._num_caches = num_caches
self._batch_size = batch_size
self._prefetch_size = prefetch_size
self._min_replay_size = min_replay_size
self._max_replay_size = max_replay_size
self._samples_per_insert = samples_per_insert
self._n_step = n_step
self._additional_discount = additional_discount
self._num_samples = num_samples
self._policy_evaluation_config = policy_evaluation_config
self._target_policy_update_period = target_policy_update_period
self._target_critic_update_period = target_critic_update_period
self._max_actor_steps = max_actor_steps
self._log_every = log_every
def replay(self):
"""The replay storage."""
if self._samples_per_insert is not None:
# Create enough of an error buffer to give a 10% tolerance in rate.
samples_per_insert_tolerance = 0.1 * self._samples_per_insert
error_buffer = self._min_replay_size * samples_per_insert_tolerance
limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._min_replay_size,
samples_per_insert=self._samples_per_insert,
error_buffer=error_buffer)
else:
limiter = reverb.rate_limiters.MinSize(self._min_replay_size)
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._max_replay_size,
rate_limiter=limiter,
signature=adders.NStepTransitionAdder.signature(
self._environment_spec))
return [replay_table]
def counter(self):
return tf2_savers.CheckpointingRunner(
counting.Counter(), time_delta_minutes=1, subdirectory='counter')
def coordinator(self, counter: counting.Counter, max_actor_steps: int):
return lp_utils.StepsLimiter(counter, max_actor_steps)
def learner(
self,
replay: reverb.Client,
counter: counting.Counter,
):
"""The Learning part of the agent."""
# Create online and target networks.
online_networks = self._network_factory(self._environment_spec)
target_networks = self._network_factory(self._environment_spec)
# The dataset object to learn from.
dataset = datasets.make_reverb_dataset(
server_address=replay.server_address,
batch_size=self._batch_size,
prefetch_size=self._prefetch_size,
)
counter = counting.Counter(counter, 'learner')
logger = loggers.make_default_logger('learner', time_delta=self._log_every)
# Create policy loss module if a factory is passed.
if self._policy_loss_factory:
policy_loss_module = self._policy_loss_factory()
else:
policy_loss_module = None
# Return the learning agent.
return learning.MoGMPOLearner(
policy_network=online_networks['policy'],
critic_network=online_networks['critic'],
observation_network=online_networks['observation'],
target_policy_network=target_networks['policy'],
target_critic_network=target_networks['critic'],
target_observation_network=target_networks['observation'],
discount=self._additional_discount,
num_samples=self._num_samples,
policy_evaluation_config=self._policy_evaluation_config,
target_policy_update_period=self._target_policy_update_period,
target_critic_update_period=self._target_critic_update_period,
policy_loss_module=policy_loss_module,
dataset=dataset,
counter=counter,
logger=logger)
def actor(
self,
replay: reverb.Client,
variable_source: acme.VariableSource,
counter: counting.Counter,
actor_id: int,
) -> acme.EnvironmentLoop:
"""The actor process."""
# Create environment and target networks to act with.
environment = self._environment_factory(False)
agent_networks = self._network_factory(self._environment_spec)
# Create a stochastic behavior policy.
behavior_network = snt.Sequential([
agent_networks['observation'],
agent_networks['policy'],
networks.StochasticSamplingHead(),
])
# Ensure network variables are created.
policy_variables = {'policy': behavior_network.variables}
# Create the variable client responsible for keeping the actor up-to-date.
variable_client = tf2_variable_utils.VariableClient(
variable_source, policy_variables, update_period=1000)
# Make sure not to use a random policy after checkpoint restoration by
# assigning variables before running the environment loop.
variable_client.update_and_wait()
# Component to add things into replay.
adder = adders.NStepTransitionAdder(
client=replay, n_step=self._n_step, discount=self._additional_discount)
# Create the agent.
actor = actors.FeedForwardActor(
policy_network=behavior_network,
adder=adder,
variable_client=variable_client)
# Create logger and counter; actors will not spam bigtable.
save_data = actor_id == 0
counter = counting.Counter(counter, 'actor')
logger = loggers.make_default_logger(
'actor', save_data=save_data, time_delta=self._log_every)
# Create the run loop and return it.
return acme.EnvironmentLoop(environment, actor, counter, logger)
def evaluator(
self,
variable_source: acme.VariableSource,
counter: counting.Counter,
):
"""The evaluation process."""
# Create environment and target networks to act with.
environment = self._environment_factory(True)
agent_networks = self._network_factory(self._environment_spec)
# Create a stochastic behavior policy.
evaluator_network = snt.Sequential([
agent_networks['observation'],
agent_networks['policy'],
networks.StochasticMeanHead(),
])
# Create the variable client responsible for keeping the actor up-to-date.
variable_client = tf2_variable_utils.VariableClient(
variable_source,
variables={'policy': evaluator_network.variables},
update_period=1000)
# Make sure not to evaluate a random actor by assigning variables before
# running the environment loop.
variable_client.update_and_wait()
# Create the agent.
evaluator = actors.FeedForwardActor(
policy_network=evaluator_network, variable_client=variable_client)
# Create logger and counter.
counter = counting.Counter(counter, 'evaluator')
logger = loggers.make_default_logger(
'evaluator', time_delta=self._log_every)
# Create the run loop and return it.
return acme.EnvironmentLoop(environment, evaluator, counter, logger)
def build(self, name='dmpo'):
"""Build the distributed agent topology."""
program = lp.Program(name=name)
with program.group('replay'):
replay = program.add_node(lp.ReverbNode(self.replay))
with program.group('counter'):
counter = program.add_node(lp.CourierNode(self.counter))
if self._max_actor_steps:
_ = program.add_node(
lp.CourierNode(self.coordinator, counter, self._max_actor_steps))
with program.group('learner'):
learner = program.add_node(lp.CourierNode(self.learner, replay, counter))
with program.group('evaluator'):
program.add_node(lp.CourierNode(self.evaluator, learner, counter))
if not self._num_caches:
# Use our learner as a single variable source.
sources = [learner]
else:
with program.group('cacher'):
# Create a set of learner caches.
sources = []
for _ in range(self._num_caches):
cacher = program.add_node(
lp.CacherNode(
learner, refresh_interval_ms=2000, stale_after_ms=4000))
sources.append(cacher)
with program.group('actor'):
# Add actors which pull round-robin from our variable sources.
for actor_id in range(self._num_actors):
source = sources[actor_id % len(sources)]
program.add_node(
lp.CourierNode(self.actor, replay, source, counter, actor_id))
return program
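# A minimal launch sketch, hedged: `make_environment` and `make_networks` are
# hypothetical factories matching the constructor's `environment_factory` and
# `network_factory` signatures; launching uses the standard Launchpad entry
# point.
#
#   agent = DistributedMoGMPO(
#       environment_factory=make_environment,
#       network_factory=make_networks,
#       num_actors=4)
#   program = agent.build()
#   lp.launch(program)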
|
acme-master
|
acme/agents/tf/mog_mpo/agent_distributed.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of a (MoG) distributional MPO agent."""
from acme.agents.tf.mog_mpo.agent_distributed import DistributedMoGMPO
from acme.agents.tf.mog_mpo.learning import MoGMPOLearner
from acme.agents.tf.mog_mpo.learning import PolicyEvaluationConfig
from acme.agents.tf.mog_mpo.networks import make_default_networks
|
acme-master
|
acme/agents/tf/mog_mpo/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared helpers for different experiment flavours."""
from typing import Mapping, Sequence
from acme import specs
from acme.tf import networks
from acme.tf import utils as tf2_utils
import numpy as np
import sonnet as snt
def make_default_networks(
environment_spec: specs.EnvironmentSpec,
*,
policy_layer_sizes: Sequence[int] = (256, 256, 256),
critic_layer_sizes: Sequence[int] = (512, 512, 256),
policy_init_scale: float = 0.7,
critic_init_scale: float = 1e-3,
critic_num_components: int = 5,
) -> Mapping[str, snt.Module]:
"""Creates networks used by the agent."""
# Unpack the environment spec to get appropriate shapes, dtypes, etc.
act_spec = environment_spec.actions
obs_spec = environment_spec.observations
num_dimensions = np.prod(act_spec.shape, dtype=int)
# Create the observation network and make sure it's a Sonnet module.
observation_network = tf2_utils.batch_concat
observation_network = tf2_utils.to_sonnet_module(observation_network)
# Create the policy network.
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions,
init_scale=policy_init_scale,
use_tfd_independent=True)
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
critic_network = snt.Sequential([
networks.CriticMultiplexer(action_network=networks.ClipToSpec(act_spec)),
networks.LayerNormMLP(critic_layer_sizes, activate_final=True),
networks.GaussianMixtureHead(
num_dimensions=1,
num_components=critic_num_components,
init_scale=critic_init_scale)
])
# Create network variables.
# Get embedding spec by creating observation network variables.
emb_spec = tf2_utils.create_variables(observation_network, [obs_spec])
tf2_utils.create_variables(policy_network, [emb_spec])
tf2_utils.create_variables(critic_network, [emb_spec, act_spec])
return {
'policy': policy_network,
'critic': critic_network,
'observation': observation_network,
}
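# Usage sketch (not part of the original module): building the default MoG-MPO
# networks from a hand-written environment spec. The spec shapes below are
# arbitrary placeholders chosen purely for illustration.
def _example_default_networks() -> Mapping[str, snt.Module]:
  environment_spec = specs.EnvironmentSpec(
      observations=specs.Array(shape=(11,), dtype=np.float32, name='obs'),
      actions=specs.BoundedArray(
          shape=(3,), dtype=np.float32, minimum=-1.0, maximum=1.0, name='act'),
      rewards=specs.Array(shape=(), dtype=np.float32, name='reward'),
      discounts=specs.BoundedArray(
          shape=(), dtype=np.float32, minimum=0.0, maximum=1.0,
          name='discount'))
  return make_default_networks(environment_spec)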
|
acme-master
|
acme/agents/tf/mog_mpo/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributional MPO with MoG critic learner implementation."""
import dataclasses
import time
from typing import List, Optional
import acme
from acme import types
from acme.tf import losses
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
@dataclasses.dataclass
class PolicyEvaluationConfig:
evaluate_stochastic_policy: bool = True
num_value_samples: int = 128
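# Illustrative note (not part of the original module): the config above sets
# how many return samples are drawn when bootstrapping in `_step` below. With
# the defaults (evaluate_stochastic_policy=True, num_value_samples=128) and a
# hypothetical num_samples=20 action samples, the learner evaluates
# 20 * 128 = 2560 joint samples per transition; evaluating only the policy
# mean instead reduces this to 1 * 128.
_example_policy_evaluation_config = PolicyEvaluationConfig(
    evaluate_stochastic_policy=False, num_value_samples=128)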
class MoGMPOLearner(acme.Learner):
"""Distributional (MoG) MPO learner."""
def __init__(
self,
policy_network: snt.Module,
critic_network: snt.Module,
target_policy_network: snt.Module,
target_critic_network: snt.Module,
discount: float,
num_samples: int,
target_policy_update_period: int,
target_critic_update_period: int,
dataset: tf.data.Dataset,
observation_network: snt.Module,
target_observation_network: snt.Module,
policy_evaluation_config: Optional[PolicyEvaluationConfig] = None,
policy_loss_module: Optional[snt.Module] = None,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
dual_optimizer: Optional[snt.Optimizer] = None,
clipping: bool = True,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = True,
):
# Store online and target networks.
self._policy_network = policy_network
self._critic_network = critic_network
self._observation_network = observation_network
self._target_policy_network = target_policy_network
self._target_critic_network = target_critic_network
self._target_observation_network = target_observation_network
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger('learner')
# Other learner parameters.
self._discount = discount
self._num_samples = num_samples
if policy_evaluation_config is None:
policy_evaluation_config = PolicyEvaluationConfig()
self._policy_evaluation_config = policy_evaluation_config
self._clipping = clipping
# Necessary to track when to update target networks.
self._num_steps = tf.Variable(0, dtype=tf.int32)
self._target_policy_update_period = target_policy_update_period
self._target_critic_update_period = target_critic_update_period
# Batch dataset and create iterator.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
self._policy_loss_module = policy_loss_module or losses.MPO(
epsilon=1e-1,
epsilon_mean=3e-3,
epsilon_stddev=1e-6,
epsilon_penalty=1e-3,
init_log_temperature=10.,
init_log_alpha_mean=10.,
init_log_alpha_stddev=1000.)
# Create the optimizers.
self._critic_optimizer = critic_optimizer or snt.optimizers.Adam(1e-4)
self._policy_optimizer = policy_optimizer or snt.optimizers.Adam(1e-4)
self._dual_optimizer = dual_optimizer or snt.optimizers.Adam(1e-2)
# Expose the variables.
policy_network_to_expose = snt.Sequential(
[self._target_observation_network, self._target_policy_network])
self._variables = {
'critic': self._target_critic_network.variables,
'policy': policy_network_to_expose.variables,
}
# Create a checkpointer and snapshotter object.
self._checkpointer = None
self._snapshotter = None
if checkpoint:
self._checkpointer = tf2_savers.Checkpointer(
subdirectory='mog_mpo_learner',
objects_to_save={
'counter': self._counter,
'policy': self._policy_network,
'critic': self._critic_network,
'observation': self._observation_network,
'target_policy': self._target_policy_network,
'target_critic': self._target_critic_network,
'target_observation': self._target_observation_network,
'policy_optimizer': self._policy_optimizer,
'critic_optimizer': self._critic_optimizer,
'dual_optimizer': self._dual_optimizer,
'policy_loss_module': self._policy_loss_module,
'num_steps': self._num_steps,
})
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={
'policy':
snt.Sequential([
self._target_observation_network,
self._target_policy_network
]),
})
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
@tf.function
def _step(self, inputs: reverb.ReplaySample) -> types.NestedTensor:
# Get data from replay (dropping extras if any). Note there is no
# extra data here because we do not insert any into Reverb.
o_tm1, a_tm1, r_t, d_t, o_t = (inputs.data.observation, inputs.data.action,
inputs.data.reward, inputs.data.discount,
inputs.data.next_observation)
# Cast the additional discount to match the environment discount dtype.
discount = tf.cast(self._discount, dtype=d_t.dtype)
with tf.GradientTape(persistent=True) as tape:
# Maybe transform the observation before feeding into policy and critic.
# Transforming the observations this way at the start of the learning
# step effectively means that the policy and critic share observation
# network weights.
o_tm1 = self._observation_network(o_tm1)
# This stop_gradient prevents gradients to propagate into the target
# observation network. In addition, since the online policy network is
# evaluated at o_t, this also means the policy loss does not influence
# the observation network training.
o_t = tf.stop_gradient(self._target_observation_network(o_t))
# Get online and target action distributions from policy networks.
online_action_distribution = self._policy_network(o_t)
target_action_distribution = self._target_policy_network(o_t)
# Sample actions to evaluate policy; of size [N, B, ...].
sampled_actions = target_action_distribution.sample(self._num_samples)
# Tile embedded observations to feed into the target critic network.
# Note: this is more efficient than tiling before the embedding layer.
tiled_o_t = tf2_utils.tile_tensor(o_t, self._num_samples) # [N, B, ...]
# Compute target-estimated distributional value of sampled actions at o_t.
sampled_q_t_distributions = self._target_critic_network(
# Merge batch dimensions; to shape [N*B, ...].
snt.merge_leading_dims(tiled_o_t, num_dims=2),
snt.merge_leading_dims(sampled_actions, num_dims=2))
# Compute online critic value distribution of a_tm1 in state o_tm1.
q_tm1_distribution = self._critic_network(o_tm1, a_tm1) # [B, ...]
# Get the return distributions used in the policy evaluation bootstrap.
if self._policy_evaluation_config.evaluate_stochastic_policy:
z_distributions = sampled_q_t_distributions
num_joint_samples = self._num_samples
else:
z_distributions = self._target_critic_network(
o_t, target_action_distribution.mean())
num_joint_samples = 1
num_value_samples = self._policy_evaluation_config.num_value_samples
num_joint_samples *= num_value_samples
z_samples = z_distributions.sample(num_value_samples)
z_samples = tf.reshape(z_samples, (num_joint_samples, -1, 1))
# Expand dims of reward and discount tensors.
reward = r_t[..., tf.newaxis] # [B, 1]
full_discount = discount * d_t[..., tf.newaxis]
target_q = reward + full_discount * z_samples # [N, B, 1]
target_q = tf.stop_gradient(target_q)
# Compute sample-based cross-entropy.
log_probs_q = q_tm1_distribution.log_prob(target_q) # [N, B, 1]
critic_loss = -tf.reduce_mean(log_probs_q, axis=0) # [B, 1]
critic_loss = tf.reduce_mean(critic_loss)
# Compute Q-values of sampled actions and reshape to [N, B].
sampled_q_values = sampled_q_t_distributions.mean()
sampled_q_values = tf.reshape(sampled_q_values, (self._num_samples, -1))
# Compute MPO policy loss.
policy_loss, policy_stats = self._policy_loss_module(
online_action_distribution=online_action_distribution,
target_action_distribution=target_action_distribution,
actions=sampled_actions,
q_values=sampled_q_values)
policy_loss = tf.reduce_mean(policy_loss)
# For clarity, explicitly define which variables are trained by which loss.
critic_trainable_variables = (
# In this agent, the critic loss trains the observation network.
self._observation_network.trainable_variables +
self._critic_network.trainable_variables)
policy_trainable_variables = self._policy_network.trainable_variables
# The following are the MPO dual variables, stored in the loss module.
dual_trainable_variables = self._policy_loss_module.trainable_variables
# Compute gradients.
critic_gradients = tape.gradient(critic_loss, critic_trainable_variables)
policy_gradients, dual_gradients = tape.gradient(
policy_loss, (policy_trainable_variables, dual_trainable_variables))
# Delete the tape manually because of the persistent=True flag.
del tape
# Maybe clip gradients.
if self._clipping:
policy_gradients = tuple(tf.clip_by_global_norm(policy_gradients, 40.)[0])
critic_gradients = tuple(tf.clip_by_global_norm(critic_gradients, 40.)[0])
# Apply gradients.
self._critic_optimizer.apply(critic_gradients, critic_trainable_variables)
self._policy_optimizer.apply(policy_gradients, policy_trainable_variables)
self._dual_optimizer.apply(dual_gradients, dual_trainable_variables)
# Losses to track.
fetches = {
'critic_loss': critic_loss,
'policy_loss': policy_loss,
}
# Log MPO stats.
fetches.update(policy_stats)
return fetches
def step(self):
self._maybe_update_target_networks()
self._num_steps.assign_add(1)
# Run the learning step.
fetches = self._step(next(self._iterator))
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Update our counts and record it.
counts = self._counter.increment(steps=1, walltime=elapsed_time)
fetches.update(counts)
# Checkpoint and attempt to write the logs.
if self._checkpointer is not None:
self._checkpointer.save()
if self._snapshotter is not None:
self._snapshotter.save()
self._logger.write(fetches)
def get_variables(self, names: List[str]) -> List[List[np.ndarray]]:
return [tf2_utils.to_numpy(self._variables[name]) for name in names]
def _maybe_update_target_networks(self):
# Update target network.
online_policy_variables = self._policy_network.variables
target_policy_variables = self._target_policy_network.variables
online_critic_variables = (*self._observation_network.variables,
*self._critic_network.variables)
target_critic_variables = (*self._target_observation_network.variables,
*self._target_critic_network.variables)
# Make online policy -> target policy network update ops.
if tf.math.mod(self._num_steps, self._target_policy_update_period) == 0:
for src, dest in zip(online_policy_variables, target_policy_variables):
dest.assign(src)
# Make online critic -> target critic network update ops.
if tf.math.mod(self._num_steps, self._target_critic_update_period) == 0:
for src, dest in zip(online_critic_variables, target_critic_variables):
dest.assign(src)
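# Illustrative sketch (not part of the original module) of the sample-based
# policy-evaluation target assembled in `_step` above, on dummy NumPy data.
# Shapes follow the in-line comments there: z_samples is [N, B, 1] while the
# reward and combined discount are [B, 1], so broadcasting gives an [N, B, 1]
# target.
def _example_bootstrap_target(num_samples: int = 4, batch_size: int = 2):
  z_samples = np.random.randn(num_samples, batch_size, 1)
  reward = np.ones((batch_size, 1))
  full_discount = 0.99 * np.ones((batch_size, 1))
  target_q = reward + full_discount * z_samples  # [N, B, 1]
  assert target_q.shape == (num_samples, batch_size, 1)
  return target_q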
|
acme-master
|
acme/agents/tf/mog_mpo/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test for the distributed agent."""
from typing import Sequence, Tuple
import acme
from acme import specs
from acme import wrappers
from acme.agents.tf import mompo
from acme.tf import networks
from acme.tf import utils as tf2_utils
from acme.utils import lp_utils
from dm_control import suite
import launchpad as lp
import numpy as np
import sonnet as snt
import tensorflow as tf
from absl.testing import absltest
from absl.testing import parameterized
def make_networks(
action_spec: specs.BoundedArray,
num_critic_heads: int,
policy_layer_sizes: Sequence[int] = (50,),
critic_layer_sizes: Sequence[int] = (50,),
num_layers_shared: int = 1,
distributional_critic: bool = True,
vmin: float = -150.,
vmax: float = 150.,
num_atoms: int = 51,
):
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions,
tanh_mean=False,
init_scale=0.69)
])
if not distributional_critic:
critic_layer_sizes = list(critic_layer_sizes) + [1]
if not num_layers_shared:
# No layers are shared
critic_network_base = None
else:
critic_network_base = networks.LayerNormMLP(
critic_layer_sizes[:num_layers_shared], activate_final=True)
critic_network_heads = [
snt.nets.MLP(critic_layer_sizes, activation=tf.nn.elu,
activate_final=False)
for _ in range(num_critic_heads)]
if distributional_critic:
critic_network_heads = [
snt.Sequential([
c, networks.DiscreteValuedHead(vmin, vmax, num_atoms)
]) for c in critic_network_heads]
# The multiplexer concatenates the (maybe transformed) observations/actions.
critic_network = snt.Sequential([
networks.CriticMultiplexer(
critic_network=critic_network_base,
action_network=networks.ClipToSpec(action_spec)),
networks.Multihead(network_heads=critic_network_heads),
])
return {
'policy': policy_network,
'critic': critic_network,
'observation': tf2_utils.batch_concat,
}
def make_environment(evaluation: bool = False):
del evaluation # Unused.
environment = suite.load('cartpole', 'balance')
wrapped = wrappers.SinglePrecisionWrapper(environment)
return wrapped
def compute_action_norm(target_pi_samples: tf.Tensor,
target_q_target_pi_samples: tf.Tensor) -> tf.Tensor:
"""Compute Q-values for the action norm objective from action samples."""
del target_q_target_pi_samples
action_norm = tf.norm(target_pi_samples, ord=2, axis=-1)
return tf.stop_gradient(-1 * action_norm)
def task_reward_fn(observation: tf.Tensor,
action: tf.Tensor,
reward: tf.Tensor) -> tf.Tensor:
del observation, action
return tf.stop_gradient(reward)
def make_objectives() -> Tuple[
Sequence[mompo.RewardObjective], Sequence[mompo.QValueObjective]]:
"""Define the multiple objectives for the policy to learn."""
task_reward = mompo.RewardObjective(
name='task',
reward_fn=task_reward_fn)
action_norm = mompo.QValueObjective(
name='action_norm_q',
qvalue_fn=compute_action_norm)
return [task_reward], [action_norm]
class DistributedAgentTest(parameterized.TestCase):
"""Simple integration/smoke test for the distributed agent."""
@parameterized.named_parameters(
('distributional_critic', True),
('vanilla_critic', False))
def test_agent(self, distributional_critic):
# Create objectives.
reward_objectives, qvalue_objectives = make_objectives()
network_factory = lp_utils.partial_kwargs(
make_networks, distributional_critic=distributional_critic)
agent = mompo.DistributedMultiObjectiveMPO(
reward_objectives,
qvalue_objectives,
environment_factory=make_environment,
network_factory=network_factory,
num_actors=2,
batch_size=32,
min_replay_size=32,
max_replay_size=1000,
)
program = agent.build()
(learner_node,) = program.groups['learner']
learner_node.disable_run()
lp.launch(program, launch_type='test_mt')
learner: acme.Learner = learner_node.create_handle().dereference()
for _ in range(5):
learner.step()
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/mompo/agent_distributed_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the multi-objective MPO distributed agent class."""
from typing import Callable, Dict, Optional, Sequence
import acme
from acme import datasets
from acme import specs
from acme.adders import reverb as adders
from acme.agents.tf import actors
from acme.agents.tf.mompo import learning
from acme.tf import losses
from acme.tf import networks
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.tf import variable_utils as tf2_variable_utils
from acme.utils import counting
from acme.utils import loggers
from acme.utils import lp_utils
import dm_env
import launchpad as lp
import reverb
import sonnet as snt
import tensorflow as tf
MultiObjectiveNetworkFactorySpec = Callable[
[specs.BoundedArray, int], Dict[str, snt.Module]]
MultiObjectivePolicyLossFactorySpec = Callable[[], losses.MultiObjectiveMPO]
class DistributedMultiObjectiveMPO:
"""Program definition for multi-objective MPO.
This agent distinguishes itself from the distributed MPO agent in two ways:
- Allowing for one or more objectives (see `acme/agents/tf/mompo/learning.py`
for details on what form this sequence of objectives should take)
- Optionally using a distributional critic (state-action value approximator)
as in DMPO. In other words, the critic network can output either scalar
Q-values or a DiscreteValuedDistribution.
"""
def __init__(
self,
reward_objectives: Sequence[learning.RewardObjective],
qvalue_objectives: Sequence[learning.QValueObjective],
environment_factory: Callable[[bool], dm_env.Environment],
network_factory: MultiObjectiveNetworkFactorySpec,
num_actors: int = 1,
num_caches: int = 0,
environment_spec: Optional[specs.EnvironmentSpec] = None,
batch_size: int = 512,
prefetch_size: int = 4,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: Optional[float] = None,
n_step: int = 5,
max_in_flight_items: int = 5,
num_samples: int = 20,
additional_discount: float = 0.99,
target_policy_update_period: int = 200,
target_critic_update_period: int = 200,
policy_loss_factory: Optional[MultiObjectivePolicyLossFactorySpec] = None,
max_actor_steps: Optional[int] = None,
log_every: float = 10.0,
):
if environment_spec is None:
environment_spec = specs.make_environment_spec(environment_factory(False))
self._environment_factory = environment_factory
self._network_factory = network_factory
self._policy_loss_factory = policy_loss_factory
self._environment_spec = environment_spec
self._num_actors = num_actors
self._num_caches = num_caches
self._batch_size = batch_size
self._prefetch_size = prefetch_size
self._min_replay_size = min_replay_size
self._max_replay_size = max_replay_size
self._samples_per_insert = samples_per_insert
self._n_step = n_step
self._max_in_flight_items = max_in_flight_items
self._additional_discount = additional_discount
self._num_samples = num_samples
self._target_policy_update_period = target_policy_update_period
self._target_critic_update_period = target_critic_update_period
self._max_actor_steps = max_actor_steps
self._log_every = log_every
self._reward_objectives = reward_objectives
self._qvalue_objectives = qvalue_objectives
self._num_critic_heads = len(self._reward_objectives)
if not self._reward_objectives:
raise ValueError('Must specify at least one reward objective.')
def replay(self):
"""The replay storage."""
if self._samples_per_insert is not None:
# Create enough of an error buffer to give a 10% tolerance in rate.
samples_per_insert_tolerance = 0.1 * self._samples_per_insert
error_buffer = self._min_replay_size * samples_per_insert_tolerance
limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._min_replay_size,
samples_per_insert=self._samples_per_insert,
error_buffer=error_buffer)
else:
limiter = reverb.rate_limiters.MinSize(self._min_replay_size)
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._max_replay_size,
rate_limiter=limiter,
signature=adders.NStepTransitionAdder.signature(
self._environment_spec))
return [replay_table]
def counter(self):
return tf2_savers.CheckpointingRunner(counting.Counter(),
time_delta_minutes=1,
subdirectory='counter')
def coordinator(self, counter: counting.Counter, max_actor_steps: int):
return lp_utils.StepsLimiter(counter, max_actor_steps)
def learner(
self,
replay: reverb.Client,
counter: counting.Counter,
):
"""The Learning part of the agent."""
act_spec = self._environment_spec.actions
obs_spec = self._environment_spec.observations
# Create online and target networks.
online_networks = self._network_factory(act_spec, self._num_critic_heads)
target_networks = self._network_factory(act_spec, self._num_critic_heads)
# Make sure observation network is a Sonnet Module.
observation_network = online_networks.get('observation', tf.identity)
target_observation_network = target_networks.get('observation', tf.identity)
observation_network = tf2_utils.to_sonnet_module(observation_network)
target_observation_network = tf2_utils.to_sonnet_module(
target_observation_network)
# Get embedding spec and create observation network variables.
emb_spec = tf2_utils.create_variables(observation_network, [obs_spec])
# Create variables.
tf2_utils.create_variables(online_networks['policy'], [emb_spec])
tf2_utils.create_variables(online_networks['critic'], [emb_spec, act_spec])
tf2_utils.create_variables(target_networks['policy'], [emb_spec])
tf2_utils.create_variables(target_networks['critic'], [emb_spec, act_spec])
tf2_utils.create_variables(target_observation_network, [obs_spec])
# The dataset object to learn from.
dataset = datasets.make_reverb_dataset(server_address=replay.server_address)
dataset = dataset.batch(self._batch_size, drop_remainder=True)
dataset = dataset.prefetch(self._prefetch_size)
counter = counting.Counter(counter, 'learner')
logger = loggers.make_default_logger(
'learner', time_delta=self._log_every, steps_key='learner_steps')
# Create policy loss module if a factory is passed.
if self._policy_loss_factory:
policy_loss_module = self._policy_loss_factory()
else:
policy_loss_module = None
# Return the learning agent.
return learning.MultiObjectiveMPOLearner(
reward_objectives=self._reward_objectives,
qvalue_objectives=self._qvalue_objectives,
policy_network=online_networks['policy'],
critic_network=online_networks['critic'],
observation_network=observation_network,
target_policy_network=target_networks['policy'],
target_critic_network=target_networks['critic'],
target_observation_network=target_observation_network,
discount=self._additional_discount,
num_samples=self._num_samples,
target_policy_update_period=self._target_policy_update_period,
target_critic_update_period=self._target_critic_update_period,
policy_loss_module=policy_loss_module,
dataset=dataset,
counter=counter,
logger=logger)
def actor(
self,
replay: reverb.Client,
variable_source: acme.VariableSource,
counter: counting.Counter,
) -> acme.EnvironmentLoop:
"""The actor process."""
action_spec = self._environment_spec.actions
observation_spec = self._environment_spec.observations
# Create environment and target networks to act with.
environment = self._environment_factory(False)
agent_networks = self._network_factory(action_spec, self._num_critic_heads)
# Make sure observation network is defined.
observation_network = agent_networks.get('observation', tf.identity)
# Create a stochastic behavior policy.
behavior_network = snt.Sequential([
observation_network,
agent_networks['policy'],
networks.StochasticSamplingHead(),
])
# Ensure network variables are created.
tf2_utils.create_variables(behavior_network, [observation_spec])
policy_variables = {'policy': behavior_network.variables}
# Create the variable client responsible for keeping the actor up-to-date.
variable_client = tf2_variable_utils.VariableClient(
variable_source, policy_variables, update_period=1000)
# Make sure not to use a random policy after checkpoint restoration by
# assigning variables before running the environment loop.
variable_client.update_and_wait()
# Component to add things into replay.
adder = adders.NStepTransitionAdder(
client=replay,
n_step=self._n_step,
max_in_flight_items=self._max_in_flight_items,
discount=self._additional_discount)
# Create the agent.
actor = actors.FeedForwardActor(
policy_network=behavior_network,
adder=adder,
variable_client=variable_client)
# Create logger and counter; actors will not spam bigtable.
counter = counting.Counter(counter, 'actor')
logger = loggers.make_default_logger(
'actor',
save_data=False,
time_delta=self._log_every,
steps_key='actor_steps')
# Create the run loop and return it.
return acme.EnvironmentLoop(
environment, actor, counter, logger)
def evaluator(
self,
variable_source: acme.VariableSource,
counter: counting.Counter,
):
"""The evaluation process."""
action_spec = self._environment_spec.actions
observation_spec = self._environment_spec.observations
# Create environment and target networks to act with.
environment = self._environment_factory(True)
agent_networks = self._network_factory(action_spec, self._num_critic_heads)
# Make sure observation network is defined.
observation_network = agent_networks.get('observation', tf.identity)
# Create a deterministic behavior policy.
evaluator_modules = [
observation_network,
agent_networks['policy'],
networks.StochasticMeanHead(),
]
if isinstance(action_spec, specs.BoundedArray):
evaluator_modules += [networks.ClipToSpec(action_spec)]
evaluator_network = snt.Sequential(evaluator_modules)
# Ensure network variables are created.
tf2_utils.create_variables(evaluator_network, [observation_spec])
policy_variables = {'policy': evaluator_network.variables}
# Create the variable client responsible for keeping the actor up-to-date.
variable_client = tf2_variable_utils.VariableClient(
variable_source, policy_variables, update_period=1000)
# Make sure not to evaluate a random actor by assigning variables before
# running the environment loop.
variable_client.update_and_wait()
# Create the agent.
evaluator = actors.FeedForwardActor(
policy_network=evaluator_network, variable_client=variable_client)
# Create logger and counter.
counter = counting.Counter(counter, 'evaluator')
logger = loggers.make_default_logger(
'evaluator', time_delta=self._log_every, steps_key='evaluator_steps')
# Create the run loop and return it.
return acme.EnvironmentLoop(
environment, evaluator, counter, logger)
def build(self, name='mompo'):
"""Build the distributed agent topology."""
program = lp.Program(name=name)
with program.group('replay'):
replay = program.add_node(lp.ReverbNode(self.replay))
with program.group('counter'):
counter = program.add_node(lp.CourierNode(self.counter))
if self._max_actor_steps:
_ = program.add_node(
lp.CourierNode(self.coordinator, counter, self._max_actor_steps))
with program.group('learner'):
learner = program.add_node(
lp.CourierNode(self.learner, replay, counter))
with program.group('evaluator'):
program.add_node(
lp.CourierNode(self.evaluator, learner, counter))
if not self._num_caches:
# Use our learner as a single variable source.
sources = [learner]
else:
with program.group('cacher'):
# Create a set of learner caches.
sources = []
for _ in range(self._num_caches):
cacher = program.add_node(
lp.CacherNode(
learner, refresh_interval_ms=2000, stale_after_ms=4000))
sources.append(cacher)
with program.group('actor'):
# Add actors which pull round-robin from our variable sources.
for actor_id in range(self._num_actors):
source = sources[actor_id % len(sources)]
program.add_node(lp.CourierNode(self.actor, replay, source, counter))
return program
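# Small sketch (not part of the original file): how `build()` above maps actors
# to variable sources when learner caches are enabled. With hypothetical values
# num_caches=2 and num_actors=5, actors pull round-robin from the caches.
def _example_actor_source_assignment(num_caches: int = 2, num_actors: int = 5):
  sources = ['cacher_{}'.format(i) for i in range(num_caches)]
  assignment = {actor_id: sources[actor_id % len(sources)]
                for actor_id in range(num_actors)}
  # e.g. {0: 'cacher_0', 1: 'cacher_1', 2: 'cacher_0', 3: 'cacher_1',
  #       4: 'cacher_0'}
  return assignment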
|
acme-master
|
acme/agents/tf/mompo/agent_distributed.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of a distributional MPO agent."""
from acme.agents.tf.mompo.agent import MultiObjectiveMPO
from acme.agents.tf.mompo.agent_distributed import DistributedMultiObjectiveMPO
from acme.agents.tf.mompo.learning import MultiObjectiveMPOLearner
from acme.agents.tf.mompo.learning import QValueObjective
from acme.agents.tf.mompo.learning import RewardObjective
|
acme-master
|
acme/agents/tf/mompo/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the multi-objective MPO agent."""
from typing import Dict, Sequence, Tuple
import acme
from acme import specs
from acme.agents.tf import mompo
from acme.testing import fakes
from acme.tf import networks
import numpy as np
import sonnet as snt
import tensorflow as tf
from absl.testing import absltest
from absl.testing import parameterized
def make_networks(
action_spec: specs.Array,
num_critic_heads: int,
policy_layer_sizes: Sequence[int] = (300, 200),
critic_layer_sizes: Sequence[int] = (400, 300),
num_layers_shared: int = 1,
distributional_critic: bool = True,
vmin: float = -150.,
vmax: float = 150.,
num_atoms: int = 51,
) -> Dict[str, snt.Module]:
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions,
tanh_mean=False,
init_scale=0.69)
])
if not distributional_critic:
critic_layer_sizes = list(critic_layer_sizes) + [1]
if not num_layers_shared:
# No layers are shared
critic_network_base = None
else:
critic_network_base = networks.LayerNormMLP(
critic_layer_sizes[:num_layers_shared], activate_final=True)
critic_network_heads = [
snt.nets.MLP(critic_layer_sizes, activation=tf.nn.elu,
activate_final=False)
for _ in range(num_critic_heads)]
if distributional_critic:
critic_network_heads = [
snt.Sequential([
c, networks.DiscreteValuedHead(vmin, vmax, num_atoms)
]) for c in critic_network_heads]
# The multiplexer concatenates the (maybe transformed) observations/actions.
critic_network = snt.Sequential([
networks.CriticMultiplexer(
critic_network=critic_network_base),
networks.Multihead(network_heads=critic_network_heads),
])
return {
'policy': policy_network,
'critic': critic_network,
}
def compute_action_norm(target_pi_samples: tf.Tensor,
target_q_target_pi_samples: tf.Tensor) -> tf.Tensor:
"""Compute Q-values for the action norm objective from action samples."""
del target_q_target_pi_samples
action_norm = tf.norm(target_pi_samples, ord=2, axis=-1)
return tf.stop_gradient(-1 * action_norm)
def task_reward_fn(observation: tf.Tensor,
action: tf.Tensor,
reward: tf.Tensor) -> tf.Tensor:
del observation, action
return tf.stop_gradient(reward)
def make_objectives() -> Tuple[
Sequence[mompo.RewardObjective], Sequence[mompo.QValueObjective]]:
"""Define the multiple objectives for the policy to learn."""
task_reward = mompo.RewardObjective(
name='task',
reward_fn=task_reward_fn)
action_norm = mompo.QValueObjective(
name='action_norm_q',
qvalue_fn=compute_action_norm)
return [task_reward], [action_norm]
class MOMPOTest(parameterized.TestCase):
@parameterized.named_parameters(
('distributional_critic', True),
('vanilla_critic', False))
def test_mompo(self, distributional_critic):
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(episode_length=10)
spec = specs.make_environment_spec(environment)
# Create objectives.
reward_objectives, qvalue_objectives = make_objectives()
num_critic_heads = len(reward_objectives)
# Create networks.
agent_networks = make_networks(
spec.actions, num_critic_heads=num_critic_heads,
distributional_critic=distributional_critic)
# Construct the agent.
agent = mompo.MultiObjectiveMPO(
reward_objectives,
qvalue_objectives,
spec,
policy_network=agent_networks['policy'],
critic_network=agent_networks['critic'],
batch_size=10,
samples_per_insert=2,
min_replay_size=10)
# Try running the environment loop. We have no assertions here because all
# we care about is that the agent runs without raising any errors.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=2)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/mompo/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-objective MPO agent implementation."""
import copy
from typing import Optional, Sequence
from acme import datasets
from acme import specs
from acme import types
from acme.adders import reverb as adders
from acme.agents import agent
from acme.agents.tf import actors
from acme.agents.tf.mompo import learning
from acme.tf import losses
from acme.tf import networks
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import reverb
import sonnet as snt
import tensorflow as tf
class MultiObjectiveMPO(agent.Agent):
"""Multi-objective MPO Agent.
This implements a single-process multi-objective MPO agent. This is an
actor-critic algorithm that generates data via a behavior policy, inserts
N-step transitions into a replay buffer, and periodically updates the policy
(and as a result the behavior) by sampling uniformly from this buffer.
This agent distinguishes itself from the MPO agent in two ways:
- Allowing for one or more objectives (see `acme/agents/tf/mompo/learning.py`
for details on what form this sequence of objectives should take)
- Optionally using a distributional critic (state-action value approximator)
as in DMPO. In other words, the critic network can output either scalar
Q-values or a DiscreteValuedDistribution.
"""
def __init__(self,
reward_objectives: Sequence[learning.RewardObjective],
qvalue_objectives: Sequence[learning.QValueObjective],
environment_spec: specs.EnvironmentSpec,
policy_network: snt.Module,
critic_network: snt.Module,
observation_network: types.TensorTransformation = tf.identity,
discount: float = 0.99,
batch_size: int = 512,
prefetch_size: int = 4,
target_policy_update_period: int = 200,
target_critic_update_period: int = 200,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: float = 16.,
policy_loss_module: Optional[losses.MultiObjectiveMPO] = None,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
n_step: int = 5,
num_samples: int = 20,
clipping: bool = True,
logger: Optional[loggers.Logger] = None,
counter: Optional[counting.Counter] = None,
checkpoint: bool = True,
replay_table_name: str = adders.DEFAULT_PRIORITY_TABLE):
"""Initialize the agent.
Args:
reward_objectives: list of the objectives that the policy should optimize;
each objective is defined by its reward function
qvalue_objectives: list of the objectives that the policy should optimize;
each objective is defined by its Q-value function
environment_spec: description of the actions, observations, etc.
policy_network: the online (optimized) policy.
critic_network: the online critic.
observation_network: optional network to transform the observations before
they are fed into any network.
discount: discount to use for TD updates.
batch_size: batch size for updates.
prefetch_size: size to prefetch from replay.
target_policy_update_period: number of updates to perform before updating
the target policy network.
target_critic_update_period: number of updates to perform before updating
the target critic network.
min_replay_size: minimum replay size before updating.
max_replay_size: maximum replay size.
samples_per_insert: number of samples to take from replay for every insert
that is made.
policy_loss_module: configured MO-MPO loss function for the policy
optimization; defaults to sensible values on the control suite.
See `acme/tf/losses/mompo.py` for more details.
policy_optimizer: optimizer to be used on the policy.
critic_optimizer: optimizer to be used on the critic.
n_step: number of steps to squash into a single transition.
num_samples: number of actions to sample when doing a Monte Carlo
integration with respect to the policy.
clipping: whether to clip gradients by global norm.
logger: logging object used to write to logs.
counter: counter object used to keep track of steps.
checkpoint: boolean indicating whether to checkpoint the learner.
replay_table_name: string indicating what name to give the replay table.
"""
# Check that at least one objective's reward function is specified.
if not reward_objectives:
raise ValueError('Must specify at least one reward objective.')
# Create a replay server to add data to.
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=max_replay_size,
rate_limiter=reverb.rate_limiters.MinSize(min_size_to_sample=1),
signature=adders.NStepTransitionAdder.signature(environment_spec))
self._server = reverb.Server([replay_table], port=None)
# The adder is used to insert observations into replay.
address = f'localhost:{self._server.port}'
adder = adders.NStepTransitionAdder(
client=reverb.Client(address),
n_step=n_step,
discount=discount)
# The dataset object to learn from.
dataset = datasets.make_reverb_dataset(
table=replay_table_name,
server_address=address,
batch_size=batch_size,
prefetch_size=prefetch_size)
# Make sure observation network is a Sonnet Module.
observation_network = tf2_utils.to_sonnet_module(observation_network)
# Create target networks before creating online/target network variables.
target_policy_network = copy.deepcopy(policy_network)
target_critic_network = copy.deepcopy(critic_network)
target_observation_network = copy.deepcopy(observation_network)
# Get observation and action specs.
act_spec = environment_spec.actions
obs_spec = environment_spec.observations
emb_spec = tf2_utils.create_variables(observation_network, [obs_spec])
# Create the behavior policy.
behavior_network = snt.Sequential([
observation_network,
policy_network,
networks.StochasticSamplingHead(),
])
# Create variables.
tf2_utils.create_variables(policy_network, [emb_spec])
tf2_utils.create_variables(critic_network, [emb_spec, act_spec])
tf2_utils.create_variables(target_policy_network, [emb_spec])
tf2_utils.create_variables(target_critic_network, [emb_spec, act_spec])
tf2_utils.create_variables(target_observation_network, [obs_spec])
# Create the actor which defines how we take actions.
actor = actors.FeedForwardActor(
policy_network=behavior_network, adder=adder)
# Create optimizers.
policy_optimizer = policy_optimizer or snt.optimizers.Adam(1e-4)
critic_optimizer = critic_optimizer or snt.optimizers.Adam(1e-4)
# The learner updates the parameters (and initializes them).
learner = learning.MultiObjectiveMPOLearner(
reward_objectives=reward_objectives,
qvalue_objectives=qvalue_objectives,
policy_network=policy_network,
critic_network=critic_network,
observation_network=observation_network,
target_policy_network=target_policy_network,
target_critic_network=target_critic_network,
target_observation_network=target_observation_network,
policy_loss_module=policy_loss_module,
policy_optimizer=policy_optimizer,
critic_optimizer=critic_optimizer,
clipping=clipping,
discount=discount,
num_samples=num_samples,
target_policy_update_period=target_policy_update_period,
target_critic_update_period=target_critic_update_period,
dataset=dataset,
logger=logger,
counter=counter,
checkpoint=checkpoint)
super().__init__(
actor=actor,
learner=learner,
min_observations=max(batch_size, min_replay_size),
observations_per_step=float(batch_size) / samples_per_insert)
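# Arithmetic sketch (not part of the original module) of the rate arguments
# passed to `agent.Agent` in `__init__` above, using the constructor defaults
# batch_size=512, samples_per_insert=16 and min_replay_size=1000.
def _example_agent_rates(batch_size: int = 512,
                         samples_per_insert: float = 16.,
                         min_replay_size: int = 1000):
  min_observations = max(batch_size, min_replay_size)  # 1000
  observations_per_step = float(batch_size) / samples_per_insert  # 32.0
  # i.e. learning starts only after 1000 observations have been collected, and
  # thereafter the agent takes ~32 environment steps per learner update.
  return min_observations, observations_per_step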
|
acme-master
|
acme/agents/tf/mompo/agent.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-objective MPO learner implementation."""
import dataclasses
import time
from typing import Callable, List, Optional, Sequence
import acme
from acme import types
from acme.tf import losses
from acme.tf import networks
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import sonnet as snt
import tensorflow as tf
import trfl
QValueFunctionSpec = Callable[[tf.Tensor, tf.Tensor], tf.Tensor]
RewardFunctionSpec = Callable[[tf.Tensor, tf.Tensor, tf.Tensor], tf.Tensor]
_DEFAULT_EPSILON = 1e-1
_DEFAULT_EPSILON_MEAN = 1e-3
_DEFAULT_EPSILON_STDDEV = 1e-6
_DEFAULT_INIT_LOG_TEMPERATURE = 1.
_DEFAULT_INIT_LOG_ALPHA_MEAN = 1.
_DEFAULT_INIT_LOG_ALPHA_STDDEV = 10.
@dataclasses.dataclass
class QValueObjective:
"""Defines an objective by specifying its 'Q-values' directly."""
name: str
# This computes "Q-values" directly from the sampled actions and other Q's.
qvalue_fn: QValueFunctionSpec
@dataclasses.dataclass
class RewardObjective:
"""Defines an objective by specifying its reward function."""
name: str
# This computes the reward from observations, actions, and environment task
# reward. In the learner, a head will automatically be added to the critic
# network, to learn Q-values for this objective.
reward_fn: RewardFunctionSpec
class MultiObjectiveMPOLearner(acme.Learner):
"""Distributional MPO learner.
This is the learning component of a multi-objective MPO (MO-MPO) agent. Two
sequences of objectives must be specified. Otherwise, the inputs are identical
to those of the MPO / DMPO learners.
Each objective must be defined as either a RewardObjective or an
QValueObjective. These objectives are provided by the reward_objectives and
  qvalue_objectives parameters, respectively. For each RewardObjective, a critic
  will be trained to estimate Q-values for that objective, whereas for each
  QValueObjective the Q-values are computed directly by its qvalue_fn.
A RewardObjective's reward_fn takes the observation, action, and environment
reward as input, and returns the reward for that objective. For example, if
  the environment reward is a scalar, then an objective corresponding to the
  task would simply return the environment reward.
A QValueObjective's qvalue_fn takes the actions and reward-based objectives'
Q-values as input, and outputs the "Q-values" for that objective. For
instance, in the MO-MPO paper ([Abdolmaleki, Huang et al., 2020]), the action
norm objective in the Humanoid run task is defined by setting the qvalue_fn
to be the l2-norm of the actions.
Note: If there is only one objective and that is the task reward, then this
algorithm becomes exactly the same as (D)MPO.
(Abdolmaleki, Huang et al., 2020): https://arxiv.org/pdf/2005.07513.pdf
"""
def __init__(
self,
reward_objectives: Sequence[RewardObjective],
qvalue_objectives: Sequence[QValueObjective],
policy_network: snt.Module,
critic_network: snt.Module,
target_policy_network: snt.Module,
target_critic_network: snt.Module,
discount: float,
num_samples: int,
target_policy_update_period: int,
target_critic_update_period: int,
dataset: tf.data.Dataset,
observation_network: types.TensorTransformation = tf.identity,
target_observation_network: types.TensorTransformation = tf.identity,
policy_loss_module: Optional[losses.MultiObjectiveMPO] = None,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
dual_optimizer: Optional[snt.Optimizer] = None,
clipping: bool = True,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = True,
):
# Store online and target networks.
self._policy_network = policy_network
self._critic_network = critic_network
self._target_policy_network = target_policy_network
self._target_critic_network = target_critic_network
# Make sure observation networks are snt.Module's so they have variables.
self._observation_network = tf2_utils.to_sonnet_module(observation_network)
self._target_observation_network = tf2_utils.to_sonnet_module(
target_observation_network)
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger('learner')
# Other learner parameters.
self._discount = discount
self._num_samples = num_samples
self._clipping = clipping
# Necessary to track when to update target networks.
self._num_steps = tf.Variable(0, dtype=tf.int32)
self._target_policy_update_period = target_policy_update_period
self._target_critic_update_period = target_critic_update_period
# Batch dataset and create iterator.
# TODO(b/155086959): Fix type stubs and remove.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
# Store objectives
self._reward_objectives = reward_objectives
self._qvalue_objectives = qvalue_objectives
if self._qvalue_objectives is None:
self._qvalue_objectives = []
self._num_critic_heads = len(self._reward_objectives) # C
self._objective_names = (
[x.name for x in self._reward_objectives] +
[x.name for x in self._qvalue_objectives])
self._policy_loss_module = policy_loss_module or losses.MultiObjectiveMPO(
epsilons=[losses.KLConstraint(name, _DEFAULT_EPSILON)
for name in self._objective_names],
epsilon_mean=_DEFAULT_EPSILON_MEAN,
epsilon_stddev=_DEFAULT_EPSILON_STDDEV,
init_log_temperature=_DEFAULT_INIT_LOG_TEMPERATURE,
init_log_alpha_mean=_DEFAULT_INIT_LOG_ALPHA_MEAN,
init_log_alpha_stddev=_DEFAULT_INIT_LOG_ALPHA_STDDEV)
# Check that ordering of objectives matches the policy_loss_module's
if self._objective_names != list(self._policy_loss_module.objective_names):
raise ValueError("Agent's ordering of objectives doesn't match "
"the policy loss module's ordering of epsilons.")
# Create the optimizers.
self._critic_optimizer = critic_optimizer or snt.optimizers.Adam(1e-4)
self._policy_optimizer = policy_optimizer or snt.optimizers.Adam(1e-4)
self._dual_optimizer = dual_optimizer or snt.optimizers.Adam(1e-2)
# Expose the variables.
policy_network_to_expose = snt.Sequential(
[self._target_observation_network, self._target_policy_network])
self._variables = {
'critic': self._target_critic_network.variables,
'policy': policy_network_to_expose.variables,
}
# Create a checkpointer and snapshotter object.
self._checkpointer = None
self._snapshotter = None
if checkpoint:
self._checkpointer = tf2_savers.Checkpointer(
subdirectory='mompo_learner',
objects_to_save={
'counter': self._counter,
'policy': self._policy_network,
'critic': self._critic_network,
'observation': self._observation_network,
'target_policy': self._target_policy_network,
'target_critic': self._target_critic_network,
'target_observation': self._target_observation_network,
'policy_optimizer': self._policy_optimizer,
'critic_optimizer': self._critic_optimizer,
'dual_optimizer': self._dual_optimizer,
'policy_loss_module': self._policy_loss_module,
'num_steps': self._num_steps,
})
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={
'policy':
snt.Sequential([
self._target_observation_network,
self._target_policy_network
]),
})
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
    self._timestamp: Optional[float] = None
@tf.function
def _step(self) -> types.NestedTensor:
# Update target network.
online_policy_variables = self._policy_network.variables
target_policy_variables = self._target_policy_network.variables
online_critic_variables = (
*self._observation_network.variables,
*self._critic_network.variables,
)
target_critic_variables = (
*self._target_observation_network.variables,
*self._target_critic_network.variables,
)
# Make online policy -> target policy network update ops.
if tf.math.mod(self._num_steps, self._target_policy_update_period) == 0:
for src, dest in zip(online_policy_variables, target_policy_variables):
dest.assign(src)
# Make online critic -> target critic network update ops.
if tf.math.mod(self._num_steps, self._target_critic_update_period) == 0:
for src, dest in zip(online_critic_variables, target_critic_variables):
dest.assign(src)
self._num_steps.assign_add(1)
# Get data from replay (dropping extras if any). Note there is no
# extra data here because we do not insert any into Reverb.
inputs = next(self._iterator)
transitions: types.Transition = inputs.data
with tf.GradientTape(persistent=True) as tape:
# Maybe transform the observation before feeding into policy and critic.
# Transforming the observations this way at the start of the learning
# step effectively means that the policy and critic share observation
# network weights.
o_tm1 = self._observation_network(transitions.observation)
# This stop_gradient prevents gradients to propagate into the target
# observation network. In addition, since the online policy network is
# evaluated at o_t, this also means the policy loss does not influence
# the observation network training.
o_t = tf.stop_gradient(
self._target_observation_network(transitions.next_observation))
# Get online and target action distributions from policy networks.
online_action_distribution = self._policy_network(o_t)
target_action_distribution = self._target_policy_network(o_t)
# Sample actions to evaluate policy; of size [N, B, ...].
sampled_actions = target_action_distribution.sample(self._num_samples)
# Tile embedded observations to feed into the target critic network.
# Note: this is more efficient than tiling before the embedding layer.
tiled_o_t = tf2_utils.tile_tensor(o_t, self._num_samples) # [N, B, ...]
# Compute target-estimated distributional value of sampled actions at o_t.
sampled_q_t_all = self._target_critic_network(
# Merge batch dimensions; to shape [N*B, ...].
snt.merge_leading_dims(tiled_o_t, num_dims=2),
snt.merge_leading_dims(sampled_actions, num_dims=2))
# Compute online critic value distribution of a_tm1 in state o_tm1.
q_tm1_all = self._critic_network(o_tm1, transitions.action)
# Compute rewards for objectives with defined reward_fn
reward_stats = {}
r_t_all = []
for objective in self._reward_objectives:
r = objective.reward_fn(o_tm1, transitions.action, transitions.reward)
reward_stats['{}_reward'.format(objective.name)] = tf.reduce_mean(r)
r_t_all.append(r)
r_t_all = tf.stack(r_t_all, axis=-1)
r_t_all.get_shape().assert_has_rank(2) # [B, C]
if isinstance(sampled_q_t_all, list): # Distributional critics
critic_loss, sampled_q_t = _compute_distributional_critic_loss(
sampled_q_t_all, q_tm1_all, r_t_all, transitions.discount,
self._discount, self._num_samples)
else:
critic_loss, sampled_q_t = _compute_critic_loss(
sampled_q_t_all, q_tm1_all, r_t_all, transitions.discount,
self._discount, self._num_samples, self._num_critic_heads)
# Add sampled Q-values for objectives with defined qvalue_fn
sampled_q_t_k = [sampled_q_t]
for objective in self._qvalue_objectives:
sampled_q_t_k.append(tf.expand_dims(tf.stop_gradient(
objective.qvalue_fn(sampled_actions, sampled_q_t)), axis=-1))
sampled_q_t_k = tf.concat(sampled_q_t_k, axis=-1) # [N, B, K]
# Compute MPO policy loss.
policy_loss, policy_stats = self._policy_loss_module(
online_action_distribution=online_action_distribution,
target_action_distribution=target_action_distribution,
actions=sampled_actions,
q_values=sampled_q_t_k)
# For clarity, explicitly define which variables are trained by which loss.
critic_trainable_variables = (
# In this agent, the critic loss trains the observation network.
self._observation_network.trainable_variables +
self._critic_network.trainable_variables)
policy_trainable_variables = self._policy_network.trainable_variables
# The following are the MPO dual variables, stored in the loss module.
dual_trainable_variables = self._policy_loss_module.trainable_variables
# Compute gradients.
critic_gradients = tape.gradient(critic_loss, critic_trainable_variables)
policy_gradients, dual_gradients = tape.gradient(
policy_loss, (policy_trainable_variables, dual_trainable_variables))
# Delete the tape manually because of the persistent=True flag.
del tape
# Maybe clip gradients.
if self._clipping:
policy_gradients = tuple(tf.clip_by_global_norm(policy_gradients, 40.)[0])
critic_gradients = tuple(tf.clip_by_global_norm(critic_gradients, 40.)[0])
# Apply gradients.
self._critic_optimizer.apply(critic_gradients, critic_trainable_variables)
self._policy_optimizer.apply(policy_gradients, policy_trainable_variables)
self._dual_optimizer.apply(dual_gradients, dual_trainable_variables)
# Losses to track.
fetches = {
'critic_loss': critic_loss,
'policy_loss': policy_loss,
}
fetches.update(policy_stats) # Log MPO stats.
fetches.update(reward_stats) # Log reward stats.
return fetches
def step(self):
# Run the learning step.
fetches = self._step()
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Update our counts and record it.
counts = self._counter.increment(steps=1, walltime=elapsed_time)
fetches.update(counts)
# Checkpoint and attempt to write the logs.
if self._checkpointer is not None:
self._checkpointer.save()
if self._snapshotter is not None:
self._snapshotter.save()
self._logger.write(fetches)
def get_variables(self, names: List[str]) -> List[List[np.ndarray]]:
return [tf2_utils.to_numpy(self._variables[name]) for name in names]
def _compute_distributional_critic_loss(
sampled_q_t_all: List[tf.Tensor],
q_tm1_all: List[tf.Tensor],
r_t_all: tf.Tensor,
d_t: tf.Tensor,
discount: float,
num_samples: int):
"""Compute loss and sampled Q-values for distributional critics."""
# Compute average logits by first reshaping them and normalizing them
# across atoms.
batch_size = r_t_all.get_shape()[0]
# Cast the additional discount to match the environment discount dtype.
discount = tf.cast(discount, dtype=d_t.dtype)
critic_losses = []
sampled_q_ts = []
for idx, (sampled_q_t_distributions, q_tm1_distribution) in enumerate(
zip(sampled_q_t_all, q_tm1_all)):
# Compute loss for distributional critic for objective c
sampled_logits = tf.reshape(
sampled_q_t_distributions.logits,
[num_samples, batch_size, -1]) # [N, B, A]
sampled_logprobs = tf.math.log_softmax(sampled_logits, axis=-1)
averaged_logits = tf.reduce_logsumexp(sampled_logprobs, axis=0)
# Construct the expected distributional value for bootstrapping.
q_t_distribution = networks.DiscreteValuedDistribution(
values=sampled_q_t_distributions.values, logits=averaged_logits)
# Compute critic distributional loss.
critic_loss = losses.categorical(
q_tm1_distribution, r_t_all[:, idx], discount * d_t,
q_t_distribution)
critic_losses.append(tf.reduce_mean(critic_loss))
# Compute Q-values of sampled actions and reshape to [N, B].
sampled_q_ts.append(tf.reshape(
sampled_q_t_distributions.mean(), (num_samples, -1)))
critic_loss = tf.reduce_mean(critic_losses)
sampled_q_t = tf.stack(sampled_q_ts, axis=-1) # [N, B, C]
return critic_loss, sampled_q_t
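# The helper below is an illustrative sketch (not used by the learner) of the
# logit-averaging trick in `_compute_distributional_critic_loss`: log_softmax
# followed by logsumexp over the sample axis yields, up to an additive log(N)
# constant that a later softmax cancels, the logits of the mixture whose
# probabilities are the mean of the per-sample probabilities.
def _illustrate_logit_averaging():
  """Checks the logit-averaging identity with plain numpy ([N, B, A] shapes)."""
  import numpy as np  # Local import so the sketch reads standalone.

  def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

  rng = np.random.default_rng(0)
  logits = rng.normal(size=(4, 2, 5))  # [N=4, B=2, A=5] sampled logits.
  logprobs = np.log(softmax(logits))  # log_softmax over atoms.
  averaged_logits = np.log(np.exp(logprobs).sum(axis=0))  # reduce_logsumexp.
  # Normalizing the averaged logits recovers the mean of the probabilities.
  np.testing.assert_allclose(
      softmax(averaged_logits), softmax(logits).mean(axis=0), atol=1e-6)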
def _compute_critic_loss(
sampled_q_t_all: tf.Tensor,
q_tm1_all: tf.Tensor,
r_t_all: tf.Tensor,
d_t: tf.Tensor,
discount: float,
num_samples: int,
num_critic_heads: int):
"""Compute loss and sampled Q-values for (non-distributional) critics."""
# Reshape Q-value samples back to original batch dimensions and average
# them to compute the TD-learning bootstrap target.
batch_size = r_t_all.get_shape()[0]
sampled_q_t = tf.reshape(
sampled_q_t_all,
(num_samples, batch_size, num_critic_heads)) # [N,B,C]
q_t = tf.reduce_mean(sampled_q_t, axis=0) # [B, C]
# Flatten q_t and q_tm1; necessary for trfl.td_learning
q_t = tf.reshape(q_t, [-1]) # [B*C]
q_tm1 = tf.reshape(q_tm1_all, [-1]) # [B*C]
# Flatten r_t_all; necessary for trfl.td_learning
r_t_all = tf.reshape(r_t_all, [-1]) # [B*C]
# Broadcast and then flatten d_t, to match shape of q_t and q_tm1
d_t = tf.tile(d_t, [num_critic_heads]) # [B*C]
# Cast the additional discount to match the environment discount dtype.
discount = tf.cast(discount, dtype=d_t.dtype)
# Critic loss.
critic_loss = trfl.td_learning(q_tm1, r_t_all, discount * d_t, q_t).loss
critic_loss = tf.reduce_mean(critic_loss)
return critic_loss, sampled_q_t
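# The sketch below (not used by the learner) walks through the shape
# bookkeeping in `_compute_critic_loss` with plain numpy, assuming B=3
# transitions and C=2 critic heads: the [B, C] Q-values and rewards are
# flattened to [B*C] and the per-transition discounts are tiled to [B*C],
# because trfl.td_learning operates on rank-1 tensors (its loss is the
# elementwise 0.5 * squared TD error).
def _illustrate_td_flattening():
  """Shape walkthrough for the flattening done in `_compute_critic_loss`."""
  import numpy as np  # Local import so the sketch reads standalone.

  batch_size, num_critic_heads = 3, 2
  q_t = np.zeros((batch_size, num_critic_heads))  # [B, C]
  q_tm1 = np.zeros((batch_size, num_critic_heads))  # [B, C]
  r_t = np.zeros((batch_size, num_critic_heads))  # [B, C]
  d_t = np.ones((batch_size,))  # [B]
  q_t, q_tm1, r_t = (x.reshape(-1) for x in (q_t, q_tm1, r_t))  # [B*C]
  d_t = np.tile(d_t, num_critic_heads)  # [B*C]
  assert q_t.shape == q_tm1.shape == r_t.shape == d_t.shape == (6,)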
|
acme-master
|
acme/agents/tf/mompo/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test for the distributed agent."""
from typing import Sequence
import acme
from acme import specs
from acme.agents.tf import dmpo
from acme.testing import fakes
from acme.tf import networks
from acme.tf import utils as tf2_utils
import launchpad as lp
import numpy as np
import sonnet as snt
from absl.testing import absltest
def make_networks(
action_spec: specs.BoundedArray,
policy_layer_sizes: Sequence[int] = (50,),
critic_layer_sizes: Sequence[int] = (50,),
vmin: float = -150.,
vmax: float = 150.,
num_atoms: int = 51,
):
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions,
tanh_mean=True,
init_scale=0.3,
fixed_scale=True,
use_tfd_independent=False)
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
critic_network = networks.CriticMultiplexer(
critic_network=networks.LayerNormMLP(
critic_layer_sizes, activate_final=True),
action_network=networks.ClipToSpec(action_spec))
critic_network = snt.Sequential(
[critic_network,
networks.DiscreteValuedHead(vmin, vmax, num_atoms)])
return {
'policy': policy_network,
'critic': critic_network,
'observation': tf2_utils.batch_concat,
}
class DistributedAgentTest(absltest.TestCase):
"""Simple integration/smoke test for the distributed agent."""
def test_agent(self):
agent = dmpo.DistributedDistributionalMPO(
environment_factory=lambda x: fakes.ContinuousEnvironment(bounded=True),
network_factory=make_networks,
num_actors=2,
batch_size=32,
min_replay_size=32,
max_replay_size=1000,
)
program = agent.build()
(learner_node,) = program.groups['learner']
learner_node.disable_run()
lp.launch(program, launch_type='test_mt')
learner: acme.Learner = learner_node.create_handle().dereference()
for _ in range(5):
learner.step()
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/dmpo/agent_distributed_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the distributional MPO distributed agent class."""
from typing import Callable, Dict, Optional, Sequence
import acme
from acme import datasets
from acme import specs
from acme import types
from acme.adders import reverb as adders
from acme.agents.tf import actors
from acme.agents.tf.dmpo import learning
from acme.datasets import image_augmentation
from acme.tf import networks
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.tf import variable_utils as tf2_variable_utils
from acme.utils import counting
from acme.utils import loggers
from acme.utils import lp_utils
from acme.utils import observers as observers_lib
import dm_env
import launchpad as lp
import reverb
import sonnet as snt
import tensorflow as tf
class DistributedDistributionalMPO:
"""Program definition for distributional MPO."""
def __init__(
self,
environment_factory: Callable[[bool], dm_env.Environment],
network_factory: Callable[[specs.BoundedArray], Dict[str, snt.Module]],
num_actors: int = 1,
num_caches: int = 0,
environment_spec: Optional[specs.EnvironmentSpec] = None,
batch_size: int = 256,
prefetch_size: int = 4,
observation_augmentation: Optional[types.TensorTransformation] = None,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: Optional[float] = 32.0,
n_step: int = 5,
num_samples: int = 20,
additional_discount: float = 0.99,
target_policy_update_period: int = 100,
target_critic_update_period: int = 100,
variable_update_period: int = 1000,
policy_loss_factory: Optional[Callable[[], snt.Module]] = None,
max_actor_steps: Optional[int] = None,
log_every: float = 10.0,
make_observers: Optional[Callable[
[], Sequence[observers_lib.EnvLoopObserver]]] = None):
if environment_spec is None:
environment_spec = specs.make_environment_spec(environment_factory(False))
self._environment_factory = environment_factory
self._network_factory = network_factory
self._policy_loss_factory = policy_loss_factory
self._environment_spec = environment_spec
self._num_actors = num_actors
self._num_caches = num_caches
self._batch_size = batch_size
self._prefetch_size = prefetch_size
self._observation_augmentation = observation_augmentation
self._min_replay_size = min_replay_size
self._max_replay_size = max_replay_size
self._samples_per_insert = samples_per_insert
self._n_step = n_step
self._additional_discount = additional_discount
self._num_samples = num_samples
self._target_policy_update_period = target_policy_update_period
self._target_critic_update_period = target_critic_update_period
self._variable_update_period = variable_update_period
self._max_actor_steps = max_actor_steps
self._log_every = log_every
self._make_observers = make_observers
def replay(self):
"""The replay storage."""
if self._samples_per_insert is not None:
# Create enough of an error buffer to give a 10% tolerance in rate.
samples_per_insert_tolerance = 0.1 * self._samples_per_insert
error_buffer = self._min_replay_size * samples_per_insert_tolerance
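      # E.g. with the constructor defaults min_replay_size=1000 and
      # samples_per_insert=32.0, this gives an error buffer of
      # 1000 * 0.1 * 32.0 = 3200 samples.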
limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._min_replay_size,
samples_per_insert=self._samples_per_insert,
error_buffer=error_buffer)
else:
limiter = reverb.rate_limiters.MinSize(self._min_replay_size)
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._max_replay_size,
rate_limiter=limiter,
signature=adders.NStepTransitionAdder.signature(
self._environment_spec))
return [replay_table]
def counter(self):
return tf2_savers.CheckpointingRunner(counting.Counter(),
time_delta_minutes=1,
subdirectory='counter')
def coordinator(self, counter: counting.Counter, max_actor_steps: int):
return lp_utils.StepsLimiter(counter, max_actor_steps)
def learner(
self,
replay: reverb.Client,
counter: counting.Counter,
):
"""The Learning part of the agent."""
act_spec = self._environment_spec.actions
obs_spec = self._environment_spec.observations
# Create online and target networks.
online_networks = self._network_factory(act_spec)
target_networks = self._network_factory(act_spec)
# Make sure observation network is a Sonnet Module.
observation_network = online_networks.get('observation', tf.identity)
target_observation_network = target_networks.get('observation', tf.identity)
observation_network = tf2_utils.to_sonnet_module(observation_network)
target_observation_network = tf2_utils.to_sonnet_module(
target_observation_network)
# Get embedding spec and create observation network variables.
emb_spec = tf2_utils.create_variables(observation_network, [obs_spec])
# Create variables.
tf2_utils.create_variables(online_networks['policy'], [emb_spec])
tf2_utils.create_variables(online_networks['critic'], [emb_spec, act_spec])
tf2_utils.create_variables(target_networks['policy'], [emb_spec])
tf2_utils.create_variables(target_networks['critic'], [emb_spec, act_spec])
tf2_utils.create_variables(target_observation_network, [obs_spec])
# The dataset object to learn from.
dataset = datasets.make_reverb_dataset(server_address=replay.server_address)
dataset = dataset.batch(self._batch_size, drop_remainder=True)
if self._observation_augmentation:
transform = image_augmentation.make_transform(
observation_transform=self._observation_augmentation)
dataset = dataset.map(
transform, num_parallel_calls=16, deterministic=False)
dataset = dataset.prefetch(self._prefetch_size)
counter = counting.Counter(counter, 'learner')
logger = loggers.make_default_logger(
'learner', time_delta=self._log_every, steps_key='learner_steps')
# Create policy loss module if a factory is passed.
if self._policy_loss_factory:
policy_loss_module = self._policy_loss_factory()
else:
policy_loss_module = None
# Return the learning agent.
return learning.DistributionalMPOLearner(
policy_network=online_networks['policy'],
critic_network=online_networks['critic'],
observation_network=observation_network,
target_policy_network=target_networks['policy'],
target_critic_network=target_networks['critic'],
target_observation_network=target_observation_network,
discount=self._additional_discount,
num_samples=self._num_samples,
target_policy_update_period=self._target_policy_update_period,
target_critic_update_period=self._target_critic_update_period,
policy_loss_module=policy_loss_module,
dataset=dataset,
counter=counter,
logger=logger)
def actor(
self,
replay: reverb.Client,
variable_source: acme.VariableSource,
counter: counting.Counter,
actor_id: int,
) -> acme.EnvironmentLoop:
"""The actor process."""
action_spec = self._environment_spec.actions
observation_spec = self._environment_spec.observations
# Create environment and target networks to act with.
environment = self._environment_factory(False)
agent_networks = self._network_factory(action_spec)
# Make sure observation network is defined.
observation_network = agent_networks.get('observation', tf.identity)
# Create a stochastic behavior policy.
behavior_network = snt.Sequential([
observation_network,
agent_networks['policy'],
networks.StochasticSamplingHead(),
])
# Ensure network variables are created.
tf2_utils.create_variables(behavior_network, [observation_spec])
policy_variables = {'policy': behavior_network.variables}
# Create the variable client responsible for keeping the actor up-to-date.
variable_client = tf2_variable_utils.VariableClient(
variable_source,
policy_variables,
update_period=self._variable_update_period)
# Make sure not to use a random policy after checkpoint restoration by
# assigning variables before running the environment loop.
variable_client.update_and_wait()
# Component to add things into replay.
adder = adders.NStepTransitionAdder(
client=replay,
n_step=self._n_step,
discount=self._additional_discount)
# Create the agent.
actor = actors.FeedForwardActor(
policy_network=behavior_network,
adder=adder,
variable_client=variable_client)
# Create logger and counter; only the first actor stores logs to bigtable.
save_data = actor_id == 0
counter = counting.Counter(counter, 'actor')
logger = loggers.make_default_logger(
'actor',
save_data=save_data,
time_delta=self._log_every,
steps_key='actor_steps')
observers = self._make_observers() if self._make_observers else ()
# Create the run loop and return it.
return acme.EnvironmentLoop(
environment, actor, counter, logger, observers=observers)
def evaluator(
self,
variable_source: acme.VariableSource,
counter: counting.Counter,
):
"""The evaluation process."""
action_spec = self._environment_spec.actions
observation_spec = self._environment_spec.observations
# Create environment and target networks to act with.
environment = self._environment_factory(True)
agent_networks = self._network_factory(action_spec)
# Make sure observation network is defined.
observation_network = agent_networks.get('observation', tf.identity)
# Create a stochastic behavior policy.
evaluator_network = snt.Sequential([
observation_network,
agent_networks['policy'],
networks.StochasticMeanHead(),
])
# Ensure network variables are created.
tf2_utils.create_variables(evaluator_network, [observation_spec])
policy_variables = {'policy': evaluator_network.variables}
# Create the variable client responsible for keeping the actor up-to-date.
variable_client = tf2_variable_utils.VariableClient(
variable_source,
policy_variables,
update_period=self._variable_update_period)
# Make sure not to evaluate a random actor by assigning variables before
# running the environment loop.
variable_client.update_and_wait()
# Create the agent.
evaluator = actors.FeedForwardActor(
policy_network=evaluator_network, variable_client=variable_client)
# Create logger and counter.
counter = counting.Counter(counter, 'evaluator')
logger = loggers.make_default_logger(
'evaluator', time_delta=self._log_every, steps_key='evaluator_steps')
observers = self._make_observers() if self._make_observers else ()
# Create the run loop and return it.
return acme.EnvironmentLoop(
environment,
evaluator,
counter,
logger,
observers=observers)
def build(self, name='dmpo'):
"""Build the distributed agent topology."""
program = lp.Program(name=name)
with program.group('replay'):
replay = program.add_node(lp.ReverbNode(self.replay))
with program.group('counter'):
counter = program.add_node(lp.CourierNode(self.counter))
if self._max_actor_steps:
_ = program.add_node(
lp.CourierNode(self.coordinator, counter, self._max_actor_steps))
with program.group('learner'):
learner = program.add_node(
lp.CourierNode(self.learner, replay, counter))
with program.group('evaluator'):
program.add_node(
lp.CourierNode(self.evaluator, learner, counter))
if not self._num_caches:
# Use our learner as a single variable source.
sources = [learner]
else:
with program.group('cacher'):
# Create a set of learner caches.
sources = []
for _ in range(self._num_caches):
cacher = program.add_node(
lp.CacherNode(
learner, refresh_interval_ms=2000, stale_after_ms=4000))
sources.append(cacher)
with program.group('actor'):
# Add actors which pull round-robin from our variable sources.
for actor_id in range(self._num_actors):
source = sources[actor_id % len(sources)]
program.add_node(
lp.CourierNode(self.actor, replay, source, counter, actor_id))
return program
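# A minimal usage sketch, mirroring agent_distributed_test.py: build the
# program from an environment factory and a network factory and hand it to
# Launchpad. The fake environment and the tiny networks below are stand-ins;
# any factories matching the signatures documented in `__init__` will do.
def _example_launch():
  """Builds and launches a small DistributedDistributionalMPO program."""
  import numpy as np  # Local imports keep the sketch self-contained.
  from acme.testing import fakes

  def network_factory(action_spec: specs.BoundedArray):
    num_dimensions = np.prod(action_spec.shape, dtype=int)
    policy_network = snt.Sequential([
        networks.LayerNormMLP((50,), activate_final=True),
        networks.MultivariateNormalDiagHead(num_dimensions),
    ])
    critic_network = snt.Sequential([
        networks.CriticMultiplexer(
            critic_network=networks.LayerNormMLP((50,), activate_final=True),
            action_network=networks.ClipToSpec(action_spec)),
        networks.DiscreteValuedHead(-150., 150., 51),
    ])
    return {
        'policy': policy_network,
        'critic': critic_network,
        'observation': tf2_utils.batch_concat,
    }

  agent = DistributedDistributionalMPO(
      environment_factory=lambda is_eval: fakes.ContinuousEnvironment(
          bounded=True),
      network_factory=network_factory,
      num_actors=2,
      batch_size=32,
      min_replay_size=32,
      max_replay_size=1000)
  lp.launch(agent.build())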
|
acme-master
|
acme/agents/tf/dmpo/agent_distributed.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of a distributional MPO agent."""
from acme.agents.tf.dmpo.agent import DistributionalMPO
from acme.agents.tf.dmpo.agent_distributed import DistributedDistributionalMPO
from acme.agents.tf.dmpo.learning import DistributionalMPOLearner
|
acme-master
|
acme/agents/tf/dmpo/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the distributional MPO agent."""
from typing import Dict, Sequence
import acme
from acme import specs
from acme.agents.tf import dmpo
from acme.testing import fakes
from acme.tf import networks
import numpy as np
import sonnet as snt
from absl.testing import absltest
def make_networks(
action_spec: specs.Array,
policy_layer_sizes: Sequence[int] = (300, 200),
critic_layer_sizes: Sequence[int] = (400, 300),
) -> Dict[str, snt.Module]:
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
critic_layer_sizes = list(critic_layer_sizes)
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes),
networks.MultivariateNormalDiagHead(num_dimensions),
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
critic_network = snt.Sequential([
networks.CriticMultiplexer(
critic_network=networks.LayerNormMLP(critic_layer_sizes)),
networks.DiscreteValuedHead(0., 1., 10),
])
return {
'policy': policy_network,
'critic': critic_network,
}
class DMPOTest(absltest.TestCase):
def test_dmpo(self):
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(episode_length=10)
spec = specs.make_environment_spec(environment)
# Create networks.
agent_networks = make_networks(spec.actions)
# Construct the agent.
agent = dmpo.DistributionalMPO(
spec,
policy_network=agent_networks['policy'],
critic_network=agent_networks['critic'],
batch_size=10,
samples_per_insert=2,
min_replay_size=10)
# Try running the environment loop. We have no assertions here because all
# we care about is that the agent runs without raising any errors.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=2)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/dmpo/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributional MPO agent implementation."""
import copy
from typing import Optional
from acme import datasets
from acme import specs
from acme import types
from acme.adders import reverb as adders
from acme.agents import agent
from acme.agents.tf import actors
from acme.agents.tf.dmpo import learning
from acme.tf import networks
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import reverb
import sonnet as snt
import tensorflow as tf
class DistributionalMPO(agent.Agent):
"""Distributional MPO Agent.
This implements a single-process distributional MPO agent. This is an
actor-critic algorithm that generates data via a behavior policy, inserts
N-step transitions into a replay buffer, and periodically updates the policy
(and as a result the behavior) by sampling uniformly from this buffer.
This agent distinguishes itself from the MPO agent by using a distributional
critic (state-action value approximator).
"""
def __init__(self,
environment_spec: specs.EnvironmentSpec,
policy_network: snt.Module,
critic_network: snt.Module,
observation_network: types.TensorTransformation = tf.identity,
discount: float = 0.99,
batch_size: int = 256,
prefetch_size: int = 4,
target_policy_update_period: int = 100,
target_critic_update_period: int = 100,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: float = 32.0,
policy_loss_module: Optional[snt.Module] = None,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
n_step: int = 5,
num_samples: int = 20,
clipping: bool = True,
logger: Optional[loggers.Logger] = None,
counter: Optional[counting.Counter] = None,
checkpoint: bool = True,
replay_table_name: str = adders.DEFAULT_PRIORITY_TABLE):
"""Initialize the agent.
Args:
environment_spec: description of the actions, observations, etc.
policy_network: the online (optimized) policy.
critic_network: the online critic.
observation_network: optional network to transform the observations before
they are fed into any network.
discount: discount to use for TD updates.
batch_size: batch size for updates.
prefetch_size: size to prefetch from replay.
target_policy_update_period: number of updates to perform before updating
the target policy network.
target_critic_update_period: number of updates to perform before updating
the target critic network.
min_replay_size: minimum replay size before updating.
max_replay_size: maximum replay size.
samples_per_insert: number of samples to take from replay for every insert
that is made.
policy_loss_module: configured MPO loss function for the policy
optimization; defaults to sensible values on the control suite.
See `acme/tf/losses/mpo.py` for more details.
policy_optimizer: optimizer to be used on the policy.
critic_optimizer: optimizer to be used on the critic.
n_step: number of steps to squash into a single transition.
num_samples: number of actions to sample when doing a Monte Carlo
integration with respect to the policy.
clipping: whether to clip gradients by global norm.
logger: logging object used to write to logs.
counter: counter object used to keep track of steps.
checkpoint: boolean indicating whether to checkpoint the learner.
replay_table_name: string indicating what name to give the replay table.
"""
# Create a replay server to add data to.
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=max_replay_size,
rate_limiter=reverb.rate_limiters.MinSize(min_size_to_sample=1),
signature=adders.NStepTransitionAdder.signature(environment_spec))
self._server = reverb.Server([replay_table], port=None)
# The adder is used to insert observations into replay.
address = f'localhost:{self._server.port}'
adder = adders.NStepTransitionAdder(
client=reverb.Client(address),
n_step=n_step,
discount=discount)
# The dataset object to learn from.
dataset = datasets.make_reverb_dataset(
table=replay_table_name,
server_address=address,
batch_size=batch_size,
prefetch_size=prefetch_size)
# Make sure observation network is a Sonnet Module.
observation_network = tf2_utils.to_sonnet_module(observation_network)
# Create target networks before creating online/target network variables.
target_policy_network = copy.deepcopy(policy_network)
target_critic_network = copy.deepcopy(critic_network)
target_observation_network = copy.deepcopy(observation_network)
# Get observation and action specs.
act_spec = environment_spec.actions
obs_spec = environment_spec.observations
emb_spec = tf2_utils.create_variables(observation_network, [obs_spec])
# Create the behavior policy.
behavior_network = snt.Sequential([
observation_network,
policy_network,
networks.StochasticSamplingHead(),
])
# Create variables.
tf2_utils.create_variables(policy_network, [emb_spec])
tf2_utils.create_variables(critic_network, [emb_spec, act_spec])
tf2_utils.create_variables(target_policy_network, [emb_spec])
tf2_utils.create_variables(target_critic_network, [emb_spec, act_spec])
tf2_utils.create_variables(target_observation_network, [obs_spec])
# Create the actor which defines how we take actions.
actor = actors.FeedForwardActor(
policy_network=behavior_network, adder=adder)
# Create optimizers.
policy_optimizer = policy_optimizer or snt.optimizers.Adam(1e-4)
critic_optimizer = critic_optimizer or snt.optimizers.Adam(1e-4)
# The learner updates the parameters (and initializes them).
learner = learning.DistributionalMPOLearner(
policy_network=policy_network,
critic_network=critic_network,
observation_network=observation_network,
target_policy_network=target_policy_network,
target_critic_network=target_critic_network,
target_observation_network=target_observation_network,
policy_loss_module=policy_loss_module,
policy_optimizer=policy_optimizer,
critic_optimizer=critic_optimizer,
clipping=clipping,
discount=discount,
num_samples=num_samples,
target_policy_update_period=target_policy_update_period,
target_critic_update_period=target_critic_update_period,
dataset=dataset,
logger=logger,
counter=counter,
checkpoint=checkpoint)
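    # With the constructor defaults (batch_size=256, samples_per_insert=32.0,
    # min_replay_size=1000), the base Agent below takes one learner step per
    # 256 / 32 = 8 new observations, once max(256, 1000) = 1000 observations
    # have been collected.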
super().__init__(
actor=actor,
learner=learner,
min_observations=max(batch_size, min_replay_size),
observations_per_step=float(batch_size) / samples_per_insert)
|
acme-master
|
acme/agents/tf/dmpo/agent.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributional MPO learner implementation."""
import time
from typing import List, Optional
import acme
from acme import types
from acme.tf import losses
from acme.tf import networks
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import sonnet as snt
import tensorflow as tf
class DistributionalMPOLearner(acme.Learner):
"""Distributional MPO learner."""
def __init__(
self,
policy_network: snt.Module,
critic_network: snt.Module,
target_policy_network: snt.Module,
target_critic_network: snt.Module,
discount: float,
num_samples: int,
target_policy_update_period: int,
target_critic_update_period: int,
dataset: tf.data.Dataset,
observation_network: types.TensorTransformation = tf.identity,
target_observation_network: types.TensorTransformation = tf.identity,
policy_loss_module: Optional[snt.Module] = None,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
dual_optimizer: Optional[snt.Optimizer] = None,
clipping: bool = True,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = True,
):
# Store online and target networks.
self._policy_network = policy_network
self._critic_network = critic_network
self._target_policy_network = target_policy_network
self._target_critic_network = target_critic_network
# Make sure observation networks are snt.Module's so they have variables.
self._observation_network = tf2_utils.to_sonnet_module(observation_network)
self._target_observation_network = tf2_utils.to_sonnet_module(
target_observation_network)
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger('learner')
# Other learner parameters.
self._discount = discount
self._num_samples = num_samples
self._clipping = clipping
# Necessary to track when to update target networks.
self._num_steps = tf.Variable(0, dtype=tf.int32)
self._target_policy_update_period = target_policy_update_period
self._target_critic_update_period = target_critic_update_period
# Batch dataset and create iterator.
# TODO(b/155086959): Fix type stubs and remove.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
self._policy_loss_module = policy_loss_module or losses.MPO(
epsilon=1e-1,
epsilon_penalty=1e-3,
epsilon_mean=2.5e-3,
epsilon_stddev=1e-6,
init_log_temperature=10.,
init_log_alpha_mean=10.,
init_log_alpha_stddev=1000.)
# Create the optimizers.
self._critic_optimizer = critic_optimizer or snt.optimizers.Adam(1e-4)
self._policy_optimizer = policy_optimizer or snt.optimizers.Adam(1e-4)
self._dual_optimizer = dual_optimizer or snt.optimizers.Adam(1e-2)
# Expose the variables.
policy_network_to_expose = snt.Sequential(
[self._target_observation_network, self._target_policy_network])
self._variables = {
'critic': self._target_critic_network.variables,
'policy': policy_network_to_expose.variables,
}
# Create a checkpointer and snapshotter object.
self._checkpointer = None
self._snapshotter = None
if checkpoint:
self._checkpointer = tf2_savers.Checkpointer(
subdirectory='dmpo_learner',
objects_to_save={
'counter': self._counter,
'policy': self._policy_network,
'critic': self._critic_network,
'observation': self._observation_network,
'target_policy': self._target_policy_network,
'target_critic': self._target_critic_network,
'target_observation': self._target_observation_network,
'policy_optimizer': self._policy_optimizer,
'critic_optimizer': self._critic_optimizer,
'dual_optimizer': self._dual_optimizer,
'policy_loss_module': self._policy_loss_module,
'num_steps': self._num_steps,
})
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={
'policy':
snt.Sequential([
self._target_observation_network,
self._target_policy_network
]),
})
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
@tf.function
def _step(self) -> types.NestedTensor:
# Update target network.
online_policy_variables = self._policy_network.variables
target_policy_variables = self._target_policy_network.variables
online_critic_variables = (
*self._observation_network.variables,
*self._critic_network.variables,
)
target_critic_variables = (
*self._target_observation_network.variables,
*self._target_critic_network.variables,
)
# Make online policy -> target policy network update ops.
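    # Note: `_step` is wrapped in `tf.function` and these conditions are
    # Tensors, so AutoGraph converts the Python `if` statements below into
    # `tf.cond` ops.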
if tf.math.mod(self._num_steps, self._target_policy_update_period) == 0:
for src, dest in zip(online_policy_variables, target_policy_variables):
dest.assign(src)
# Make online critic -> target critic network update ops.
if tf.math.mod(self._num_steps, self._target_critic_update_period) == 0:
for src, dest in zip(online_critic_variables, target_critic_variables):
dest.assign(src)
self._num_steps.assign_add(1)
# Get data from replay (dropping extras if any). Note there is no
# extra data here because we do not insert any into Reverb.
inputs = next(self._iterator)
transitions: types.Transition = inputs.data
# Get batch size and scalar dtype.
batch_size = transitions.reward.shape[0]
# Cast the additional discount to match the environment discount dtype.
discount = tf.cast(self._discount, dtype=transitions.discount.dtype)
with tf.GradientTape(persistent=True) as tape:
# Maybe transform the observation before feeding into policy and critic.
# Transforming the observations this way at the start of the learning
# step effectively means that the policy and critic share observation
# network weights.
o_tm1 = self._observation_network(transitions.observation)
      # This stop_gradient prevents gradients from propagating into the target
      # observation network. In addition, since the online policy network is
      # evaluated at o_t, the policy loss does not influence the observation
      # network training.
o_t = tf.stop_gradient(
self._target_observation_network(transitions.next_observation))
# Get online and target action distributions from policy networks.
online_action_distribution = self._policy_network(o_t)
target_action_distribution = self._target_policy_network(o_t)
# Sample actions to evaluate policy; of size [N, B, ...].
sampled_actions = target_action_distribution.sample(self._num_samples)
# Tile embedded observations to feed into the target critic network.
# Note: this is more efficient than tiling before the embedding layer.
tiled_o_t = tf2_utils.tile_tensor(o_t, self._num_samples) # [N, B, ...]
# Compute target-estimated distributional value of sampled actions at o_t.
sampled_q_t_distributions = self._target_critic_network(
# Merge batch dimensions; to shape [N*B, ...].
snt.merge_leading_dims(tiled_o_t, num_dims=2),
snt.merge_leading_dims(sampled_actions, num_dims=2))
# Compute average logits by first reshaping them and normalizing them
# across atoms.
new_shape = [self._num_samples, batch_size, -1] # [N, B, A]
sampled_logits = tf.reshape(sampled_q_t_distributions.logits, new_shape)
sampled_logprobs = tf.math.log_softmax(sampled_logits, axis=-1)
averaged_logits = tf.reduce_logsumexp(sampled_logprobs, axis=0)
# Construct the expected distributional value for bootstrapping.
q_t_distribution = networks.DiscreteValuedDistribution(
values=sampled_q_t_distributions.values, logits=averaged_logits)
# Compute online critic value distribution of a_tm1 in state o_tm1.
q_tm1_distribution = self._critic_network(o_tm1, transitions.action)
# Compute critic distributional loss.
critic_loss = losses.categorical(q_tm1_distribution, transitions.reward,
discount * transitions.discount,
q_t_distribution)
critic_loss = tf.reduce_mean(critic_loss)
# Compute Q-values of sampled actions and reshape to [N, B].
sampled_q_values = sampled_q_t_distributions.mean()
sampled_q_values = tf.reshape(sampled_q_values, (self._num_samples, -1))
# Compute MPO policy loss.
policy_loss, policy_stats = self._policy_loss_module(
online_action_distribution=online_action_distribution,
target_action_distribution=target_action_distribution,
actions=sampled_actions,
q_values=sampled_q_values)
# For clarity, explicitly define which variables are trained by which loss.
critic_trainable_variables = (
# In this agent, the critic loss trains the observation network.
self._observation_network.trainable_variables +
self._critic_network.trainable_variables)
policy_trainable_variables = self._policy_network.trainable_variables
# The following are the MPO dual variables, stored in the loss module.
dual_trainable_variables = self._policy_loss_module.trainable_variables
# Compute gradients.
critic_gradients = tape.gradient(critic_loss, critic_trainable_variables)
policy_gradients, dual_gradients = tape.gradient(
policy_loss, (policy_trainable_variables, dual_trainable_variables))
# Delete the tape manually because of the persistent=True flag.
del tape
# Maybe clip gradients.
if self._clipping:
policy_gradients = tuple(tf.clip_by_global_norm(policy_gradients, 40.)[0])
critic_gradients = tuple(tf.clip_by_global_norm(critic_gradients, 40.)[0])
# Apply gradients.
self._critic_optimizer.apply(critic_gradients, critic_trainable_variables)
self._policy_optimizer.apply(policy_gradients, policy_trainable_variables)
self._dual_optimizer.apply(dual_gradients, dual_trainable_variables)
# Losses to track.
fetches = {
'critic_loss': critic_loss,
'policy_loss': policy_loss,
}
fetches.update(policy_stats) # Log MPO stats.
return fetches
def step(self):
# Run the learning step.
fetches = self._step()
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Update our counts and record it.
counts = self._counter.increment(steps=1, walltime=elapsed_time)
fetches.update(counts)
# Checkpoint and attempt to write the logs.
if self._checkpointer is not None:
self._checkpointer.save()
if self._snapshotter is not None:
self._snapshotter.save()
self._logger.write(fetches)
def get_variables(self, names: List[str]) -> List[List[np.ndarray]]:
return [tf2_utils.to_numpy(self._variables[name]) for name in names]
|
acme-master
|
acme/agents/tf/dmpo/learning.py
|