| python_code | repo_name | file_path |
|---|---|---|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of a behavior cloning (BC) agent."""
from acme.agents.tf.bc.learning import BCLearner
|
acme-master
|
acme/agents/tf/bc/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BC Learner implementation."""
from typing import Dict, List, Optional
import acme
from acme import types
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import sonnet as snt
import tensorflow as tf
class BCLearner(acme.Learner, tf2_savers.TFSaveable):
"""BC learner.
This is the learning component of a BC agent. IE it takes a dataset as input
and implements update functionality to learn from this dataset.
"""
def __init__(self,
network: snt.Module,
learning_rate: float,
dataset: tf.data.Dataset,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = True):
"""Initializes the learner.
Args:
network: the BC network (the one being optimized)
learning_rate: learning rate for the cross-entropy update.
dataset: dataset to learn from.
counter: Counter object for (potentially distributed) counting.
logger: Logger object for writing logs to.
checkpoint: boolean indicating whether to checkpoint the learner.
"""
self._counter = counter or counting.Counter()
self._logger = logger or loggers.TerminalLogger('learner', time_delta=1.)
# Get an iterator over the dataset.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
# TODO(b/155086959): Fix type stubs and remove.
self._network = network
self._optimizer = snt.optimizers.Adam(learning_rate)
self._variables: List[List[tf.Tensor]] = [network.trainable_variables]
self._num_steps = tf.Variable(0, dtype=tf.int32)
# Create a snapshotter object.
if checkpoint:
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={'network': network}, time_delta_minutes=60.)
else:
self._snapshotter = None
@tf.function
def _step(self) -> Dict[str, tf.Tensor]:
"""Do a step of SGD and update the priorities."""
# Pull out the data needed for updates/priorities.
inputs = next(self._iterator)
transitions: types.Transition = inputs.data
with tf.GradientTape() as tape:
# Evaluate our networks.
logits = self._network(transitions.observation)
cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce(transitions.action, logits)
gradients = tape.gradient(loss, self._network.trainable_variables)
self._optimizer.apply(gradients, self._network.trainable_variables)
self._num_steps.assign_add(1)
# Compute the global norm of the gradients for logging.
global_gradient_norm = tf.linalg.global_norm(gradients)
fetches = {'loss': loss, 'gradient_norm': global_gradient_norm}
return fetches
def step(self):
# Do a batch of SGD.
result = self._step()
# Update our counts and record it.
counts = self._counter.increment(steps=1)
result.update(counts)
# Snapshot and attempt to write logs.
if self._snapshotter is not None:
self._snapshotter.save()
self._logger.write(result)
def get_variables(self, names: List[str]) -> List[np.ndarray]:
return tf2_utils.to_numpy(self._variables)
@property
def state(self):
"""Returns the stateful parts of the learner for checkpointing."""
return {
'network': self._network,
'optimizer': self._optimizer,
'num_steps': self._num_steps
}
|
acme-master
|
acme/agents/tf/bc/learning.py
|
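Usage sketch (not part of the Acme sources above): one way to drive the BCLearner with a small in-memory demonstration dataset. The `Sample` wrapper, the toy data, and the MLP sizes are illustrative assumptions; the only contract the learner relies on is an iterator yielding objects whose `.data` field is a `types.Transition` with `observation` and integer `action` entries, and a network producing action logits.

import collections

import numpy as np
import sonnet as snt
import tensorflow as tf

from acme import types
from acme.agents.tf import bc

# Mimics the `.data` attribute of a reverb.ReplaySample (an assumption; any
# object with a `.data` Transition would do).
Sample = collections.namedtuple('Sample', ['data'])

observations = np.random.randn(1024, 4).astype(np.float32)
actions = np.random.randint(0, 3, size=(1024,)).astype(np.int32)  # 3 discrete actions.

dataset = tf.data.Dataset.from_tensor_slices((observations, actions))
dataset = dataset.batch(32, drop_remainder=True).repeat()
dataset = dataset.map(
    lambda o, a: Sample(types.Transition(
        observation=o,
        action=a,
        reward=tf.zeros_like(a, dtype=tf.float32),
        discount=tf.ones_like(a, dtype=tf.float32),
        next_observation=o)))

learner = bc.BCLearner(
    network=snt.nets.MLP([64, 64, 3]),  # Outputs logits over the 3 actions.
    learning_rate=1e-3,
    dataset=dataset,
    checkpoint=False)  # Skip snapshotting for this toy run.

for _ in range(100):
  learner.step()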
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test for the distributed agent."""
from typing import Sequence
import acme
from acme import specs
from acme.agents.tf import mpo
from acme.testing import fakes
from acme.tf import networks
from acme.tf import utils as tf2_utils
import launchpad as lp
import numpy as np
import sonnet as snt
from absl.testing import absltest
def make_networks(
action_spec: specs.BoundedArray,
policy_layer_sizes: Sequence[int] = (50, 50),
critic_layer_sizes: Sequence[int] = (50, 50),
):
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
observation_network = tf2_utils.batch_concat
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions,
tanh_mean=True,
init_scale=0.3,
fixed_scale=True,
use_tfd_independent=False)
])
evaluator_network = snt.Sequential([
observation_network,
policy_network,
networks.StochasticMeanHead(),
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
multiplexer = networks.CriticMultiplexer(
action_network=networks.ClipToSpec(action_spec))
critic_network = snt.Sequential([
multiplexer,
networks.LayerNormMLP(critic_layer_sizes, activate_final=True),
networks.NearZeroInitializedLinear(1),
])
return {
'policy': policy_network,
'critic': critic_network,
'observation': observation_network,
'evaluator': evaluator_network,
}
class DistributedAgentTest(absltest.TestCase):
"""Simple integration/smoke test for the distributed agent."""
def test_agent(self):
agent = mpo.DistributedMPO(
environment_factory=lambda x: fakes.ContinuousEnvironment(bounded=True),
network_factory=make_networks,
num_actors=2,
batch_size=32,
min_replay_size=32,
max_replay_size=1000,
)
program = agent.build()
(learner_node,) = program.groups['learner']
learner_node.disable_run()
lp.launch(program, launch_type='test_mt')
learner: acme.Learner = learner_node.create_handle().dereference()
for _ in range(5):
learner.step()
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/mpo/agent_distributed_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the MPO distributed agent class."""
from typing import Callable, Dict, Optional
import acme
from acme import datasets
from acme import specs
from acme.adders import reverb as adders
from acme.agents.tf import actors
from acme.agents.tf.mpo import learning
from acme.tf import networks
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.tf import variable_utils as tf2_variable_utils
from acme.utils import counting
from acme.utils import loggers
from acme.utils import lp_utils
import dm_env
import launchpad as lp
import reverb
import sonnet as snt
import tensorflow as tf
class DistributedMPO:
"""Program definition for MPO."""
def __init__(
self,
environment_factory: Callable[[bool], dm_env.Environment],
network_factory: Callable[[specs.BoundedArray], Dict[str, snt.Module]],
num_actors: int = 1,
num_caches: int = 0,
environment_spec: Optional[specs.EnvironmentSpec] = None,
batch_size: int = 256,
prefetch_size: int = 4,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: Optional[float] = 32.0,
n_step: int = 5,
num_samples: int = 20,
additional_discount: float = 0.99,
target_policy_update_period: int = 100,
target_critic_update_period: int = 100,
variable_update_period: int = 1000,
policy_loss_factory: Optional[Callable[[], snt.Module]] = None,
max_actor_steps: Optional[int] = None,
log_every: float = 10.0,
):
if environment_spec is None:
environment_spec = specs.make_environment_spec(environment_factory(False))
self._environment_factory = environment_factory
self._network_factory = network_factory
self._policy_loss_factory = policy_loss_factory
self._environment_spec = environment_spec
self._num_actors = num_actors
self._num_caches = num_caches
self._batch_size = batch_size
self._prefetch_size = prefetch_size
self._min_replay_size = min_replay_size
self._max_replay_size = max_replay_size
self._samples_per_insert = samples_per_insert
self._n_step = n_step
self._additional_discount = additional_discount
self._num_samples = num_samples
self._target_policy_update_period = target_policy_update_period
self._target_critic_update_period = target_critic_update_period
self._variable_update_period = variable_update_period
self._max_actor_steps = max_actor_steps
self._log_every = log_every
def replay(self):
"""The replay storage."""
if self._samples_per_insert is not None:
# Create enough of an error buffer to give a 10% tolerance in rate.
samples_per_insert_tolerance = 0.1 * self._samples_per_insert
error_buffer = self._min_replay_size * samples_per_insert_tolerance
limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._min_replay_size,
samples_per_insert=self._samples_per_insert,
error_buffer=error_buffer)
else:
limiter = reverb.rate_limiters.MinSize(
min_size_to_sample=self._min_replay_size)
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._max_replay_size,
rate_limiter=limiter,
signature=adders.NStepTransitionAdder.signature(
self._environment_spec))
return [replay_table]
def counter(self):
return tf2_savers.CheckpointingRunner(counting.Counter(),
time_delta_minutes=1,
subdirectory='counter')
def coordinator(self, counter: counting.Counter, max_actor_steps: int):
return lp_utils.StepsLimiter(counter, max_actor_steps)
def learner(
self,
replay: reverb.Client,
counter: counting.Counter,
):
"""The Learning part of the agent."""
act_spec = self._environment_spec.actions
obs_spec = self._environment_spec.observations
# Create online and target networks.
online_networks = self._network_factory(act_spec)
target_networks = self._network_factory(act_spec)
# Make sure observation networks are Sonnet Modules.
observation_network = online_networks.get('observation', tf.identity)
observation_network = tf2_utils.to_sonnet_module(observation_network)
online_networks['observation'] = observation_network
target_observation_network = target_networks.get('observation', tf.identity)
target_observation_network = tf2_utils.to_sonnet_module(
target_observation_network)
target_networks['observation'] = target_observation_network
# Get embedding spec and create observation network variables.
emb_spec = tf2_utils.create_variables(observation_network, [obs_spec])
tf2_utils.create_variables(online_networks['policy'], [emb_spec])
tf2_utils.create_variables(online_networks['critic'], [emb_spec, act_spec])
tf2_utils.create_variables(target_networks['observation'], [obs_spec])
tf2_utils.create_variables(target_networks['policy'], [emb_spec])
tf2_utils.create_variables(target_networks['critic'], [emb_spec, act_spec])
# The dataset object to learn from.
dataset = datasets.make_reverb_dataset(
server_address=replay.server_address)
dataset = dataset.batch(self._batch_size, drop_remainder=True)
dataset = dataset.prefetch(self._prefetch_size)
# Create a counter and logger for bookkeeping steps and performance.
counter = counting.Counter(counter, 'learner')
logger = loggers.make_default_logger(
'learner', time_delta=self._log_every, steps_key='learner_steps')
# Create policy loss module if a factory is passed.
if self._policy_loss_factory:
policy_loss_module = self._policy_loss_factory()
else:
policy_loss_module = None
# Return the learning agent.
return learning.MPOLearner(
policy_network=online_networks['policy'],
critic_network=online_networks['critic'],
observation_network=observation_network,
target_policy_network=target_networks['policy'],
target_critic_network=target_networks['critic'],
target_observation_network=target_observation_network,
discount=self._additional_discount,
num_samples=self._num_samples,
target_policy_update_period=self._target_policy_update_period,
target_critic_update_period=self._target_critic_update_period,
policy_loss_module=policy_loss_module,
dataset=dataset,
counter=counter,
logger=logger)
def actor(
self,
replay: reverb.Client,
variable_source: acme.VariableSource,
counter: counting.Counter,
) -> acme.EnvironmentLoop:
"""The actor process."""
action_spec = self._environment_spec.actions
observation_spec = self._environment_spec.observations
# Create environment and target networks to act with.
environment = self._environment_factory(False)
agent_networks = self._network_factory(action_spec)
# Create a stochastic behavior policy.
behavior_modules = [
agent_networks.get('observation', tf.identity),
agent_networks.get('policy'),
networks.StochasticSamplingHead()
]
behavior_network = snt.Sequential(behavior_modules)
# Ensure network variables are created.
tf2_utils.create_variables(behavior_network, [observation_spec])
policy_variables = {'policy': behavior_network.variables}
# Create the variable client responsible for keeping the actor up-to-date.
variable_client = tf2_variable_utils.VariableClient(
variable_source,
policy_variables,
update_period=self._variable_update_period)
# Make sure not to use a random policy after checkpoint restoration by
# assigning variables before running the environment loop.
variable_client.update_and_wait()
# Component to add things into replay.
adder = adders.NStepTransitionAdder(
client=replay,
n_step=self._n_step,
discount=self._additional_discount)
# Create the agent.
actor = actors.FeedForwardActor(
policy_network=behavior_network,
adder=adder,
variable_client=variable_client)
# Create logger and counter; actors will not spam bigtable.
counter = counting.Counter(counter, 'actor')
logger = loggers.make_default_logger(
'actor',
save_data=False,
time_delta=self._log_every,
steps_key='actor_steps')
# Create the run loop and return it.
return acme.EnvironmentLoop(environment, actor, counter, logger)
def evaluator(
self,
variable_source: acme.VariableSource,
counter: counting.Counter,
):
"""The evaluation process."""
action_spec = self._environment_spec.actions
observation_spec = self._environment_spec.observations
# Create environment and target networks to act with.
environment = self._environment_factory(True)
agent_networks = self._network_factory(action_spec)
# Create a stochastic behavior policy.
evaluator_modules = [
agent_networks.get('observation', tf.identity),
agent_networks.get('policy'),
networks.StochasticMeanHead(),
]
if isinstance(action_spec, specs.BoundedArray):
evaluator_modules += [networks.ClipToSpec(action_spec)]
evaluator_network = snt.Sequential(evaluator_modules)
# Ensure network variables are created.
tf2_utils.create_variables(evaluator_network, [observation_spec])
policy_variables = {'policy': evaluator_network.variables}
# Create the variable client responsible for keeping the actor up-to-date.
variable_client = tf2_variable_utils.VariableClient(
variable_source,
policy_variables,
update_period=self._variable_update_period)
# Make sure not to evaluate a random actor by assigning variables before
# running the environment loop.
variable_client.update_and_wait()
# Create the agent.
evaluator = actors.FeedForwardActor(
policy_network=evaluator_network, variable_client=variable_client)
# Create logger and counter.
counter = counting.Counter(counter, 'evaluator')
logger = loggers.make_default_logger(
'evaluator', time_delta=self._log_every, steps_key='evaluator_steps')
# Create the run loop and return it.
return acme.EnvironmentLoop(environment, evaluator, counter, logger)
def build(self, name='mpo'):
"""Build the distributed agent topology."""
program = lp.Program(name=name)
with program.group('replay'):
replay = program.add_node(lp.ReverbNode(self.replay))
with program.group('counter'):
counter = program.add_node(lp.CourierNode(self.counter))
if self._max_actor_steps:
_ = program.add_node(
lp.CourierNode(self.coordinator, counter, self._max_actor_steps))
with program.group('learner'):
learner = program.add_node(
lp.CourierNode(self.learner, replay, counter))
with program.group('evaluator'):
program.add_node(
lp.CourierNode(self.evaluator, learner, counter))
if not self._num_caches:
# Use our learner as a single variable source.
sources = [learner]
else:
with program.group('cacher'):
# Create a set of learner caches.
sources = []
for _ in range(self._num_caches):
cacher = program.add_node(
lp.CacherNode(
learner, refresh_interval_ms=2000, stale_after_ms=4000))
sources.append(cacher)
with program.group('actor'):
# Add actors which pull round-robin from our variable sources.
for actor_id in range(self._num_actors):
source = sources[actor_id % len(sources)]
program.add_node(lp.CourierNode(self.actor, replay, source, counter))
return program
|
acme-master
|
acme/agents/tf/mpo/agent_distributed.py
|
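A hedged sketch of building and launching the DistributedMPO program defined above on a single machine, reusing the fake environment and the network components that appear in the test files in this dump. The layer sizes, actor count, and the 'local_mp' launch type are illustrative choices rather than Acme defaults.

import launchpad as lp
import numpy as np
import sonnet as snt

from acme.agents.tf import mpo
from acme.testing import fakes
from acme.tf import networks


def make_networks(action_spec):
  """Minimal MPO networks, mirroring the test factory earlier in this dump."""
  num_dimensions = np.prod(action_spec.shape, dtype=int)
  return {
      'policy': snt.Sequential([
          networks.LayerNormMLP([64, 64]),
          networks.MultivariateNormalDiagHead(num_dimensions),
      ]),
      'critic': networks.CriticMultiplexer(
          critic_network=networks.LayerNormMLP([64, 64, 1])),
  }


agent = mpo.DistributedMPO(
    environment_factory=lambda is_eval: fakes.ContinuousEnvironment(bounded=True),
    network_factory=make_networks,
    num_actors=2,
    batch_size=32,
    min_replay_size=32,
    max_replay_size=1000)
program = agent.build()

# 'local_mp' runs each node (replay, counter, learner, actors, evaluator) as a
# separate local process; the tests above use the 'test_mt' launch type instead.
lp.launch(program, launch_type='local_mp')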
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of a MPO agent."""
from acme.agents.tf.mpo.agent import MPO
from acme.agents.tf.mpo.agent_distributed import DistributedMPO
from acme.agents.tf.mpo.learning import MPOLearner
|
acme-master
|
acme/agents/tf/mpo/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the MPO agent."""
import acme
from acme import specs
from acme.agents.tf import mpo
from acme.testing import fakes
from acme.tf import networks
import numpy as np
import sonnet as snt
from absl.testing import absltest
def make_networks(
action_spec,
policy_layer_sizes=(10, 10),
critic_layer_sizes=(10, 10),
):
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
critic_layer_sizes = list(critic_layer_sizes) + [1]
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes),
networks.MultivariateNormalDiagHead(num_dimensions)
])
critic_network = networks.CriticMultiplexer(
critic_network=networks.LayerNormMLP(critic_layer_sizes))
return {
'policy': policy_network,
'critic': critic_network,
}
class MPOTest(absltest.TestCase):
def test_mpo(self):
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(episode_length=10, bounded=False)
spec = specs.make_environment_spec(environment)
# Create networks.
agent_networks = make_networks(spec.actions)
# Construct the agent.
agent = mpo.MPO(
spec,
policy_network=agent_networks['policy'],
critic_network=agent_networks['critic'],
batch_size=10,
samples_per_insert=2,
min_replay_size=10)
# Try running the environment loop. We have no assertions here because all
# we care about is that the agent runs without raising any errors.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=2)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/mpo/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MPO agent implementation."""
import copy
from typing import Optional
from acme import datasets
from acme import specs
from acme import types
from acme.adders import reverb as adders
from acme.agents import agent
from acme.agents.tf import actors
from acme.agents.tf.mpo import learning
from acme.tf import networks
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import reverb
import sonnet as snt
import tensorflow as tf
class MPO(agent.Agent):
"""MPO Agent.
This implements a single-process MPO agent. This is an actor-critic algorithm
that generates data via a behavior policy, inserts N-step transitions into
a replay buffer, and periodically updates the policy (and as a result the
behavior) by sampling uniformly from this buffer. This agent distinguishes
itself from the DPG agent by using MPO to learn a stochastic policy.
"""
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
policy_network: snt.Module,
critic_network: snt.Module,
observation_network: types.TensorTransformation = tf.identity,
discount: float = 0.99,
batch_size: int = 256,
prefetch_size: int = 4,
target_policy_update_period: int = 100,
target_critic_update_period: int = 100,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: float = 32.0,
policy_loss_module: Optional[snt.Module] = None,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
n_step: int = 5,
num_samples: int = 20,
clipping: bool = True,
logger: Optional[loggers.Logger] = None,
counter: Optional[counting.Counter] = None,
checkpoint: bool = True,
save_directory: str = '~/acme',
replay_table_name: str = adders.DEFAULT_PRIORITY_TABLE,
):
"""Initialize the agent.
Args:
environment_spec: description of the actions, observations, etc.
policy_network: the online (optimized) policy.
critic_network: the online critic.
observation_network: optional network to transform the observations before
they are fed into any network.
discount: discount to use for TD updates.
batch_size: batch size for updates.
prefetch_size: size to prefetch from replay.
target_policy_update_period: number of updates to perform before updating
the target policy network.
target_critic_update_period: number of updates to perform before updating
the target critic network.
min_replay_size: minimum replay size before updating.
max_replay_size: maximum replay size.
samples_per_insert: number of samples to take from replay for every insert
that is made.
policy_loss_module: configured MPO loss function for the policy
optimization; defaults to sensible values on the control suite. See
`acme/tf/losses/mpo.py` for more details.
policy_optimizer: optimizer to be used on the policy.
critic_optimizer: optimizer to be used on the critic.
n_step: number of steps to squash into a single transition.
num_samples: number of actions to sample when doing a Monte Carlo
integration with respect to the policy.
clipping: whether to clip gradients by global norm.
logger: logging object used to write to logs.
counter: counter object used to keep track of steps.
checkpoint: boolean indicating whether to checkpoint the learner.
save_directory: string indicating where the learner should save
checkpoints and snapshots.
replay_table_name: string indicating what name to give the replay table.
"""
# Create a replay server to add data to.
replay_table = reverb.Table(
name=replay_table_name,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=max_replay_size,
rate_limiter=reverb.rate_limiters.MinSize(min_size_to_sample=1),
signature=adders.NStepTransitionAdder.signature(environment_spec))
self._server = reverb.Server([replay_table], port=None)
# The adder is used to insert observations into replay.
address = f'localhost:{self._server.port}'
adder = adders.NStepTransitionAdder(
priority_fns={replay_table_name: lambda x: 1.},
client=reverb.Client(address), n_step=n_step, discount=discount)
# The dataset object to learn from.
dataset = datasets.make_reverb_dataset(
table=replay_table_name,
server_address=address,
batch_size=batch_size,
prefetch_size=prefetch_size)
# Make sure observation network is a Sonnet Module.
observation_network = tf2_utils.to_sonnet_module(observation_network)
# Create target networks before creating online/target network variables.
target_policy_network = copy.deepcopy(policy_network)
target_critic_network = copy.deepcopy(critic_network)
target_observation_network = copy.deepcopy(observation_network)
# Get observation and action specs.
act_spec = environment_spec.actions
obs_spec = environment_spec.observations
emb_spec = tf2_utils.create_variables(observation_network, [obs_spec])
# Create the behavior policy.
behavior_network = snt.Sequential([
observation_network,
policy_network,
networks.StochasticSamplingHead(),
])
# Create variables.
tf2_utils.create_variables(policy_network, [emb_spec])
tf2_utils.create_variables(critic_network, [emb_spec, act_spec])
tf2_utils.create_variables(target_policy_network, [emb_spec])
tf2_utils.create_variables(target_critic_network, [emb_spec, act_spec])
tf2_utils.create_variables(target_observation_network, [obs_spec])
# Create the actor which defines how we take actions.
actor = actors.FeedForwardActor(
policy_network=behavior_network, adder=adder)
# Create optimizers.
policy_optimizer = policy_optimizer or snt.optimizers.Adam(1e-4)
critic_optimizer = critic_optimizer or snt.optimizers.Adam(1e-4)
# The learner updates the parameters (and initializes them).
learner = learning.MPOLearner(
policy_network=policy_network,
critic_network=critic_network,
observation_network=observation_network,
target_policy_network=target_policy_network,
target_critic_network=target_critic_network,
target_observation_network=target_observation_network,
policy_loss_module=policy_loss_module,
policy_optimizer=policy_optimizer,
critic_optimizer=critic_optimizer,
clipping=clipping,
discount=discount,
num_samples=num_samples,
target_policy_update_period=target_policy_update_period,
target_critic_update_period=target_critic_update_period,
dataset=dataset,
logger=logger,
counter=counter,
checkpoint=checkpoint,
save_directory=save_directory)
super().__init__(
actor=actor,
learner=learner,
min_observations=max(batch_size, min_replay_size),
observations_per_step=float(batch_size) / samples_per_insert)
|
acme-master
|
acme/agents/tf/mpo/agent.py
|
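The final `super().__init__` call above is what interleaves acting and learning in the single-process MPO agent: it waits for `max(batch_size, min_replay_size)` observations and then performs one learner step every `batch_size / samples_per_insert` environment steps. A quick check of that arithmetic for the default hyperparameters:

# Default MPO hyperparameters from the constructor above.
batch_size = 256
samples_per_insert = 32.0
min_replay_size = 1000

min_observations = max(batch_size, min_replay_size)             # 1000 observations before learning.
observations_per_step = float(batch_size) / samples_per_insert  # 8.0 env steps per learner step.
print(min_observations, observations_per_step)                  # -> 1000 8.0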
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MPO learner implementation."""
import time
from typing import List, Optional
import acme
from acme import types
from acme.tf import losses
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import sonnet as snt
import tensorflow as tf
import trfl
class MPOLearner(acme.Learner):
"""MPO learner."""
def __init__(
self,
policy_network: snt.Module,
critic_network: snt.Module,
target_policy_network: snt.Module,
target_critic_network: snt.Module,
discount: float,
num_samples: int,
target_policy_update_period: int,
target_critic_update_period: int,
dataset: tf.data.Dataset,
observation_network: types.TensorTransformation = tf.identity,
target_observation_network: types.TensorTransformation = tf.identity,
policy_loss_module: Optional[snt.Module] = None,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
dual_optimizer: Optional[snt.Optimizer] = None,
clipping: bool = True,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = True,
save_directory: str = '~/acme',
):
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger('learner')
self._discount = discount
self._num_samples = num_samples
self._clipping = clipping
# Necessary to track when to update target networks.
self._num_steps = tf.Variable(0, dtype=tf.int32)
self._target_policy_update_period = target_policy_update_period
self._target_critic_update_period = target_critic_update_period
# Batch dataset and create iterator.
# TODO(b/155086959): Fix type stubs and remove.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
# Store online and target networks.
self._policy_network = policy_network
self._critic_network = critic_network
self._target_policy_network = target_policy_network
self._target_critic_network = target_critic_network
# Make sure observation networks are snt.Module's so they have variables.
self._observation_network = tf2_utils.to_sonnet_module(observation_network)
self._target_observation_network = tf2_utils.to_sonnet_module(
target_observation_network)
self._policy_loss_module = policy_loss_module or losses.MPO(
epsilon=1e-1,
epsilon_penalty=1e-3,
epsilon_mean=2.5e-3,
epsilon_stddev=1e-6,
init_log_temperature=10.,
init_log_alpha_mean=10.,
init_log_alpha_stddev=1000.)
# Create the optimizers.
self._critic_optimizer = critic_optimizer or snt.optimizers.Adam(1e-4)
self._policy_optimizer = policy_optimizer or snt.optimizers.Adam(1e-4)
self._dual_optimizer = dual_optimizer or snt.optimizers.Adam(1e-2)
# Expose the variables.
policy_network_to_expose = snt.Sequential(
[self._target_observation_network, self._target_policy_network])
self._variables = {
'critic': self._target_critic_network.variables,
'policy': policy_network_to_expose.variables,
}
# Create a checkpointer and snapshotter object.
self._checkpointer = None
self._snapshotter = None
if checkpoint:
self._checkpointer = tf2_savers.Checkpointer(
directory=save_directory,
subdirectory='mpo_learner',
objects_to_save={
'counter': self._counter,
'policy': self._policy_network,
'critic': self._critic_network,
'observation_network': self._observation_network,
'target_policy': self._target_policy_network,
'target_critic': self._target_critic_network,
'target_observation_network': self._target_observation_network,
'policy_optimizer': self._policy_optimizer,
'critic_optimizer': self._critic_optimizer,
'dual_optimizer': self._dual_optimizer,
'policy_loss_module': self._policy_loss_module,
'num_steps': self._num_steps,
})
self._snapshotter = tf2_savers.Snapshotter(
directory=save_directory,
objects_to_save={
'policy':
snt.Sequential([
self._target_observation_network,
self._target_policy_network
]),
})
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
@tf.function
def _step(self) -> types.Nest:
# Update target network.
online_policy_variables = self._policy_network.variables
target_policy_variables = self._target_policy_network.variables
online_critic_variables = (
*self._observation_network.variables,
*self._critic_network.variables,
)
target_critic_variables = (
*self._target_observation_network.variables,
*self._target_critic_network.variables,
)
# Make online policy -> target policy network update ops.
if tf.math.mod(self._num_steps, self._target_policy_update_period) == 0:
for src, dest in zip(online_policy_variables, target_policy_variables):
dest.assign(src)
# Make online critic -> target critic network update ops.
if tf.math.mod(self._num_steps, self._target_critic_update_period) == 0:
for src, dest in zip(online_critic_variables, target_critic_variables):
dest.assign(src)
# Increment number of learner steps for periodic update bookkeeping.
self._num_steps.assign_add(1)
# Get next batch of data.
inputs = next(self._iterator)
# Get data from replay (dropping extras if any). Note there is no
# extra data here because we do not insert any into Reverb.
transitions: types.Transition = inputs.data
# Cast the additional discount to match the environment discount dtype.
discount = tf.cast(self._discount, dtype=transitions.discount.dtype)
with tf.GradientTape(persistent=True) as tape:
# Maybe transform the observation before feeding into policy and critic.
# Transforming the observations this way at the start of the learning
# step effectively means that the policy and critic share observation
# network weights.
o_tm1 = self._observation_network(transitions.observation)
# This stop_gradient prevents gradients to propagate into the target
# observation network. In addition, since the online policy network is
# evaluated at o_t, this also means the policy loss does not influence
# the observation network training.
o_t = tf.stop_gradient(
self._target_observation_network(transitions.next_observation))
# Get action distributions from policy networks.
online_action_distribution = self._policy_network(o_t)
target_action_distribution = self._target_policy_network(o_t)
# Get sampled actions to evaluate policy; of size [N, B, ...].
sampled_actions = target_action_distribution.sample(self._num_samples)
tiled_o_t = tf2_utils.tile_tensor(o_t, self._num_samples) # [N, B, ...]
# Compute the target critic's Q-value of the sampled actions in state o_t.
sampled_q_t = self._target_critic_network(
# Merge batch dimensions; to shape [N*B, ...].
snt.merge_leading_dims(tiled_o_t, num_dims=2),
snt.merge_leading_dims(sampled_actions, num_dims=2))
# Reshape Q-value samples back to original batch dimensions and average
# them to compute the TD-learning bootstrap target.
sampled_q_t = tf.reshape(sampled_q_t, (self._num_samples, -1)) # [N, B]
q_t = tf.reduce_mean(sampled_q_t, axis=0) # [B]
# Compute online critic value of a_tm1 in state o_tm1.
q_tm1 = self._critic_network(o_tm1, transitions.action) # [B, 1]
q_tm1 = tf.squeeze(q_tm1, axis=-1) # [B]; necessary for trfl.td_learning.
# Critic loss.
critic_loss = trfl.td_learning(q_tm1, transitions.reward,
discount * transitions.discount, q_t).loss
critic_loss = tf.reduce_mean(critic_loss)
# Actor learning.
policy_loss, policy_stats = self._policy_loss_module(
online_action_distribution=online_action_distribution,
target_action_distribution=target_action_distribution,
actions=sampled_actions,
q_values=sampled_q_t)
# For clarity, explicitly define which variables are trained by which loss.
critic_trainable_variables = (
# In this agent, the critic loss trains the observation network.
self._observation_network.trainable_variables +
self._critic_network.trainable_variables)
policy_trainable_variables = self._policy_network.trainable_variables
# The following are the MPO dual variables, stored in the loss module.
dual_trainable_variables = self._policy_loss_module.trainable_variables
# Compute gradients.
critic_gradients = tape.gradient(critic_loss, critic_trainable_variables)
policy_gradients, dual_gradients = tape.gradient(
policy_loss, (policy_trainable_variables, dual_trainable_variables))
# Delete the tape manually because of the persistent=True flag.
del tape
# Maybe clip gradients.
if self._clipping:
policy_gradients = tuple(tf.clip_by_global_norm(policy_gradients, 40.)[0])
critic_gradients = tuple(tf.clip_by_global_norm(critic_gradients, 40.)[0])
# Apply gradients.
self._critic_optimizer.apply(critic_gradients, critic_trainable_variables)
self._policy_optimizer.apply(policy_gradients, policy_trainable_variables)
self._dual_optimizer.apply(dual_gradients, dual_trainable_variables)
# Losses to track.
fetches = {
'critic_loss': critic_loss,
'policy_loss': policy_loss,
}
fetches.update(policy_stats) # Log MPO stats.
return fetches
def step(self):
# Run the learning step.
fetches = self._step()
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Update our counts and record it.
counts = self._counter.increment(steps=1, walltime=elapsed_time)
fetches.update(counts)
# Checkpoint and attempt to write the logs.
if self._checkpointer is not None:
self._checkpointer.save()
if self._snapshotter is not None:
self._snapshotter.save()
self._logger.write(fetches)
def get_variables(self, names: List[str]) -> List[List[np.ndarray]]:
return [tf2_utils.to_numpy(self._variables[name]) for name in names]
|
acme-master
|
acme/agents/tf/mpo/learning.py
|
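To make the shapes in the sampled-Q bootstrap of `MPOLearner._step` concrete, here is a standalone sketch (illustrative N and B, random tensors in place of real network outputs) of how the merged [N*B, 1] target-critic values are reshaped and averaged before feeding trfl.td_learning:

import tensorflow as tf
import trfl

N, B = 20, 4                                    # num_samples, batch size.
sampled_q_t = tf.random.normal((N * B, 1))      # Target-critic output for the merged [N*B] batch.
sampled_q_t = tf.reshape(sampled_q_t, (N, -1))  # Back to [N, B].
q_t = tf.reduce_mean(sampled_q_t, axis=0)       # [B] bootstrap values.

q_tm1 = tf.random.normal((B,))   # Online critic values for (o_tm1, a_tm1).
r_t = tf.zeros((B,))             # Rewards.
d_t = 0.99 * tf.ones((B,))       # Discounts.
critic_loss = tf.reduce_mean(trfl.td_learning(q_tm1, r_t, d_t, q_t).loss)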
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test for the distributed agent."""
import acme
from acme import specs
from acme.agents.tf import d4pg
from acme.testing import fakes
from acme.tf import networks
from acme.tf import utils as tf2_utils
import launchpad as lp
import numpy as np
import sonnet as snt
from absl.testing import absltest
def make_networks(action_spec: specs.BoundedArray):
"""Simple networks for testing.."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_network = snt.Sequential([
networks.LayerNormMLP([50], activate_final=True),
networks.NearZeroInitializedLinear(num_dimensions),
networks.TanhToSpec(action_spec)
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
critic_network = snt.Sequential([
networks.CriticMultiplexer(
critic_network=networks.LayerNormMLP(
[50], activate_final=True)),
networks.DiscreteValuedHead(-1., 1., 10)
])
return {
'policy': policy_network,
'critic': critic_network,
'observation': tf2_utils.batch_concat,
}
class DistributedAgentTest(absltest.TestCase):
"""Simple integration/smoke test for the distributed agent."""
def test_control_suite(self):
"""Tests that the agent can run on the control suite without crashing."""
agent = d4pg.DistributedD4PG(
environment_factory=lambda x: fakes.ContinuousEnvironment(bounded=True),
network_factory=make_networks,
accelerator='CPU',
num_actors=2,
batch_size=32,
min_replay_size=32,
max_replay_size=1000,
)
program = agent.build()
(learner_node,) = program.groups['learner']
learner_node.disable_run()
lp.launch(program, launch_type='test_mt')
learner: acme.Learner = learner_node.create_handle().dereference()
for _ in range(5):
learner.step()
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/d4pg/agent_distributed_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the D4PG agent class."""
import copy
from typing import Callable, Dict, Optional
import acme
from acme import specs
from acme.agents.tf.d4pg import agent
from acme.tf import savers as tf2_savers
from acme.utils import counting
from acme.utils import loggers
from acme.utils import lp_utils
import dm_env
import launchpad as lp
import reverb
import sonnet as snt
import tensorflow as tf
# Valid values of the "accelerator" argument.
_ACCELERATORS = ('CPU', 'GPU', 'TPU')
class DistributedD4PG:
"""Program definition for D4PG."""
def __init__(
self,
environment_factory: Callable[[bool], dm_env.Environment],
network_factory: Callable[[specs.BoundedArray], Dict[str, snt.Module]],
accelerator: Optional[str] = None,
num_actors: int = 1,
num_caches: int = 0,
environment_spec: Optional[specs.EnvironmentSpec] = None,
batch_size: int = 256,
prefetch_size: int = 4,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: Optional[float] = 32.0,
n_step: int = 5,
sigma: float = 0.3,
clipping: bool = True,
discount: float = 0.99,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
target_update_period: int = 100,
variable_update_period: int = 1000,
max_actor_steps: Optional[int] = None,
log_every: float = 10.0,
):
if accelerator is not None and accelerator not in _ACCELERATORS:
raise ValueError(f'Accelerator must be one of {_ACCELERATORS}, '
f'not "{accelerator}".')
if not environment_spec:
environment_spec = specs.make_environment_spec(environment_factory(False))
# TODO(mwhoffman): Make network_factory directly return the struct.
# TODO(mwhoffman): Make the factory take the entire spec.
def wrapped_network_factory(action_spec):
networks_dict = network_factory(action_spec)
networks = agent.D4PGNetworks(
policy_network=networks_dict.get('policy'),
critic_network=networks_dict.get('critic'),
observation_network=networks_dict.get('observation', tf.identity))
return networks
self._environment_factory = environment_factory
self._network_factory = wrapped_network_factory
self._environment_spec = environment_spec
self._sigma = sigma
self._num_actors = num_actors
self._num_caches = num_caches
self._max_actor_steps = max_actor_steps
self._log_every = log_every
self._accelerator = accelerator
self._variable_update_period = variable_update_period
self._builder = agent.D4PGBuilder(
# TODO(mwhoffman): pass the config dataclass in directly.
# TODO(mwhoffman): use the limiter rather than the workaround below.
agent.D4PGConfig(
accelerator=accelerator,
discount=discount,
batch_size=batch_size,
prefetch_size=prefetch_size,
target_update_period=target_update_period,
variable_update_period=variable_update_period,
policy_optimizer=policy_optimizer,
critic_optimizer=critic_optimizer,
min_replay_size=min_replay_size,
max_replay_size=max_replay_size,
samples_per_insert=samples_per_insert,
n_step=n_step,
sigma=sigma,
clipping=clipping,
))
def replay(self):
"""The replay storage."""
return self._builder.make_replay_tables(self._environment_spec)
def counter(self):
return tf2_savers.CheckpointingRunner(counting.Counter(),
time_delta_minutes=1,
subdirectory='counter')
def coordinator(self, counter: counting.Counter):
return lp_utils.StepsLimiter(counter, self._max_actor_steps)
def learner(
self,
replay: reverb.Client,
counter: counting.Counter,
):
"""The Learning part of the agent."""
# If we are running on multiple accelerator devices, this replicates
# weights and updates across devices.
replicator = agent.get_replicator(self._accelerator)
with replicator.scope():
# Create the networks to optimize (online) and target networks.
online_networks = self._network_factory(self._environment_spec.actions)
target_networks = copy.deepcopy(online_networks)
# Initialize the networks.
online_networks.init(self._environment_spec)
target_networks.init(self._environment_spec)
dataset = self._builder.make_dataset_iterator(replay)
counter = counting.Counter(counter, 'learner')
logger = loggers.make_default_logger(
'learner', time_delta=self._log_every, steps_key='learner_steps')
return self._builder.make_learner(
networks=(online_networks, target_networks),
dataset=dataset,
counter=counter,
logger=logger,
checkpoint=True,
)
def actor(
self,
replay: reverb.Client,
variable_source: acme.VariableSource,
counter: counting.Counter,
) -> acme.EnvironmentLoop:
"""The actor process."""
# Create the behavior policy.
networks = self._network_factory(self._environment_spec.actions)
networks.init(self._environment_spec)
policy_network = networks.make_policy(
environment_spec=self._environment_spec,
sigma=self._sigma,
)
# Create the agent.
actor = self._builder.make_actor(
policy_network=policy_network,
adder=self._builder.make_adder(replay),
variable_source=variable_source,
)
# Create the environment.
environment = self._environment_factory(False)
# Create logger and counter; actors will not spam bigtable.
counter = counting.Counter(counter, 'actor')
logger = loggers.make_default_logger(
'actor',
save_data=False,
time_delta=self._log_every,
steps_key='actor_steps')
# Create the loop to connect environment and agent.
return acme.EnvironmentLoop(environment, actor, counter, logger)
def evaluator(
self,
variable_source: acme.VariableSource,
counter: counting.Counter,
logger: Optional[loggers.Logger] = None,
):
"""The evaluation process."""
# Create the behavior policy.
networks = self._network_factory(self._environment_spec.actions)
networks.init(self._environment_spec)
policy_network = networks.make_policy(self._environment_spec)
# Create the agent.
actor = self._builder.make_actor(
policy_network=policy_network,
variable_source=variable_source,
)
# Make the environment.
environment = self._environment_factory(True)
# Create logger and counter.
counter = counting.Counter(counter, 'evaluator')
logger = logger or loggers.make_default_logger(
'evaluator',
time_delta=self._log_every,
steps_key='evaluator_steps',
)
# Create the run loop and return it.
return acme.EnvironmentLoop(environment, actor, counter, logger)
def build(self, name='d4pg'):
"""Build the distributed agent topology."""
program = lp.Program(name=name)
with program.group('replay'):
replay = program.add_node(lp.ReverbNode(self.replay))
with program.group('counter'):
counter = program.add_node(lp.CourierNode(self.counter))
if self._max_actor_steps:
with program.group('coordinator'):
_ = program.add_node(lp.CourierNode(self.coordinator, counter))
with program.group('learner'):
learner = program.add_node(lp.CourierNode(self.learner, replay, counter))
with program.group('evaluator'):
program.add_node(lp.CourierNode(self.evaluator, learner, counter))
if not self._num_caches:
# Use our learner as a single variable source.
sources = [learner]
else:
with program.group('cacher'):
# Create a set of learner caches.
sources = []
for _ in range(self._num_caches):
cacher = program.add_node(
lp.CacherNode(
learner, refresh_interval_ms=2000, stale_after_ms=4000))
sources.append(cacher)
with program.group('actor'):
# Add actors which pull round-robin from our variable sources.
for actor_id in range(self._num_actors):
source = sources[actor_id % len(sources)]
program.add_node(lp.CourierNode(self.actor, replay, source, counter))
return program
|
acme-master
|
acme/agents/tf/d4pg/agent_distributed.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of a D4PG agent."""
from acme.agents.tf.d4pg.agent import D4PG
from acme.agents.tf.d4pg.agent_distributed import DistributedD4PG
from acme.agents.tf.d4pg.learning import D4PGLearner
from acme.agents.tf.d4pg.networks import make_default_networks
|
acme-master
|
acme/agents/tf/d4pg/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared helpers for different experiment flavours."""
from typing import Mapping, Sequence
from acme import specs
from acme import types
from acme.tf import networks
from acme.tf import utils as tf2_utils
import numpy as np
import sonnet as snt
def make_default_networks(
action_spec: specs.BoundedArray,
policy_layer_sizes: Sequence[int] = (256, 256, 256),
critic_layer_sizes: Sequence[int] = (512, 512, 256),
vmin: float = -150.,
vmax: float = 150.,
num_atoms: int = 51,
) -> Mapping[str, types.TensorTransformation]:
"""Creates networks used by the agent."""
# Get total number of action dimensions from action spec.
num_dimensions = np.prod(action_spec.shape, dtype=int)
# Create the shared observation network; here simply a state-less operation.
observation_network = tf2_utils.batch_concat
# Create the policy network.
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.NearZeroInitializedLinear(num_dimensions),
networks.TanhToSpec(action_spec),
])
# Create the critic network.
critic_network = snt.Sequential([
# The multiplexer concatenates the observations/actions.
networks.CriticMultiplexer(),
networks.LayerNormMLP(critic_layer_sizes, activate_final=True),
networks.DiscreteValuedHead(vmin, vmax, num_atoms),
])
return {
'policy': policy_network,
'critic': critic_network,
'observation': observation_network,
}
|
acme-master
|
acme/agents/tf/d4pg/networks.py
|
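A short usage sketch for the factory above; the 6-dimensional bounded action spec is an illustrative assumption.

import numpy as np

from acme import specs
from acme.agents.tf import d4pg

action_spec = specs.BoundedArray(
    shape=(6,), dtype=np.float32, minimum=-1.0, maximum=1.0, name='action')

nets = d4pg.make_default_networks(action_spec, vmin=-100.0, vmax=100.0)
print(sorted(nets))  # ['critic', 'observation', 'policy']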
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the D4PG agent."""
import sys
from typing import Dict, Sequence
import acme
from acme import specs
from acme import types
from acme.agents.tf import d4pg
from acme.testing import fakes
from acme.tf import networks
import numpy as np
import sonnet as snt
import tensorflow as tf
from absl.testing import absltest
def make_networks(
action_spec: types.NestedSpec,
policy_layer_sizes: Sequence[int] = (10, 10),
critic_layer_sizes: Sequence[int] = (10, 10),
vmin: float = -150.,
vmax: float = 150.,
num_atoms: int = 51,
) -> Dict[str, snt.Module]:
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_layer_sizes = list(policy_layer_sizes) + [num_dimensions]
policy_network = snt.Sequential(
[networks.LayerNormMLP(policy_layer_sizes), tf.tanh])
critic_network = snt.Sequential([
networks.CriticMultiplexer(
critic_network=networks.LayerNormMLP(
critic_layer_sizes, activate_final=True)),
networks.DiscreteValuedHead(vmin, vmax, num_atoms)
])
return {
'policy': policy_network,
'critic': critic_network,
}
class D4PGTest(absltest.TestCase):
def test_d4pg(self):
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(episode_length=10, bounded=True)
spec = specs.make_environment_spec(environment)
# Create the networks.
agent_networks = make_networks(spec.actions)
# Construct the agent.
agent = d4pg.D4PG(
environment_spec=spec,
accelerator='CPU',
policy_network=agent_networks['policy'],
critic_network=agent_networks['critic'],
batch_size=10,
samples_per_insert=2,
min_replay_size=10,
)
# Try running the environment loop. We have no assertions here because all
# we care about is that the agent runs without raising any errors.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=2)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/d4pg/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""D4PG agent implementation."""
import copy
import dataclasses
import functools
from typing import Iterator, List, Optional, Tuple, Union, Sequence
from acme import adders
from acme import core
from acme import datasets
from acme import specs
from acme import types
from acme.adders import reverb as reverb_adders
from acme.agents import agent
from acme.agents.tf import actors
from acme.agents.tf.d4pg import learning
from acme.tf import networks as network_utils
from acme.tf import utils
from acme.tf import variable_utils
from acme.utils import counting
from acme.utils import loggers
import reverb
import sonnet as snt
import tensorflow as tf
Replicator = Union[snt.distribute.Replicator, snt.distribute.TpuReplicator]
@dataclasses.dataclass
class D4PGConfig:
"""Configuration options for the D4PG agent."""
accelerator: Optional[str] = None
discount: float = 0.99
batch_size: int = 256
prefetch_size: int = 4
target_update_period: int = 100
variable_update_period: int = 1000
policy_optimizer: Optional[snt.Optimizer] = None
critic_optimizer: Optional[snt.Optimizer] = None
min_replay_size: int = 1000
max_replay_size: int = 1000000
samples_per_insert: Optional[float] = 32.0
n_step: int = 5
sigma: float = 0.3
clipping: bool = True
replay_table_name: str = reverb_adders.DEFAULT_PRIORITY_TABLE
@dataclasses.dataclass
class D4PGNetworks:
"""Structure containing the networks for D4PG."""
policy_network: snt.Module
critic_network: snt.Module
observation_network: snt.Module
def __init__(
self,
policy_network: snt.Module,
critic_network: snt.Module,
observation_network: types.TensorTransformation,
):
# This method is implemented (rather than added by the dataclass decorator)
# in order to allow observation network to be passed as an arbitrary tensor
# transformation rather than as a snt Module.
# TODO(mwhoffman): use Protocol rather than Module/TensorTransformation.
self.policy_network = policy_network
self.critic_network = critic_network
self.observation_network = utils.to_sonnet_module(observation_network)
def init(self, environment_spec: specs.EnvironmentSpec):
"""Initialize the networks given an environment spec."""
# Get observation and action specs.
act_spec = environment_spec.actions
obs_spec = environment_spec.observations
# Create variables for the observation net and, as a side-effect, get a
# spec describing the embedding space.
emb_spec = utils.create_variables(self.observation_network, [obs_spec])
# Create variables for the policy and critic nets.
_ = utils.create_variables(self.policy_network, [emb_spec])
_ = utils.create_variables(self.critic_network, [emb_spec, act_spec])
def make_policy(
self,
environment_spec: specs.EnvironmentSpec,
sigma: float = 0.0,
) -> snt.Module:
"""Create a single network which evaluates the policy."""
# Stack the observation and policy networks.
stack = [
self.observation_network,
self.policy_network,
]
# If a stochastic/non-greedy policy is requested, add Gaussian noise on
# top to enable a simple form of exploration.
# TODO(mwhoffman): Refactor this to remove it from the class.
if sigma > 0.0:
stack += [
network_utils.ClippedGaussian(sigma),
network_utils.ClipToSpec(environment_spec.actions),
]
# Return a network which sequentially evaluates everything in the stack.
return snt.Sequential(stack)
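# Illustrative note (added, not in the original file): given a D4PGNetworks
# instance `nets` and an environment spec `spec`, make_policy above yields both
# a stochastic behavior policy and a greedy evaluation policy:
#
#   behavior_policy = nets.make_policy(spec, sigma=0.3)  # adds clipped noise
#   eval_policy = nets.make_policy(spec)                 # sigma=0.0, no noise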
class D4PGBuilder:
"""Builder for D4PG which constructs individual components of the agent."""
def __init__(self, config: D4PGConfig):
self._config = config
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
) -> List[reverb.Table]:
"""Create tables to insert data into."""
if self._config.samples_per_insert is None:
# We will take a samples_per_insert ratio of None to mean that there is
# no limit, i.e. this only implies a min size limit.
limiter = reverb.rate_limiters.MinSize(self._config.min_replay_size)
else:
# Create enough of an error buffer to give a 10% tolerance in rate.
samples_per_insert_tolerance = 0.1 * self._config.samples_per_insert
error_buffer = self._config.min_replay_size * samples_per_insert_tolerance
limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._config.min_replay_size,
samples_per_insert=self._config.samples_per_insert,
error_buffer=error_buffer)
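      # Worked example (illustrative numbers): with min_replay_size=1000 and
      # samples_per_insert=32, the tolerance is 0.1 * 32 = 3.2 and the error
      # buffer is 1000 * 3.2 = 3200, so sampling may drift from the nominal
      # insert:sample ratio by roughly that many samples before blocking.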
replay_table = reverb.Table(
name=self._config.replay_table_name,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._config.max_replay_size,
rate_limiter=limiter,
signature=reverb_adders.NStepTransitionAdder.signature(
environment_spec))
return [replay_table]
def make_dataset_iterator(
self,
reverb_client: reverb.Client,
) -> Iterator[reverb.ReplaySample]:
"""Create a dataset iterator to use for learning/updating the agent."""
# The dataset provides an interface to sample from replay.
dataset = datasets.make_reverb_dataset(
table=self._config.replay_table_name,
server_address=reverb_client.server_address,
batch_size=self._config.batch_size,
prefetch_size=self._config.prefetch_size)
replicator = get_replicator(self._config.accelerator)
dataset = replicator.experimental_distribute_dataset(dataset)
# TODO(b/155086959): Fix type stubs and remove.
return iter(dataset) # pytype: disable=wrong-arg-types
def make_adder(
self,
replay_client: reverb.Client,
) -> adders.Adder:
"""Create an adder which records data generated by the actor/environment."""
return reverb_adders.NStepTransitionAdder(
priority_fns={self._config.replay_table_name: lambda x: 1.},
client=replay_client,
n_step=self._config.n_step,
discount=self._config.discount)
def make_actor(
self,
policy_network: snt.Module,
adder: Optional[adders.Adder] = None,
variable_source: Optional[core.VariableSource] = None,
):
"""Create an actor instance."""
if variable_source:
# Create the variable client responsible for keeping the actor up-to-date.
variable_client = variable_utils.VariableClient(
client=variable_source,
variables={'policy': policy_network.variables},
update_period=self._config.variable_update_period,
)
# Make sure not to use a random policy after checkpoint restoration by
# assigning variables before running the environment loop.
variable_client.update_and_wait()
else:
variable_client = None
# Create the actor which defines how we take actions.
return actors.FeedForwardActor(
policy_network=policy_network,
adder=adder,
variable_client=variable_client,
)
def make_learner(
self,
networks: Tuple[D4PGNetworks, D4PGNetworks],
dataset: Iterator[reverb.ReplaySample],
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = False,
):
"""Creates an instance of the learner."""
online_networks, target_networks = networks
# The learner updates the parameters (and initializes them).
return learning.D4PGLearner(
policy_network=online_networks.policy_network,
critic_network=online_networks.critic_network,
observation_network=online_networks.observation_network,
target_policy_network=target_networks.policy_network,
target_critic_network=target_networks.critic_network,
target_observation_network=target_networks.observation_network,
policy_optimizer=self._config.policy_optimizer,
critic_optimizer=self._config.critic_optimizer,
clipping=self._config.clipping,
discount=self._config.discount,
target_update_period=self._config.target_update_period,
dataset_iterator=dataset,
replicator=get_replicator(self._config.accelerator),
counter=counter,
logger=logger,
checkpoint=checkpoint,
)
class D4PG(agent.Agent):
"""D4PG Agent.
This implements a single-process D4PG agent. This is an actor-critic algorithm
that generates data via a behavior policy, inserts N-step transitions into
a replay buffer, and periodically updates the policy (and as a result the
behavior) by sampling uniformly from this buffer.
"""
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
policy_network: snt.Module,
critic_network: snt.Module,
observation_network: types.TensorTransformation = tf.identity,
accelerator: Optional[str] = None,
discount: float = 0.99,
batch_size: int = 256,
prefetch_size: int = 4,
target_update_period: int = 100,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: float = 32.0,
n_step: int = 5,
sigma: float = 0.3,
clipping: bool = True,
replay_table_name: str = reverb_adders.DEFAULT_PRIORITY_TABLE,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = True,
):
"""Initialize the agent.
Args:
environment_spec: description of the actions, observations, etc.
policy_network: the online (optimized) policy.
critic_network: the online critic.
observation_network: optional network to transform the observations before
they are fed into any network.
accelerator: 'TPU', 'GPU', or 'CPU'. If omitted, the first available
accelerator type from ['TPU', 'GPU', 'CPU'] will be selected.
discount: discount to use for TD updates.
batch_size: batch size for updates.
prefetch_size: size to prefetch from replay.
target_update_period: number of learner steps to perform before updating
the target networks.
policy_optimizer: optimizer for the policy network updates.
critic_optimizer: optimizer for the critic network updates.
min_replay_size: minimum replay size before updating.
max_replay_size: maximum replay size.
samples_per_insert: number of samples to take from replay for every insert
that is made.
n_step: number of steps to squash into a single transition.
sigma: standard deviation of zero-mean, Gaussian exploration noise.
clipping: whether to clip gradients by global norm.
replay_table_name: string indicating what name to give the replay table.
counter: counter object used to keep track of steps.
logger: logger object to be used by learner.
checkpoint: boolean indicating whether to checkpoint the learner.
"""
if not accelerator:
accelerator = _get_first_available_accelerator_type(['TPU', 'GPU', 'CPU'])
# Create the Builder object which will internally create agent components.
builder = D4PGBuilder(
# TODO(mwhoffman): pass the config dataclass in directly.
# TODO(mwhoffman): use the limiter rather than the workaround below.
# Right now this modifies min_replay_size and samples_per_insert so that
# they are not controlled by a limiter and are instead handled by the
# Agent base class (the above TODO directly references this behavior).
D4PGConfig(
accelerator=accelerator,
discount=discount,
batch_size=batch_size,
prefetch_size=prefetch_size,
target_update_period=target_update_period,
policy_optimizer=policy_optimizer,
critic_optimizer=critic_optimizer,
min_replay_size=1, # Let the Agent class handle this.
max_replay_size=max_replay_size,
samples_per_insert=None, # Let the Agent class handle this.
n_step=n_step,
sigma=sigma,
clipping=clipping,
replay_table_name=replay_table_name,
))
replicator = get_replicator(accelerator)
with replicator.scope():
# TODO(mwhoffman): pass the network dataclass in directly.
online_networks = D4PGNetworks(policy_network=policy_network,
critic_network=critic_network,
observation_network=observation_network)
# Target networks are just a copy of the online networks.
target_networks = copy.deepcopy(online_networks)
# Initialize the networks.
online_networks.init(environment_spec)
target_networks.init(environment_spec)
# TODO(mwhoffman): either make this Dataclass or pass only one struct.
# The network struct passed to make_learner is just a tuple for the
# time-being (for backwards compatibility).
networks = (online_networks, target_networks)
# Create the behavior policy.
policy_network = online_networks.make_policy(environment_spec, sigma)
# Create the replay server and grab its address.
replay_tables = builder.make_replay_tables(environment_spec)
replay_server = reverb.Server(replay_tables, port=None)
replay_client = reverb.Client(f'localhost:{replay_server.port}')
# Create actor, dataset, and learner for generating, storing, and consuming
# data respectively.
adder = builder.make_adder(replay_client)
actor = builder.make_actor(policy_network, adder)
dataset = builder.make_dataset_iterator(replay_client)
learner = builder.make_learner(networks, dataset, counter, logger,
checkpoint)
super().__init__(
actor=actor,
learner=learner,
min_observations=max(batch_size, min_replay_size),
observations_per_step=float(batch_size) / samples_per_insert)
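    # Worked example with the default arguments (batch_size=256,
    # samples_per_insert=32, min_replay_size=1000): learning starts once
    # max(256, 1000) = 1000 observations have been collected, after which
    # roughly one learner step is taken per 256 / 32 = 8 new observations.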
# Save the replay so we don't garbage collect it.
self._replay_server = replay_server
def _ensure_accelerator(accelerator: str) -> str:
"""Checks for the existence of the expected accelerator type.
Args:
accelerator: 'CPU', 'GPU' or 'TPU'.
Returns:
The validated `accelerator` argument.
Raises:
RuntimeError: Thrown if the expected accelerator isn't found.
"""
devices = tf.config.get_visible_devices(device_type=accelerator)
if devices:
return accelerator
else:
error_messages = [f'Couldn\'t find any {accelerator} devices.',
'tf.config.get_visible_devices() returned:']
    error_messages.extend([str(d) for d in tf.config.get_visible_devices()])
raise RuntimeError('\n'.join(error_messages))
def _get_first_available_accelerator_type(
wishlist: Sequence[str] = ('TPU', 'GPU', 'CPU')) -> str:
"""Returns the first available accelerator type listed in a wishlist.
Args:
wishlist: A sequence of elements from {'CPU', 'GPU', 'TPU'}, listed in
order of descending preference.
Returns:
The first available accelerator type from `wishlist`.
Raises:
RuntimeError: Thrown if no accelerators from the `wishlist` are found.
"""
get_visible_devices = tf.config.get_visible_devices
for wishlist_device in wishlist:
devices = get_visible_devices(device_type=wishlist_device)
if devices:
return wishlist_device
available = ', '.join(
sorted(frozenset([d.type for d in get_visible_devices()])))
raise RuntimeError(
      f'Couldn\'t find any devices from {wishlist}. ' +
      f'Only the following types are available: {available}.')
# Only instantiate one replicator per (process, accelerator type), in case
# a replicator stores state that needs to be carried between its method calls.
@functools.lru_cache()
def get_replicator(accelerator: Optional[str]) -> Replicator:
"""Returns a replicator instance appropriate for the given accelerator.
  This caches the instance using functools.lru_cache, so that only one
  replicator is instantiated per process and argument value.
Args:
accelerator: None, 'TPU', 'GPU', or 'CPU'. If None, the first available
accelerator type will be chosen from ('TPU', 'GPU', 'CPU').
Returns:
    A replicator, for replicating weights, datasets, and updates across
one or more accelerators.
"""
if accelerator:
accelerator = _ensure_accelerator(accelerator)
else:
accelerator = _get_first_available_accelerator_type()
if accelerator == 'TPU':
tf.tpu.experimental.initialize_tpu_system()
return snt.distribute.TpuReplicator()
else:
return snt.distribute.Replicator()
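# Minimal usage sketch (hypothetical, not part of the original module): repeated
# calls with the same accelerator string return the cached replicator, so any
# variables or datasets created under its scope share a single strategy.
#
#   replicator = get_replicator('CPU')
#   assert replicator is get_replicator('CPU')  # cached via functools.lru_cache
#   with replicator.scope():
#     pass  # create networks/variables here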
|
acme-master
|
acme/agents/tf/d4pg/agent.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""D4PG learner implementation."""
import time
from typing import Dict, Iterator, List, Optional, Union, Sequence
import acme
from acme import types
from acme.tf import losses
from acme.tf import networks as acme_nets
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
import tree
Replicator = Union[snt.distribute.Replicator, snt.distribute.TpuReplicator]
class D4PGLearner(acme.Learner):
"""D4PG learner.
  This is the learning component of a D4PG agent. It takes a dataset as input
and implements update functionality to learn from this dataset.
"""
def __init__(
self,
policy_network: snt.Module,
critic_network: snt.Module,
target_policy_network: snt.Module,
target_critic_network: snt.Module,
discount: float,
target_update_period: int,
dataset_iterator: Iterator[reverb.ReplaySample],
replicator: Optional[Replicator] = None,
observation_network: types.TensorTransformation = lambda x: x,
target_observation_network: types.TensorTransformation = lambda x: x,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
clipping: bool = True,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = True,
):
"""Initializes the learner.
Args:
policy_network: the online (optimized) policy.
critic_network: the online critic.
target_policy_network: the target policy (which lags behind the online
policy).
target_critic_network: the target critic.
discount: discount to use for TD updates.
target_update_period: number of learner steps to perform before updating
the target networks.
dataset_iterator: dataset to learn from, whether fixed or from a replay
buffer (see `acme.datasets.reverb.make_reverb_dataset` documentation).
replicator: Replicates variables and their update methods over multiple
accelerators, such as the multiple chips in a TPU.
observation_network: an optional online network to process observations
before the policy and the critic.
target_observation_network: the target observation network.
policy_optimizer: the optimizer to be applied to the DPG (policy) loss.
critic_optimizer: the optimizer to be applied to the distributional
Bellman loss.
clipping: whether to clip gradients by global norm.
counter: counter object used to keep track of steps.
logger: logger object to be used by learner.
checkpoint: boolean indicating whether to checkpoint the learner.
"""
# Store online and target networks.
self._policy_network = policy_network
self._critic_network = critic_network
self._target_policy_network = target_policy_network
self._target_critic_network = target_critic_network
# Make sure observation networks are snt.Module's so they have variables.
self._observation_network = tf2_utils.to_sonnet_module(observation_network)
self._target_observation_network = tf2_utils.to_sonnet_module(
target_observation_network)
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger('learner')
# Other learner parameters.
self._discount = discount
self._clipping = clipping
# Replicates Variables across multiple accelerators
if not replicator:
accelerator = _get_first_available_accelerator_type()
if accelerator == 'TPU':
replicator = snt.distribute.TpuReplicator()
else:
replicator = snt.distribute.Replicator()
self._replicator = replicator
with replicator.scope():
# Necessary to track when to update target networks.
self._num_steps = tf.Variable(0, dtype=tf.int32)
self._target_update_period = target_update_period
# Create optimizers if they aren't given.
self._critic_optimizer = critic_optimizer or snt.optimizers.Adam(1e-4)
self._policy_optimizer = policy_optimizer or snt.optimizers.Adam(1e-4)
# Batch dataset and create iterator.
self._iterator = dataset_iterator
# Expose the variables.
policy_network_to_expose = snt.Sequential(
[self._target_observation_network, self._target_policy_network])
self._variables = {
'critic': self._target_critic_network.variables,
'policy': policy_network_to_expose.variables,
}
# Create a checkpointer and snapshotter objects.
self._checkpointer = None
self._snapshotter = None
if checkpoint:
self._checkpointer = tf2_savers.Checkpointer(
subdirectory='d4pg_learner',
objects_to_save={
'counter': self._counter,
'policy': self._policy_network,
'critic': self._critic_network,
'observation': self._observation_network,
'target_policy': self._target_policy_network,
'target_critic': self._target_critic_network,
'target_observation': self._target_observation_network,
'policy_optimizer': self._policy_optimizer,
'critic_optimizer': self._critic_optimizer,
'num_steps': self._num_steps,
})
critic_mean = snt.Sequential(
[self._critic_network, acme_nets.StochasticMeanHead()])
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={
'policy': self._policy_network,
'critic': critic_mean,
})
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
@tf.function
def _step(self, sample) -> Dict[str, tf.Tensor]:
transitions: types.Transition = sample.data # Assuming ReverbSample.
# Cast the additional discount to match the environment discount dtype.
discount = tf.cast(self._discount, dtype=transitions.discount.dtype)
with tf.GradientTape(persistent=True) as tape:
# Maybe transform the observation before feeding into policy and critic.
# Transforming the observations this way at the start of the learning
# step effectively means that the policy and critic share observation
# network weights.
o_tm1 = self._observation_network(transitions.observation)
o_t = self._target_observation_network(transitions.next_observation)
      # This stop_gradient prevents gradients from propagating into the target
# observation network. In addition, since the online policy network is
# evaluated at o_t, this also means the policy loss does not influence
# the observation network training.
o_t = tree.map_structure(tf.stop_gradient, o_t)
# Critic learning.
q_tm1 = self._critic_network(o_tm1, transitions.action)
q_t = self._target_critic_network(o_t, self._target_policy_network(o_t))
# Critic loss.
critic_loss = losses.categorical(q_tm1, transitions.reward,
discount * transitions.discount, q_t)
critic_loss = tf.reduce_mean(critic_loss, axis=[0])
# Actor learning.
dpg_a_t = self._policy_network(o_t)
dpg_z_t = self._critic_network(o_t, dpg_a_t)
dpg_q_t = dpg_z_t.mean()
# Actor loss. If clipping is true use dqda clipping and clip the norm.
dqda_clipping = 1.0 if self._clipping else None
policy_loss = losses.dpg(
dpg_q_t,
dpg_a_t,
tape=tape,
dqda_clipping=dqda_clipping,
clip_norm=self._clipping)
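      # Descriptive note (added): losses.dpg uses the tape to differentiate
      # dpg_q_t with respect to dpg_a_t and builds a surrogate loss whose
      # gradient w.r.t. the policy parameters is the deterministic policy
      # gradient; dqda_clipping=1.0 bounds each element of dQ/da to [-1, 1].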
policy_loss = tf.reduce_mean(policy_loss, axis=[0])
# Get trainable variables.
policy_variables = self._policy_network.trainable_variables
critic_variables = (
# In this agent, the critic loss trains the observation network.
self._observation_network.trainable_variables +
self._critic_network.trainable_variables)
# Compute gradients.
replica_context = tf.distribute.get_replica_context()
policy_gradients = _average_gradients_across_replicas(
replica_context,
tape.gradient(policy_loss, policy_variables))
critic_gradients = _average_gradients_across_replicas(
replica_context,
tape.gradient(critic_loss, critic_variables))
# Delete the tape manually because of the persistent=True flag.
del tape
# Maybe clip gradients.
if self._clipping:
policy_gradients = tf.clip_by_global_norm(policy_gradients, 40.)[0]
critic_gradients = tf.clip_by_global_norm(critic_gradients, 40.)[0]
# Apply gradients.
self._policy_optimizer.apply(policy_gradients, policy_variables)
self._critic_optimizer.apply(critic_gradients, critic_variables)
# Losses to track.
return {
'critic_loss': critic_loss,
'policy_loss': policy_loss,
}
@tf.function
def _replicated_step(self):
# Update target network
online_variables = (
*self._observation_network.variables,
*self._critic_network.variables,
*self._policy_network.variables,
)
target_variables = (
*self._target_observation_network.variables,
*self._target_critic_network.variables,
*self._target_policy_network.variables,
)
# Make online -> target network update ops.
if tf.math.mod(self._num_steps, self._target_update_period) == 0:
for src, dest in zip(online_variables, target_variables):
dest.assign(src)
self._num_steps.assign_add(1)
# Get data from replay (dropping extras if any). Note there is no
# extra data here because we do not insert any into Reverb.
sample = next(self._iterator)
# This mirrors the structure of the fetches returned by self._step(),
# but the Tensors are replaced with replicated Tensors, one per accelerator.
replicated_fetches = self._replicator.run(self._step, args=(sample,))
def reduce_mean_over_replicas(replicated_value):
"""Averages a replicated_value across replicas."""
# The "axis=None" arg means reduce across replicas, not internal axes.
return self._replicator.reduce(
reduce_op=tf.distribute.ReduceOp.MEAN,
value=replicated_value,
axis=None)
fetches = tree.map_structure(reduce_mean_over_replicas, replicated_fetches)
return fetches
def step(self):
# Run the learning step.
fetches = self._replicated_step()
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Update our counts and record it.
counts = self._counter.increment(steps=1, walltime=elapsed_time)
fetches.update(counts)
# Checkpoint and attempt to write the logs.
if self._checkpointer is not None:
self._checkpointer.save()
if self._snapshotter is not None:
self._snapshotter.save()
self._logger.write(fetches)
def get_variables(self, names: List[str]) -> List[List[np.ndarray]]:
return [tf2_utils.to_numpy(self._variables[name]) for name in names]
def _get_first_available_accelerator_type(
wishlist: Sequence[str] = ('TPU', 'GPU', 'CPU')) -> str:
"""Returns the first available accelerator type listed in a wishlist.
Args:
wishlist: A sequence of elements from {'CPU', 'GPU', 'TPU'}, listed in
order of descending preference.
Returns:
The first available accelerator type from `wishlist`.
Raises:
RuntimeError: Thrown if no accelerators from the `wishlist` are found.
"""
get_visible_devices = tf.config.get_visible_devices
for wishlist_device in wishlist:
devices = get_visible_devices(device_type=wishlist_device)
if devices:
return wishlist_device
available = ', '.join(
sorted(frozenset([d.type for d in get_visible_devices()])))
raise RuntimeError(
      f'Couldn\'t find any devices from {wishlist}. ' +
      f'Only the following types are available: {available}.')
def _average_gradients_across_replicas(replica_context, gradients):
"""Computes the average gradient across replicas.
This computes the gradient locally on this device, then copies over the
gradients computed on the other replicas, and takes the average across
replicas.
This is faster than copying the gradients from TPU to CPU, and averaging
them on the CPU (which is what we do for the losses/fetches).
Args:
replica_context: the return value of `tf.distribute.get_replica_context()`.
    gradients: The output of tape.gradient(loss, variables).
Returns:
    A list of (d_loss/d_variable)s.
"""
# We must remove any Nones from gradients before passing them to all_reduce.
# Nones occur when you call tape.gradient(loss, variables) with some
# variables that don't affect the loss.
# See: https://github.com/tensorflow/tensorflow/issues/783
gradients_without_nones = [g for g in gradients if g is not None]
original_indices = [i for i, g in enumerate(gradients) if g is not None]
results_without_nones = replica_context.all_reduce('mean',
gradients_without_nones)
results = [None] * len(gradients)
for ii, result in zip(original_indices, results_without_nones):
results[ii] = result
return results
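# Illustrative example (added, not part of the original file): if `gradients`
# is [g0, None, g2], only [g0, g2] are all-reduced across replicas and the
# result is re-expanded to [mean_g0, None, mean_g2], keeping the None entries
# in place so the output still lines up with the original variable list.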
|
acme-master
|
acme/agents/tf/d4pg/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test for the distributed agent."""
import acme
from acme import specs
from acme.agents.tf import ddpg
from acme.testing import fakes
from acme.tf import networks
from acme.tf import utils as tf2_utils
import launchpad as lp
import numpy as np
import sonnet as snt
from absl.testing import absltest
def make_networks(action_spec: specs.BoundedArray):
"""Creates simple networks for testing.."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
# Create the observation network shared between the policy and critic.
observation_network = tf2_utils.batch_concat
# Create the policy network (head) and the evaluation network.
policy_network = snt.Sequential([
networks.LayerNormMLP([50], activate_final=True),
networks.NearZeroInitializedLinear(num_dimensions),
networks.TanhToSpec(action_spec)
])
evaluator_network = snt.Sequential([observation_network, policy_network])
# Create the critic network.
critic_network = snt.Sequential([
# The multiplexer concatenates the observations/actions.
networks.CriticMultiplexer(),
networks.LayerNormMLP([50], activate_final=True),
networks.NearZeroInitializedLinear(1),
])
return {
'policy': policy_network,
'critic': critic_network,
'observation': observation_network,
'evaluator': evaluator_network,
}
class DistributedAgentTest(absltest.TestCase):
"""Simple integration/smoke test for the distributed agent."""
def test_agent(self):
agent = ddpg.DistributedDDPG(
environment_factory=lambda x: fakes.ContinuousEnvironment(bounded=True),
network_factory=make_networks,
num_actors=2,
batch_size=32,
min_replay_size=32,
max_replay_size=1000,
)
program = agent.build()
(learner_node,) = program.groups['learner']
learner_node.disable_run()
lp.launch(program, launch_type='test_mt')
learner: acme.Learner = learner_node.create_handle().dereference()
for _ in range(5):
learner.step()
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/ddpg/agent_distributed_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the distribted DDPG (D3PG) agent class."""
from typing import Callable, Dict, Optional
import acme
from acme import datasets
from acme import specs
from acme.adders import reverb as adders
from acme.agents.tf import actors
from acme.agents.tf.ddpg import learning
from acme.tf import networks
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.tf import variable_utils as tf2_variable_utils
from acme.utils import counting
from acme.utils import loggers
from acme.utils import lp_utils
import dm_env
import launchpad as lp
import reverb
import sonnet as snt
import tensorflow as tf
class DistributedDDPG:
"""Program definition for distributed DDPG (D3PG)."""
def __init__(
self,
environment_factory: Callable[[bool], dm_env.Environment],
network_factory: Callable[[specs.BoundedArray], Dict[str, snt.Module]],
num_actors: int = 1,
num_caches: int = 0,
environment_spec: Optional[specs.EnvironmentSpec] = None,
batch_size: int = 256,
prefetch_size: int = 4,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: Optional[float] = 32.0,
n_step: int = 5,
sigma: float = 0.3,
clipping: bool = True,
discount: float = 0.99,
target_update_period: int = 100,
variable_update_period: int = 1000,
max_actor_steps: Optional[int] = None,
log_every: float = 10.0,
):
if not environment_spec:
environment_spec = specs.make_environment_spec(environment_factory(False))
self._environment_factory = environment_factory
self._network_factory = network_factory
self._environment_spec = environment_spec
self._num_actors = num_actors
self._num_caches = num_caches
self._batch_size = batch_size
self._prefetch_size = prefetch_size
self._min_replay_size = min_replay_size
self._max_replay_size = max_replay_size
self._samples_per_insert = samples_per_insert
self._n_step = n_step
self._sigma = sigma
self._clipping = clipping
self._discount = discount
self._target_update_period = target_update_period
self._variable_update_period = variable_update_period
self._max_actor_steps = max_actor_steps
self._log_every = log_every
def replay(self):
"""The replay storage."""
if self._samples_per_insert is not None:
# Create enough of an error buffer to give a 10% tolerance in rate.
samples_per_insert_tolerance = 0.1 * self._samples_per_insert
error_buffer = self._min_replay_size * samples_per_insert_tolerance
limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._min_replay_size,
samples_per_insert=self._samples_per_insert,
error_buffer=error_buffer)
else:
limiter = reverb.rate_limiters.MinSize(self._min_replay_size)
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._max_replay_size,
rate_limiter=limiter,
signature=adders.NStepTransitionAdder.signature(
self._environment_spec))
return [replay_table]
def counter(self):
return tf2_savers.CheckpointingRunner(counting.Counter(),
time_delta_minutes=1,
subdirectory='counter')
def coordinator(self, counter: counting.Counter, max_actor_steps: int):
return lp_utils.StepsLimiter(counter, max_actor_steps)
def learner(
self,
replay: reverb.Client,
counter: counting.Counter,
):
"""The Learning part of the agent."""
act_spec = self._environment_spec.actions
obs_spec = self._environment_spec.observations
# Create the networks to optimize (online) and target networks.
online_networks = self._network_factory(act_spec)
target_networks = self._network_factory(act_spec)
# Make sure observation network is a Sonnet Module.
observation_network = online_networks.get('observation', tf.identity)
target_observation_network = target_networks.get('observation', tf.identity)
observation_network = tf2_utils.to_sonnet_module(observation_network)
target_observation_network = tf2_utils.to_sonnet_module(
target_observation_network)
# Get embedding spec and create observation network variables.
emb_spec = tf2_utils.create_variables(observation_network, [obs_spec])
# Create variables.
tf2_utils.create_variables(online_networks['policy'], [emb_spec])
tf2_utils.create_variables(online_networks['critic'], [emb_spec, act_spec])
tf2_utils.create_variables(target_networks['policy'], [emb_spec])
tf2_utils.create_variables(target_networks['critic'], [emb_spec, act_spec])
tf2_utils.create_variables(target_observation_network, [obs_spec])
# The dataset object to learn from.
dataset = datasets.make_reverb_dataset(
server_address=replay.server_address,
batch_size=self._batch_size,
prefetch_size=self._prefetch_size)
# Create optimizers.
policy_optimizer = snt.optimizers.Adam(learning_rate=1e-4)
critic_optimizer = snt.optimizers.Adam(learning_rate=1e-4)
counter = counting.Counter(counter, 'learner')
logger = loggers.make_default_logger(
'learner', time_delta=self._log_every, steps_key='learner_steps')
# Return the learning agent.
return learning.DDPGLearner(
policy_network=online_networks['policy'],
critic_network=online_networks['critic'],
observation_network=observation_network,
target_policy_network=target_networks['policy'],
target_critic_network=target_networks['critic'],
target_observation_network=target_observation_network,
discount=self._discount,
target_update_period=self._target_update_period,
dataset=dataset,
policy_optimizer=policy_optimizer,
critic_optimizer=critic_optimizer,
clipping=self._clipping,
counter=counter,
logger=logger,
)
def actor(
self,
replay: reverb.Client,
variable_source: acme.VariableSource,
counter: counting.Counter,
):
"""The actor process."""
action_spec = self._environment_spec.actions
observation_spec = self._environment_spec.observations
# Create environment and behavior networks
environment = self._environment_factory(False)
agent_networks = self._network_factory(action_spec)
# Create behavior network by adding some random dithering.
behavior_network = snt.Sequential([
agent_networks.get('observation', tf.identity),
agent_networks.get('policy'),
networks.ClippedGaussian(self._sigma),
])
# Ensure network variables are created.
tf2_utils.create_variables(behavior_network, [observation_spec])
variables = {'policy': behavior_network.variables}
# Create the variable client responsible for keeping the actor up-to-date.
variable_client = tf2_variable_utils.VariableClient(
variable_source, variables, update_period=self._variable_update_period)
# Make sure not to use a random policy after checkpoint restoration by
# assigning variables before running the environment loop.
variable_client.update_and_wait()
# Component to add things into replay.
adder = adders.NStepTransitionAdder(
client=replay, n_step=self._n_step, discount=self._discount)
# Create the agent.
actor = actors.FeedForwardActor(
behavior_network, adder=adder, variable_client=variable_client)
# Create logger and counter; actors will not spam bigtable.
counter = counting.Counter(counter, 'actor')
logger = loggers.make_default_logger(
'actor',
save_data=False,
time_delta=self._log_every,
steps_key='actor_steps')
# Create the loop to connect environment and agent.
return acme.EnvironmentLoop(environment, actor, counter, logger)
def evaluator(
self,
variable_source: acme.VariableSource,
counter: counting.Counter,
):
"""The evaluation process."""
action_spec = self._environment_spec.actions
observation_spec = self._environment_spec.observations
# Create environment and evaluator networks
environment = self._environment_factory(True)
agent_networks = self._network_factory(action_spec)
# Create evaluator network.
evaluator_network = snt.Sequential([
agent_networks.get('observation', tf.identity),
agent_networks.get('policy'),
])
# Ensure network variables are created.
tf2_utils.create_variables(evaluator_network, [observation_spec])
variables = {'policy': evaluator_network.variables}
# Create the variable client responsible for keeping the actor up-to-date.
variable_client = tf2_variable_utils.VariableClient(
variable_source, variables, update_period=self._variable_update_period)
# Make sure not to evaluate a random actor by assigning variables before
# running the environment loop.
variable_client.update_and_wait()
# Create the evaluator; note it will not add experience to replay.
evaluator = actors.FeedForwardActor(
evaluator_network, variable_client=variable_client)
# Create logger and counter.
counter = counting.Counter(counter, 'evaluator')
logger = loggers.make_default_logger(
'evaluator', time_delta=self._log_every, steps_key='evaluator_steps')
# Create the run loop and return it.
return acme.EnvironmentLoop(
environment, evaluator, counter, logger)
def build(self, name='ddpg'):
"""Build the distributed agent topology."""
program = lp.Program(name=name)
with program.group('replay'):
replay = program.add_node(lp.ReverbNode(self.replay))
with program.group('counter'):
counter = program.add_node(lp.CourierNode(self.counter))
if self._max_actor_steps:
_ = program.add_node(
lp.CourierNode(self.coordinator, counter, self._max_actor_steps))
with program.group('learner'):
learner = program.add_node(
lp.CourierNode(self.learner, replay, counter))
with program.group('evaluator'):
program.add_node(
lp.CourierNode(self.evaluator, learner, counter))
if not self._num_caches:
# Use our learner as a single variable source.
sources = [learner]
else:
with program.group('cacher'):
# Create a set of learner caches.
sources = []
for _ in range(self._num_caches):
cacher = program.add_node(
lp.CacherNode(
learner, refresh_interval_ms=2000, stale_after_ms=4000))
sources.append(cacher)
with program.group('actor'):
# Add actors which pull round-robin from our variable sources.
for actor_id in range(self._num_actors):
source = sources[actor_id % len(sources)]
program.add_node(lp.CourierNode(self.actor, replay, source, counter))
return program
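# Hypothetical usage sketch (added, not part of the original file); the factory
# names below are placeholders for user-provided callables:
#
#   agent = DistributedDDPG(
#       environment_factory=my_environment_factory,
#       network_factory=my_network_factory,
#       num_actors=4)
#   program = agent.build()
#   lp.launch(program)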
|
acme-master
|
acme/agents/tf/ddpg/agent_distributed.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of a DDPG agent."""
from acme.agents.tf.ddpg.agent import DDPG
from acme.agents.tf.ddpg.agent_distributed import DistributedDDPG
from acme.agents.tf.ddpg.learning import DDPGLearner
|
acme-master
|
acme/agents/tf/ddpg/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the DDPG agent."""
from typing import Dict, Sequence
import acme
from acme import specs
from acme import types
from acme.agents.tf import ddpg
from acme.testing import fakes
from acme.tf import networks
import numpy as np
import sonnet as snt
import tensorflow as tf
from absl.testing import absltest
def make_networks(
action_spec: types.NestedSpec,
policy_layer_sizes: Sequence[int] = (10, 10),
critic_layer_sizes: Sequence[int] = (10, 10),
) -> Dict[str, snt.Module]:
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_layer_sizes = list(policy_layer_sizes) + [num_dimensions]
critic_layer_sizes = list(critic_layer_sizes) + [1]
policy_network = snt.Sequential(
[networks.LayerNormMLP(policy_layer_sizes), tf.tanh])
# The multiplexer concatenates the (maybe transformed) observations/actions.
critic_network = networks.CriticMultiplexer(
critic_network=networks.LayerNormMLP(critic_layer_sizes))
return {
'policy': policy_network,
'critic': critic_network,
}
class DDPGTest(absltest.TestCase):
def test_ddpg(self):
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(episode_length=10, bounded=True)
spec = specs.make_environment_spec(environment)
# Create the networks to optimize (online) and target networks.
agent_networks = make_networks(spec.actions)
# Construct the agent.
agent = ddpg.DDPG(
environment_spec=spec,
policy_network=agent_networks['policy'],
critic_network=agent_networks['critic'],
batch_size=10,
samples_per_insert=2,
min_replay_size=10,
)
# Try running the environment loop. We have no assertions here because all
# we care about is that the agent runs without raising any errors.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=2)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/ddpg/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DDPG agent implementation."""
import copy
from typing import Optional
from acme import datasets
from acme import specs
from acme import types
from acme.adders import reverb as adders
from acme.agents import agent
from acme.agents.tf import actors
from acme.agents.tf.ddpg import learning
from acme.tf import networks
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import reverb
import sonnet as snt
import tensorflow as tf
class DDPG(agent.Agent):
"""DDPG Agent.
This implements a single-process DDPG agent. This is an actor-critic algorithm
that generates data via a behavior policy, inserts N-step transitions into
a replay buffer, and periodically updates the policy (and as a result the
behavior) by sampling uniformly from this buffer.
"""
def __init__(self,
environment_spec: specs.EnvironmentSpec,
policy_network: snt.Module,
critic_network: snt.Module,
observation_network: types.TensorTransformation = tf.identity,
discount: float = 0.99,
batch_size: int = 256,
prefetch_size: int = 4,
target_update_period: int = 100,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: float = 32.0,
n_step: int = 5,
sigma: float = 0.3,
clipping: bool = True,
logger: Optional[loggers.Logger] = None,
counter: Optional[counting.Counter] = None,
checkpoint: bool = True,
replay_table_name: str = adders.DEFAULT_PRIORITY_TABLE):
"""Initialize the agent.
Args:
environment_spec: description of the actions, observations, etc.
policy_network: the online (optimized) policy.
critic_network: the online critic.
observation_network: optional network to transform the observations before
they are fed into any network.
discount: discount to use for TD updates.
batch_size: batch size for updates.
prefetch_size: size to prefetch from replay.
target_update_period: number of learner steps to perform before updating
the target networks.
min_replay_size: minimum replay size before updating.
max_replay_size: maximum replay size.
samples_per_insert: number of samples to take from replay for every insert
that is made.
n_step: number of steps to squash into a single transition.
sigma: standard deviation of zero-mean, Gaussian exploration noise.
clipping: whether to clip gradients by global norm.
logger: logger object to be used by learner.
counter: counter object used to keep track of steps.
checkpoint: boolean indicating whether to checkpoint the learner.
replay_table_name: string indicating what name to give the replay table.
"""
# Create a replay server to add data to. This uses no limiter behavior in
# order to allow the Agent interface to handle it.
replay_table = reverb.Table(
name=replay_table_name,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=max_replay_size,
rate_limiter=reverb.rate_limiters.MinSize(1),
signature=adders.NStepTransitionAdder.signature(environment_spec))
self._server = reverb.Server([replay_table], port=None)
# The adder is used to insert observations into replay.
address = f'localhost:{self._server.port}'
adder = adders.NStepTransitionAdder(
priority_fns={replay_table_name: lambda x: 1.},
client=reverb.Client(address),
n_step=n_step,
discount=discount)
# The dataset provides an interface to sample from replay.
dataset = datasets.make_reverb_dataset(
table=replay_table_name,
server_address=address,
batch_size=batch_size,
prefetch_size=prefetch_size)
# Make sure observation network is a Sonnet Module.
observation_network = tf2_utils.to_sonnet_module(observation_network)
# Get observation and action specs.
act_spec = environment_spec.actions
obs_spec = environment_spec.observations
emb_spec = tf2_utils.create_variables(observation_network, [obs_spec])
# Create target networks.
target_policy_network = copy.deepcopy(policy_network)
target_critic_network = copy.deepcopy(critic_network)
target_observation_network = copy.deepcopy(observation_network)
# Create the behavior policy.
behavior_network = snt.Sequential([
observation_network,
policy_network,
networks.ClippedGaussian(sigma),
networks.ClipToSpec(act_spec),
])
# Create variables.
tf2_utils.create_variables(policy_network, [emb_spec])
tf2_utils.create_variables(critic_network, [emb_spec, act_spec])
tf2_utils.create_variables(target_policy_network, [emb_spec])
tf2_utils.create_variables(target_critic_network, [emb_spec, act_spec])
tf2_utils.create_variables(target_observation_network, [obs_spec])
# Create the actor which defines how we take actions.
actor = actors.FeedForwardActor(behavior_network, adder=adder)
# Create optimizers.
policy_optimizer = snt.optimizers.Adam(learning_rate=1e-4)
critic_optimizer = snt.optimizers.Adam(learning_rate=1e-4)
# The learner updates the parameters (and initializes them).
learner = learning.DDPGLearner(
policy_network=policy_network,
critic_network=critic_network,
observation_network=observation_network,
target_policy_network=target_policy_network,
target_critic_network=target_critic_network,
target_observation_network=target_observation_network,
policy_optimizer=policy_optimizer,
critic_optimizer=critic_optimizer,
clipping=clipping,
discount=discount,
target_update_period=target_update_period,
dataset=dataset,
counter=counter,
logger=logger,
checkpoint=checkpoint,
)
super().__init__(
actor=actor,
learner=learner,
min_observations=max(batch_size, min_replay_size),
observations_per_step=float(batch_size) / samples_per_insert)
|
acme-master
|
acme/agents/tf/ddpg/agent.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DDPG learner implementation."""
import time
from typing import List, Optional
import acme
from acme import types
from acme.tf import losses
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import sonnet as snt
import tensorflow as tf
import tree
import trfl
class DDPGLearner(acme.Learner):
"""DDPG learner.
  This is the learning component of a DDPG agent. It takes a dataset as input
and implements update functionality to learn from this dataset.
"""
def __init__(
self,
policy_network: snt.Module,
critic_network: snt.Module,
target_policy_network: snt.Module,
target_critic_network: snt.Module,
discount: float,
target_update_period: int,
dataset: tf.data.Dataset,
observation_network: types.TensorTransformation = lambda x: x,
target_observation_network: types.TensorTransformation = lambda x: x,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
clipping: bool = True,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = True,
):
"""Initializes the learner.
Args:
policy_network: the online (optimized) policy.
critic_network: the online critic.
target_policy_network: the target policy (which lags behind the online
policy).
target_critic_network: the target critic.
discount: discount to use for TD updates.
target_update_period: number of learner steps to perform before updating
the target networks.
dataset: dataset to learn from, whether fixed or from a replay buffer
(see `acme.datasets.reverb.make_reverb_dataset` documentation).
observation_network: an optional online network to process observations
before the policy and the critic.
target_observation_network: the target observation network.
policy_optimizer: the optimizer to be applied to the DPG (policy) loss.
critic_optimizer: the optimizer to be applied to the critic loss.
clipping: whether to clip gradients by global norm.
counter: counter object used to keep track of steps.
logger: logger object to be used by learner.
checkpoint: boolean indicating whether to checkpoint the learner.
"""
# Store online and target networks.
self._policy_network = policy_network
self._critic_network = critic_network
self._target_policy_network = target_policy_network
self._target_critic_network = target_critic_network
# Make sure observation networks are snt.Module's so they have variables.
self._observation_network = tf2_utils.to_sonnet_module(observation_network)
self._target_observation_network = tf2_utils.to_sonnet_module(
target_observation_network)
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger('learner')
# Other learner parameters.
self._discount = discount
self._clipping = clipping
# Necessary to track when to update target networks.
self._num_steps = tf.Variable(0, dtype=tf.int32)
self._target_update_period = target_update_period
# Create an iterator to go through the dataset.
# TODO(b/155086959): Fix type stubs and remove.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
# Create optimizers if they aren't given.
self._critic_optimizer = critic_optimizer or snt.optimizers.Adam(1e-4)
self._policy_optimizer = policy_optimizer or snt.optimizers.Adam(1e-4)
# Expose the variables.
policy_network_to_expose = snt.Sequential(
[self._target_observation_network, self._target_policy_network])
self._variables = {
'critic': target_critic_network.variables,
'policy': policy_network_to_expose.variables,
}
self._checkpointer = tf2_savers.Checkpointer(
time_delta_minutes=5,
objects_to_save={
'counter': self._counter,
'policy': self._policy_network,
'critic': self._critic_network,
'target_policy': self._target_policy_network,
'target_critic': self._target_critic_network,
'policy_optimizer': self._policy_optimizer,
'critic_optimizer': self._critic_optimizer,
'num_steps': self._num_steps,
},
enable_checkpointing=checkpoint,
)
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
@tf.function
def _step(self):
# Update target network.
online_variables = (
*self._observation_network.variables,
*self._critic_network.variables,
*self._policy_network.variables,
)
target_variables = (
*self._target_observation_network.variables,
*self._target_critic_network.variables,
*self._target_policy_network.variables,
)
# Make online -> target network update ops.
if tf.math.mod(self._num_steps, self._target_update_period) == 0:
for src, dest in zip(online_variables, target_variables):
dest.assign(src)
self._num_steps.assign_add(1)
# Get data from replay (dropping extras if any). Note there is no
# extra data here because we do not insert any into Reverb.
inputs = next(self._iterator)
transitions: types.Transition = inputs.data
# Cast the additional discount to match the environment discount dtype.
discount = tf.cast(self._discount, dtype=transitions.discount.dtype)
with tf.GradientTape(persistent=True) as tape:
# Maybe transform the observation before feeding into policy and critic.
# Transforming the observations this way at the start of the learning
# step effectively means that the policy and critic share observation
# network weights.
o_tm1 = self._observation_network(transitions.observation)
o_t = self._target_observation_network(transitions.next_observation)
      # This stop_gradient prevents gradients from propagating into the target
# observation network. In addition, since the online policy network is
# evaluated at o_t, this also means the policy loss does not influence
# the observation network training.
o_t = tree.map_structure(tf.stop_gradient, o_t)
# Critic learning.
q_tm1 = self._critic_network(o_tm1, transitions.action)
q_t = self._target_critic_network(o_t, self._target_policy_network(o_t))
# Squeeze into the shape expected by the td_learning implementation.
q_tm1 = tf.squeeze(q_tm1, axis=-1) # [B]
q_t = tf.squeeze(q_t, axis=-1) # [B]
# Critic loss.
critic_loss = trfl.td_learning(q_tm1, transitions.reward,
discount * transitions.discount, q_t).loss
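      # Descriptive note (added): td_learning forms the target
      # r_t + discount_t * q_t and returns 0.5 * squared TD error per batch
      # element in its .loss field, which is then averaged below.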
critic_loss = tf.reduce_mean(critic_loss, axis=0)
# Actor learning.
dpg_a_t = self._policy_network(o_t)
dpg_q_t = self._critic_network(o_t, dpg_a_t)
# Actor loss. If clipping is true use dqda clipping and clip the norm.
dqda_clipping = 1.0 if self._clipping else None
policy_loss = losses.dpg(
dpg_q_t,
dpg_a_t,
tape=tape,
dqda_clipping=dqda_clipping,
clip_norm=self._clipping)
policy_loss = tf.reduce_mean(policy_loss, axis=0)
# Get trainable variables.
policy_variables = self._policy_network.trainable_variables
critic_variables = (
# In this agent, the critic loss trains the observation network.
self._observation_network.trainable_variables +
self._critic_network.trainable_variables)
# Compute gradients.
policy_gradients = tape.gradient(policy_loss, policy_variables)
critic_gradients = tape.gradient(critic_loss, critic_variables)
# Delete the tape manually because of the persistent=True flag.
del tape
# Maybe clip gradients.
if self._clipping:
policy_gradients = tf.clip_by_global_norm(policy_gradients, 40.)[0]
critic_gradients = tf.clip_by_global_norm(critic_gradients, 40.)[0]
# Apply gradients.
self._policy_optimizer.apply(policy_gradients, policy_variables)
self._critic_optimizer.apply(critic_gradients, critic_variables)
# Losses to track.
return {
'critic_loss': critic_loss,
'policy_loss': policy_loss,
}
def step(self):
# Run the learning step.
fetches = self._step()
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Update our counts and record it.
counts = self._counter.increment(steps=1, walltime=elapsed_time)
fetches.update(counts)
# Checkpoint and attempt to write the logs.
self._checkpointer.save()
self._logger.write(fetches)
def get_variables(self, names: List[str]) -> List[List[np.ndarray]]:
return [tf2_utils.to_numpy(self._variables[name]) for name in names]
|
acme-master
|
acme/agents/tf/ddpg/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test for the distributed agent."""
import acme
from acme.agents.tf import impala
from acme.testing import fakes
from acme.tf import networks
import launchpad as lp
from absl.testing import absltest
class DistributedAgentTest(absltest.TestCase):
"""Simple integration/smoke test for the distributed agent."""
def test_atari(self):
"""Tests that the agent can run for some steps without crashing."""
env_factory = lambda x: fakes.fake_atari_wrapped(oar_wrapper=True)
net_factory = lambda spec: networks.IMPALAAtariNetwork(spec.num_values)
agent = impala.DistributedIMPALA(
environment_factory=env_factory,
network_factory=net_factory,
num_actors=2,
batch_size=32,
sequence_length=5,
sequence_period=1,
)
program = agent.build()
(learner_node,) = program.groups['learner']
learner_node.disable_run()
lp.launch(program, launch_type='test_mt')
learner: acme.Learner = learner_node.create_handle().dereference()
for _ in range(5):
learner.step()
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/impala/agent_distributed_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the IMPALA Launchpad program."""
from typing import Callable, Optional
import acme
from acme import datasets
from acme import specs
from acme.adders import reverb as adders
from acme.agents.tf.impala import acting
from acme.agents.tf.impala import learning
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.tf import variable_utils as tf2_variable_utils
from acme.utils import counting
from acme.utils import loggers
import dm_env
import launchpad as lp
import reverb
import sonnet as snt
import tensorflow as tf
class DistributedIMPALA:
"""Program definition for IMPALA."""
def __init__(self,
environment_factory: Callable[[bool], dm_env.Environment],
network_factory: Callable[[specs.DiscreteArray], snt.RNNCore],
num_actors: int,
sequence_length: int,
sequence_period: int,
environment_spec: Optional[specs.EnvironmentSpec] = None,
batch_size: int = 256,
prefetch_size: int = 4,
max_queue_size: int = 10_000,
learning_rate: float = 1e-3,
discount: float = 0.99,
entropy_cost: float = 0.01,
baseline_cost: float = 0.5,
max_abs_reward: Optional[float] = None,
max_gradient_norm: Optional[float] = None,
variable_update_period: int = 1000,
save_logs: bool = False):
if environment_spec is None:
environment_spec = specs.make_environment_spec(environment_factory(False))
self._environment_factory = environment_factory
self._network_factory = network_factory
self._environment_spec = environment_spec
self._num_actors = num_actors
self._batch_size = batch_size
self._prefetch_size = prefetch_size
self._sequence_length = sequence_length
self._max_queue_size = max_queue_size
self._sequence_period = sequence_period
self._discount = discount
self._learning_rate = learning_rate
self._entropy_cost = entropy_cost
self._baseline_cost = baseline_cost
self._max_abs_reward = max_abs_reward
self._max_gradient_norm = max_gradient_norm
self._variable_update_period = variable_update_period
self._save_logs = save_logs
def queue(self):
"""The queue."""
num_actions = self._environment_spec.actions.num_values
network = self._network_factory(self._environment_spec.actions)
extra_spec = {
'core_state': network.initial_state(1),
'logits': tf.ones(shape=(1, num_actions), dtype=tf.float32)
}
# Remove batch dimensions.
extra_spec = tf2_utils.squeeze_batch_dim(extra_spec)
signature = adders.SequenceAdder.signature(
self._environment_spec,
extra_spec,
sequence_length=self._sequence_length)
queue = reverb.Table.queue(
name=adders.DEFAULT_PRIORITY_TABLE,
max_size=self._max_queue_size,
signature=signature)
return [queue]
def counter(self):
"""Creates the master counter process."""
return tf2_savers.CheckpointingRunner(
counting.Counter(), time_delta_minutes=1, subdirectory='counter')
def learner(self, queue: reverb.Client, counter: counting.Counter):
"""The Learning part of the agent."""
    # Create the networks.
network = self._network_factory(self._environment_spec.actions)
tf2_utils.create_variables(network, [self._environment_spec.observations])
# The dataset object to learn from.
dataset = datasets.make_reverb_dataset(
server_address=queue.server_address,
batch_size=self._batch_size,
prefetch_size=self._prefetch_size)
logger = loggers.make_default_logger('learner', steps_key='learner_steps')
counter = counting.Counter(counter, 'learner')
# Return the learning agent.
learner = learning.IMPALALearner(
environment_spec=self._environment_spec,
network=network,
dataset=dataset,
discount=self._discount,
learning_rate=self._learning_rate,
entropy_cost=self._entropy_cost,
baseline_cost=self._baseline_cost,
max_abs_reward=self._max_abs_reward,
max_gradient_norm=self._max_gradient_norm,
counter=counter,
logger=logger,
)
return tf2_savers.CheckpointingRunner(learner,
time_delta_minutes=5,
subdirectory='impala_learner')
def actor(
self,
replay: reverb.Client,
variable_source: acme.VariableSource,
counter: counting.Counter,
) -> acme.EnvironmentLoop:
"""The actor process."""
environment = self._environment_factory(False)
network = self._network_factory(self._environment_spec.actions)
tf2_utils.create_variables(network, [self._environment_spec.observations])
# Component to add things into the queue.
adder = adders.SequenceAdder(
client=replay,
period=self._sequence_period,
sequence_length=self._sequence_length)
variable_client = tf2_variable_utils.VariableClient(
client=variable_source,
variables={'policy': network.variables},
update_period=self._variable_update_period)
# Make sure not to use a random policy after checkpoint restoration by
# assigning variables before running the environment loop.
variable_client.update_and_wait()
# Create the agent.
actor = acting.IMPALAActor(
network=network,
variable_client=variable_client,
adder=adder)
counter = counting.Counter(counter, 'actor')
logger = loggers.make_default_logger(
'actor', save_data=False, steps_key='actor_steps')
# Create the loop to connect environment and agent.
return acme.EnvironmentLoop(environment, actor, counter, logger)
def evaluator(self, variable_source: acme.VariableSource,
counter: counting.Counter):
"""The evaluation process."""
environment = self._environment_factory(True)
network = self._network_factory(self._environment_spec.actions)
tf2_utils.create_variables(network, [self._environment_spec.observations])
variable_client = tf2_variable_utils.VariableClient(
client=variable_source,
variables={'policy': network.variables},
update_period=self._variable_update_period)
# Make sure not to use a random policy after checkpoint restoration by
# assigning variables before running the environment loop.
variable_client.update_and_wait()
# Create the agent.
actor = acting.IMPALAActor(
network=network, variable_client=variable_client)
# Create the run loop and return it.
logger = loggers.make_default_logger(
'evaluator', steps_key='evaluator_steps')
counter = counting.Counter(counter, 'evaluator')
return acme.EnvironmentLoop(environment, actor, counter, logger)
def build(self, name='impala'):
"""Build the distributed agent topology."""
program = lp.Program(name=name)
with program.group('replay'):
queue = program.add_node(lp.ReverbNode(self.queue))
with program.group('counter'):
counter = program.add_node(lp.CourierNode(self.counter))
with program.group('learner'):
learner = program.add_node(
lp.CourierNode(self.learner, queue, counter))
with program.group('evaluator'):
program.add_node(lp.CourierNode(self.evaluator, learner, counter))
with program.group('cacher'):
cacher = program.add_node(
lp.CacherNode(learner, refresh_interval_ms=2000, stale_after_ms=4000))
with program.group('actor'):
for _ in range(self._num_actors):
program.add_node(lp.CourierNode(self.actor, queue, cacher, counter))
return program
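# A minimal construction sketch (not part of the agent): builds the Launchpad
# program using the fake Atari factories from acme.testing, mirroring the smoke
# test elsewhere in the codebase. The settings are arbitrary and
# lp.launch(program) would actually start the topology.
def _example_build_program() -> lp.Program:
  from acme.testing import fakes  # Test-only helpers.
  from acme.tf import networks
  distributed_agent = DistributedIMPALA(
      environment_factory=lambda is_eval: fakes.fake_atari_wrapped(
          oar_wrapper=True),
      network_factory=lambda spec: networks.IMPALAAtariNetwork(spec.num_values),
      num_actors=2,
      sequence_length=5,
      sequence_period=1,
      batch_size=16)
  return distributed_agent.build()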
|
acme-master
|
acme/agents/tf/impala/agent_distributed.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Importance-weighted actor-learner architecture (IMPALA) agent."""
from acme.agents.tf.impala.acting import IMPALAActor
from acme.agents.tf.impala.agent import IMPALA
from acme.agents.tf.impala.agent_distributed import DistributedIMPALA
from acme.agents.tf.impala.learning import IMPALALearner
|
acme-master
|
acme/agents/tf/impala/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for IMPALA agent."""
import acme
from acme import specs
from acme.agents.tf import impala
from acme.testing import fakes
from acme.tf import networks
import numpy as np
import sonnet as snt
from absl.testing import absltest
def _make_network(action_spec: specs.DiscreteArray) -> snt.RNNCore:
return snt.DeepRNN([
snt.Flatten(),
snt.LSTM(20),
snt.nets.MLP([50, 50]),
networks.PolicyValueHead(action_spec.num_values),
])
class IMPALATest(absltest.TestCase):
# TODO(b/200509080): This test case is timing out.
@absltest.SkipTest
def test_impala(self):
# Create a fake environment to test with.
environment = fakes.DiscreteEnvironment(
num_actions=5,
num_observations=10,
obs_dtype=np.float32,
episode_length=10)
spec = specs.make_environment_spec(environment)
# Construct the agent.
agent = impala.IMPALA(
environment_spec=spec,
network=_make_network(spec.actions),
sequence_length=3,
sequence_period=3,
batch_size=6,
)
# Try running the environment loop. We have no assertions here because all
# we care about is that the agent runs without raising any errors.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=20)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/impala/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Importance weighted advantage actor-critic (IMPALA) agent implementation."""
from typing import Optional
import acme
from acme import datasets
from acme import specs
from acme import types
from acme.adders import reverb as adders
from acme.agents.tf.impala import acting
from acme.agents.tf.impala import learning
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import dm_env
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
class IMPALA(acme.Actor):
"""IMPALA Agent."""
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
network: snt.RNNCore,
sequence_length: int,
sequence_period: int,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
discount: float = 0.99,
max_queue_size: int = 100000,
batch_size: int = 16,
learning_rate: float = 1e-3,
entropy_cost: float = 0.01,
baseline_cost: float = 0.5,
max_abs_reward: Optional[float] = None,
max_gradient_norm: Optional[float] = None,
):
num_actions = environment_spec.actions.num_values
self._logger = logger or loggers.TerminalLogger('agent')
extra_spec = {
'core_state': network.initial_state(1),
'logits': tf.ones(shape=(1, num_actions), dtype=tf.float32)
}
# Remove batch dimensions.
extra_spec = tf2_utils.squeeze_batch_dim(extra_spec)
queue = reverb.Table.queue(
name=adders.DEFAULT_PRIORITY_TABLE,
max_size=max_queue_size,
signature=adders.SequenceAdder.signature(
environment_spec,
extras_spec=extra_spec,
sequence_length=sequence_length))
self._server = reverb.Server([queue], port=None)
self._can_sample = lambda: queue.can_sample(batch_size)
address = f'localhost:{self._server.port}'
# Component to add things into replay.
adder = adders.SequenceAdder(
client=reverb.Client(address),
period=sequence_period,
sequence_length=sequence_length,
)
# The dataset object to learn from.
dataset = datasets.make_reverb_dataset(
server_address=address,
batch_size=batch_size)
tf2_utils.create_variables(network, [environment_spec.observations])
self._actor = acting.IMPALAActor(network, adder)
self._learner = learning.IMPALALearner(
environment_spec=environment_spec,
network=network,
dataset=dataset,
counter=counter,
logger=logger,
discount=discount,
learning_rate=learning_rate,
entropy_cost=entropy_cost,
baseline_cost=baseline_cost,
max_gradient_norm=max_gradient_norm,
max_abs_reward=max_abs_reward,
)
def observe_first(self, timestep: dm_env.TimeStep):
self._actor.observe_first(timestep)
def observe(
self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
):
self._actor.observe(action, next_timestep)
def update(self, wait: bool = False):
# Run a number of learner steps (usually gradient steps).
while self._can_sample():
self._learner.step()
def select_action(self, observation: np.ndarray) -> int:
return self._actor.select_action(observation)
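# Sketch of the extras-spec trick used in the constructor above: build a dummy
# batched structure and strip the leading batch dimension so the Reverb
# signature describes per-step shapes. Uses the `tf`/`tf2_utils` imports above;
# the logits shape is a made-up example.
def _example_squeeze_batch_dim():
  batched = {'logits': tf.ones(shape=(1, 5), dtype=tf.float32)}
  per_step = tf2_utils.squeeze_batch_dim(batched)
  assert per_step['logits'].shape == (5,)
  return per_step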
|
acme-master
|
acme/agents/tf/impala/agent.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learner for the IMPALA actor-critic agent."""
import time
from typing import Dict, List, Mapping, Optional
import acme
from acme import specs
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
import tree
import trfl
tfd = tfp.distributions
class IMPALALearner(acme.Learner, tf2_savers.TFSaveable):
"""Learner for an importanced-weighted advantage actor-critic."""
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
network: snt.RNNCore,
dataset: tf.data.Dataset,
learning_rate: float,
discount: float = 0.99,
entropy_cost: float = 0.,
baseline_cost: float = 1.,
max_abs_reward: Optional[float] = None,
max_gradient_norm: Optional[float] = None,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
):
    # Internalise network, optimizer, and dataset.
self._env_spec = environment_spec
self._optimizer = snt.optimizers.Adam(learning_rate=learning_rate)
self._network = network
self._variables = network.variables
# TODO(b/155086959): Fix type stubs and remove.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
# Hyperparameters.
self._discount = discount
self._entropy_cost = entropy_cost
self._baseline_cost = baseline_cost
# Set up reward/gradient clipping.
if max_abs_reward is None:
max_abs_reward = np.inf
if max_gradient_norm is None:
max_gradient_norm = 1e10 # A very large number. Infinity results in NaNs.
self._max_abs_reward = tf.convert_to_tensor(max_abs_reward)
self._max_gradient_norm = tf.convert_to_tensor(max_gradient_norm)
# Set up logging/counting.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.TerminalLogger('learner', time_delta=1.)
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={'network': network}, time_delta_minutes=60.)
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
@property
def state(self) -> Mapping[str, tf2_savers.Checkpointable]:
"""Returns the stateful objects for checkpointing."""
return {
'network': self._network,
'optimizer': self._optimizer,
}
@tf.function
def _step(self) -> Dict[str, tf.Tensor]:
"""Does an SGD step on a batch of sequences."""
# Retrieve a batch of data from replay.
inputs: reverb.ReplaySample = next(self._iterator)
data = tf2_utils.batch_to_sequence(inputs.data)
observations, actions, rewards, discounts, extra = (data.observation,
data.action,
data.reward,
data.discount,
data.extras)
core_state = tree.map_structure(lambda s: s[0], extra['core_state'])
    # Use the first T-1 steps for the losses; the final value bootstraps below.
actions = actions[:-1] # [T-1]
rewards = rewards[:-1] # [T-1]
discounts = discounts[:-1] # [T-1]
with tf.GradientTape() as tape:
# Unroll current policy over observations.
(logits, values), _ = snt.static_unroll(self._network, observations,
core_state)
# Compute importance sampling weights: current policy / behavior policy.
behaviour_logits = extra['logits']
pi_behaviour = tfd.Categorical(logits=behaviour_logits[:-1])
pi_target = tfd.Categorical(logits=logits[:-1])
log_rhos = pi_target.log_prob(actions) - pi_behaviour.log_prob(actions)
# Optionally clip rewards.
rewards = tf.clip_by_value(rewards,
tf.cast(-self._max_abs_reward, rewards.dtype),
tf.cast(self._max_abs_reward, rewards.dtype))
# Critic loss.
vtrace_returns = trfl.vtrace_from_importance_weights(
log_rhos=tf.cast(log_rhos, tf.float32),
discounts=tf.cast(self._discount * discounts, tf.float32),
rewards=tf.cast(rewards, tf.float32),
values=tf.cast(values[:-1], tf.float32),
bootstrap_value=values[-1],
)
critic_loss = tf.square(vtrace_returns.vs - values[:-1])
# Policy-gradient loss.
policy_gradient_loss = trfl.policy_gradient(
policies=pi_target,
actions=actions,
action_values=vtrace_returns.pg_advantages,
)
# Entropy regulariser.
entropy_loss = trfl.policy_entropy_loss(pi_target).loss
# Combine weighted sum of actor & critic losses.
loss = tf.reduce_mean(policy_gradient_loss +
self._baseline_cost * critic_loss +
self._entropy_cost * entropy_loss)
# Compute gradients and optionally apply clipping.
gradients = tape.gradient(loss, self._network.trainable_variables)
gradients, _ = tf.clip_by_global_norm(gradients, self._max_gradient_norm)
self._optimizer.apply(gradients, self._network.trainable_variables)
metrics = {
'loss': loss,
'critic_loss': tf.reduce_mean(critic_loss),
'entropy_loss': tf.reduce_mean(entropy_loss),
'policy_gradient_loss': tf.reduce_mean(policy_gradient_loss),
}
return metrics
def step(self):
"""Does a step of SGD and logs the results."""
# Do a batch of SGD.
results = self._step()
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Update our counts and record it.
counts = self._counter.increment(steps=1, walltime=elapsed_time)
results.update(counts)
# Snapshot and attempt to write logs.
self._snapshotter.save()
self._logger.write(results)
def get_variables(self, names: List[str]) -> List[List[np.ndarray]]:
return [tf2_utils.to_numpy(self._variables)]
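# A toy, time-major V-trace call mirroring the critic target in `_step` above
# (T=3 steps, B=2 batch elements; all numbers are arbitrary). Uses the `tf` and
# `trfl` imports of this module.
def _example_vtrace_targets() -> tf.Tensor:
  t, b = 3, 2
  log_rhos = tf.zeros([t, b])          # Zero log-ratios => on-policy weights.
  discounts = 0.99 * tf.ones([t, b])
  rewards = tf.ones([t, b])
  values = tf.zeros([t, b])
  bootstrap_value = tf.zeros([b])
  vtrace_returns = trfl.vtrace_from_importance_weights(
      log_rhos=log_rhos,
      discounts=discounts,
      rewards=rewards,
      values=values,
      bootstrap_value=bootstrap_value)
  # Squared error between the V-trace targets and the value estimates.
  return tf.reduce_mean(tf.square(vtrace_returns.vs - values))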
|
acme-master
|
acme/agents/tf/impala/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IMPALA actor implementation."""
from typing import Optional
from acme import adders
from acme import core
from acme import types
from acme.tf import utils as tf2_utils
from acme.tf import variable_utils as tf2_variable_utils
import dm_env
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
class IMPALAActor(core.Actor):
"""A recurrent actor."""
def __init__(
self,
network: snt.RNNCore,
adder: Optional[adders.Adder] = None,
variable_client: Optional[tf2_variable_utils.VariableClient] = None,
):
# Store these for later use.
self._adder = adder
self._variable_client = variable_client
self._network = network
# TODO(b/152382420): Ideally we would call tf.function(network) instead but
# this results in an error when using acme RNN snapshots.
self._policy = tf.function(network.__call__)
self._state = None
self._prev_state = None
self._prev_logits = None
def select_action(self, observation: types.NestedArray) -> types.NestedArray:
# Add a dummy batch dimension and as a side effect convert numpy to TF.
batched_obs = tf2_utils.add_batch_dim(observation)
if self._state is None:
self._state = self._network.initial_state(1)
# Forward.
(logits, _), new_state = self._policy(batched_obs, self._state)
self._prev_logits = logits
self._prev_state = self._state
self._state = new_state
action = tfd.Categorical(logits).sample()
action = tf2_utils.to_numpy_squeeze(action)
return action
def observe_first(self, timestep: dm_env.TimeStep):
if self._adder:
self._adder.add_first(timestep)
# Set the state to None so that we re-initialize at the next policy call.
self._state = None
def observe(
self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
):
if not self._adder:
return
extras = {'logits': self._prev_logits, 'core_state': self._prev_state}
extras = tf2_utils.to_numpy_squeeze(extras)
self._adder.add(action, next_timestep, extras)
def update(self, wait: bool = False):
if self._variable_client:
self._variable_client.update(wait)
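# The batching helpers used by `select_action`/`observe` above, in isolation:
# add a leading batch dimension before calling the network, then squeeze it
# away (and convert to numpy) before handing results to the adder. The
# observation shape here is a made-up example.
def _example_batching_round_trip():
  import numpy as np
  observation = np.zeros((4,), dtype=np.float32)
  batched = tf2_utils.add_batch_dim(observation)    # tf.Tensor of shape [1, 4].
  squeezed = tf2_utils.to_numpy_squeeze(batched)    # numpy array of shape (4,).
  assert squeezed.shape == (4,)
  return squeezed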
|
acme-master
|
acme/agents/tf/impala/acting.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test for the distributed agent."""
from typing import Sequence
import acme
from acme import specs
from acme.agents.tf import svg0_prior
from acme.testing import fakes
from acme.tf import networks
from acme.tf import utils as tf2_utils
import launchpad as lp
import numpy as np
import sonnet as snt
from absl.testing import absltest
def make_networks(
action_spec: specs.BoundedArray,
policy_layer_sizes: Sequence[int] = (10, 10),
critic_layer_sizes: Sequence[int] = (10, 10),
):
"""Simple networks for testing.."""
# Get total number of action dimensions from action spec.
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_network = snt.Sequential([
tf2_utils.batch_concat,
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions,
tanh_mean=True,
min_scale=0.3,
init_scale=0.7,
fixed_scale=False,
use_tfd_independent=False)
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
multiplexer = networks.CriticMultiplexer()
critic_network = snt.Sequential([
multiplexer,
networks.LayerNormMLP(critic_layer_sizes, activate_final=True),
networks.NearZeroInitializedLinear(1),
])
return {
'policy': policy_network,
'critic': critic_network,
}
class DistributedAgentTest(absltest.TestCase):
"""Simple integration/smoke test for the distributed agent."""
def test_control_suite(self):
"""Tests that the agent can run on the control suite without crashing."""
agent = svg0_prior.DistributedSVG0(
environment_factory=lambda x: fakes.ContinuousEnvironment(),
network_factory=make_networks,
num_actors=2,
batch_size=32,
min_replay_size=32,
max_replay_size=1000,
)
program = agent.build()
(learner_node,) = program.groups['learner']
learner_node.disable_run()
lp.launch(program, launch_type='test_mt')
learner: acme.Learner = learner_node.create_handle().dereference()
for _ in range(5):
learner.step()
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/svg0_prior/agent_distributed_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the SVG0 agent class."""
import copy
from typing import Callable, Dict, Optional
import acme
from acme import specs
from acme.agents.tf.svg0_prior import agent
from acme.tf import savers as tf2_savers
from acme.utils import counting
from acme.utils import loggers
from acme.utils import lp_utils
import dm_env
import launchpad as lp
import reverb
import sonnet as snt
class DistributedSVG0:
"""Program definition for SVG0."""
def __init__(
self,
environment_factory: Callable[[bool], dm_env.Environment],
network_factory: Callable[[specs.BoundedArray], Dict[str, snt.Module]],
num_actors: int = 1,
num_caches: int = 0,
environment_spec: Optional[specs.EnvironmentSpec] = None,
batch_size: int = 256,
prefetch_size: int = 4,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: Optional[float] = 32.0,
sequence_length: int = 10,
sigma: float = 0.3,
discount: float = 0.99,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
prior_optimizer: Optional[snt.Optimizer] = None,
distillation_cost: Optional[float] = 1e-3,
entropy_regularizer_cost: Optional[float] = 1e-3,
target_update_period: int = 100,
max_actor_steps: Optional[int] = None,
log_every: float = 10.0,
):
if not environment_spec:
environment_spec = specs.make_environment_spec(environment_factory(False))
# TODO(mwhoffman): Make network_factory directly return the struct.
# TODO(mwhoffman): Make the factory take the entire spec.
def wrapped_network_factory(action_spec):
networks_dict = network_factory(action_spec)
networks = agent.SVG0Networks(
policy_network=networks_dict.get('policy'),
critic_network=networks_dict.get('critic'),
prior_network=networks_dict.get('prior', None),)
return networks
self._environment_factory = environment_factory
self._network_factory = wrapped_network_factory
self._environment_spec = environment_spec
self._sigma = sigma
self._num_actors = num_actors
self._num_caches = num_caches
self._max_actor_steps = max_actor_steps
self._log_every = log_every
self._sequence_length = sequence_length
self._builder = agent.SVG0Builder(
# TODO(mwhoffman): pass the config dataclass in directly.
# TODO(mwhoffman): use the limiter rather than the workaround below.
agent.SVG0Config(
discount=discount,
batch_size=batch_size,
prefetch_size=prefetch_size,
target_update_period=target_update_period,
policy_optimizer=policy_optimizer,
critic_optimizer=critic_optimizer,
prior_optimizer=prior_optimizer,
min_replay_size=min_replay_size,
max_replay_size=max_replay_size,
samples_per_insert=samples_per_insert,
sequence_length=sequence_length,
sigma=sigma,
distillation_cost=distillation_cost,
entropy_regularizer_cost=entropy_regularizer_cost,
))
def replay(self):
"""The replay storage."""
return self._builder.make_replay_tables(self._environment_spec,
self._sequence_length)
def counter(self):
return tf2_savers.CheckpointingRunner(counting.Counter(),
time_delta_minutes=1,
subdirectory='counter')
def coordinator(self, counter: counting.Counter):
return lp_utils.StepsLimiter(counter, self._max_actor_steps)
def learner(
self,
replay: reverb.Client,
counter: counting.Counter,
):
"""The Learning part of the agent."""
# Create the networks to optimize (online) and target networks.
online_networks = self._network_factory(self._environment_spec.actions)
target_networks = copy.deepcopy(online_networks)
# Initialize the networks.
online_networks.init(self._environment_spec)
target_networks.init(self._environment_spec)
dataset = self._builder.make_dataset_iterator(replay)
counter = counting.Counter(counter, 'learner')
logger = loggers.make_default_logger(
'learner', time_delta=self._log_every, steps_key='learner_steps')
return self._builder.make_learner(
networks=(online_networks, target_networks),
dataset=dataset,
counter=counter,
logger=logger,
)
def actor(
self,
replay: reverb.Client,
variable_source: acme.VariableSource,
counter: counting.Counter,
) -> acme.EnvironmentLoop:
"""The actor process."""
# Create the behavior policy.
networks = self._network_factory(self._environment_spec.actions)
networks.init(self._environment_spec)
policy_network = networks.make_policy()
# Create the agent.
actor = self._builder.make_actor(
policy_network=policy_network,
adder=self._builder.make_adder(replay),
variable_source=variable_source,
)
# Create the environment.
environment = self._environment_factory(False)
# Create logger and counter; actors will not spam bigtable.
counter = counting.Counter(counter, 'actor')
logger = loggers.make_default_logger(
'actor',
save_data=False,
time_delta=self._log_every,
steps_key='actor_steps')
# Create the loop to connect environment and agent.
return acme.EnvironmentLoop(environment, actor, counter, logger)
def evaluator(
self,
variable_source: acme.VariableSource,
counter: counting.Counter,
logger: Optional[loggers.Logger] = None,
):
"""The evaluation process."""
# Create the behavior policy.
networks = self._network_factory(self._environment_spec.actions)
networks.init(self._environment_spec)
policy_network = networks.make_policy()
# Create the agent.
actor = self._builder.make_actor(
policy_network=policy_network,
variable_source=variable_source,
deterministic_policy=True,
)
# Make the environment.
environment = self._environment_factory(True)
# Create logger and counter.
counter = counting.Counter(counter, 'evaluator')
logger = logger or loggers.make_default_logger(
'evaluator',
time_delta=self._log_every,
steps_key='evaluator_steps',
)
# Create the run loop and return it.
return acme.EnvironmentLoop(environment, actor, counter, logger)
def build(self, name='svg0'):
"""Build the distributed agent topology."""
program = lp.Program(name=name)
with program.group('replay'):
replay = program.add_node(lp.ReverbNode(self.replay))
with program.group('counter'):
counter = program.add_node(lp.CourierNode(self.counter))
if self._max_actor_steps:
with program.group('coordinator'):
_ = program.add_node(lp.CourierNode(self.coordinator, counter))
with program.group('learner'):
learner = program.add_node(lp.CourierNode(self.learner, replay, counter))
with program.group('evaluator'):
program.add_node(lp.CourierNode(self.evaluator, learner, counter))
if not self._num_caches:
# Use our learner as a single variable source.
sources = [learner]
else:
with program.group('cacher'):
# Create a set of learner caches.
sources = []
for _ in range(self._num_caches):
cacher = program.add_node(
lp.CacherNode(
learner, refresh_interval_ms=2000, stale_after_ms=4000))
sources.append(cacher)
with program.group('actor'):
# Add actors which pull round-robin from our variable sources.
for actor_id in range(self._num_actors):
source = sources[actor_id % len(sources)]
program.add_node(lp.CourierNode(self.actor, replay, source, counter))
return program
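# The actor-to-source assignment used in `build` above, shown in isolation with
# toy values (pure Python, no Launchpad involved): actors pull round-robin from
# the available variable sources (the learner itself or its caches).
def _example_round_robin(num_actors: int = 5):
  sources = ['cache_0', 'cache_1']  # Hypothetical cache handles.
  # With 5 actors this yields:
  # ['cache_0', 'cache_1', 'cache_0', 'cache_1', 'cache_0'].
  return [sources[actor_id % len(sources)] for actor_id in range(num_actors)]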
|
acme-master
|
acme/agents/tf/svg0_prior/agent_distributed.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of a SVG0 agent with prior."""
from acme.agents.tf.svg0_prior.agent import SVG0
from acme.agents.tf.svg0_prior.agent_distributed import DistributedSVG0
from acme.agents.tf.svg0_prior.learning import SVG0Learner
from acme.agents.tf.svg0_prior.networks import make_default_networks
from acme.agents.tf.svg0_prior.networks import make_network_with_prior
|
acme-master
|
acme/agents/tf/svg0_prior/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared helpers for different experiment flavours."""
import functools
from typing import Mapping, Sequence, Optional
from acme import specs
from acme import types
from acme.agents.tf.svg0_prior import utils as svg0_utils
from acme.tf import networks
from acme.tf import utils as tf2_utils
import numpy as np
import sonnet as snt
def make_default_networks(
action_spec: specs.BoundedArray,
policy_layer_sizes: Sequence[int] = (256, 256, 256),
critic_layer_sizes: Sequence[int] = (512, 512, 256),
) -> Mapping[str, types.TensorTransformation]:
"""Creates networks used by the agent."""
# Get total number of action dimensions from action spec.
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_network = snt.Sequential([
tf2_utils.batch_concat,
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions,
tanh_mean=True,
min_scale=0.3,
init_scale=0.7,
fixed_scale=False,
use_tfd_independent=False)
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
multiplexer = networks.CriticMultiplexer(
action_network=networks.ClipToSpec(action_spec))
critic_network = snt.Sequential([
multiplexer,
networks.LayerNormMLP(critic_layer_sizes, activate_final=True),
networks.NearZeroInitializedLinear(1),
])
return {
"policy": policy_network,
"critic": critic_network,
}
def make_network_with_prior(
action_spec: specs.BoundedArray,
policy_layer_sizes: Sequence[int] = (200, 100),
critic_layer_sizes: Sequence[int] = (400, 300),
prior_layer_sizes: Sequence[int] = (200, 100),
policy_keys: Optional[Sequence[str]] = None,
prior_keys: Optional[Sequence[str]] = None,
) -> Mapping[str, types.TensorTransformation]:
"""Creates networks used by the agent."""
# Get total number of action dimensions from action spec.
num_dimensions = np.prod(action_spec.shape, dtype=int)
flatten_concat_policy = functools.partial(
svg0_utils.batch_concat_selection, concat_keys=policy_keys)
flatten_concat_prior = functools.partial(
svg0_utils.batch_concat_selection, concat_keys=prior_keys)
policy_network = snt.Sequential([
flatten_concat_policy,
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions,
tanh_mean=True,
min_scale=0.1,
init_scale=0.7,
fixed_scale=False,
use_tfd_independent=False)
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
multiplexer = networks.CriticMultiplexer(
observation_network=flatten_concat_policy,
action_network=networks.ClipToSpec(action_spec))
critic_network = snt.Sequential([
multiplexer,
networks.LayerNormMLP(critic_layer_sizes, activate_final=True),
networks.NearZeroInitializedLinear(1),
])
prior_network = snt.Sequential([
flatten_concat_prior,
networks.LayerNormMLP(prior_layer_sizes, activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions,
tanh_mean=True,
min_scale=0.1,
init_scale=0.7,
fixed_scale=False,
use_tfd_independent=False)
])
return {
"policy": policy_network,
"critic": critic_network,
"prior": prior_network,
}
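# Construction sketch: build the default networks for a toy 2-D bounded action
# spec and create their variables with a made-up 8-D observation spec. Purely
# illustrative (the specs are not taken from any real environment); uses the
# `specs`, `np` and `tf2_utils` imports of this module.
def _example_make_default_networks():
  action_spec = specs.BoundedArray(
      shape=(2,), dtype=np.float32, minimum=-1.0, maximum=1.0)
  observation_spec = specs.Array(shape=(8,), dtype=np.float32)
  nets = make_default_networks(action_spec)
  tf2_utils.create_variables(nets['policy'], [observation_spec])
  tf2_utils.create_variables(nets['critic'], [observation_spec, action_spec])
  return nets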
|
acme-master
|
acme/agents/tf/svg0_prior/networks.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the SVG agent."""
import sys
from typing import Dict, Sequence
import acme
from acme import specs
from acme import types
from acme.agents.tf import svg0_prior
from acme.testing import fakes
from acme.tf import networks
from acme.tf import utils as tf2_utils
import numpy as np
import sonnet as snt
from absl.testing import absltest
def make_networks(
action_spec: types.NestedSpec,
policy_layer_sizes: Sequence[int] = (10, 10),
critic_layer_sizes: Sequence[int] = (10, 10),
) -> Dict[str, snt.Module]:
"""Creates networks used by the agent."""
# Get total number of action dimensions from action spec.
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_network = snt.Sequential([
tf2_utils.batch_concat,
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions,
tanh_mean=True,
min_scale=0.3,
init_scale=0.7,
fixed_scale=False,
use_tfd_independent=False)
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
multiplexer = networks.CriticMultiplexer()
critic_network = snt.Sequential([
multiplexer,
networks.LayerNormMLP(critic_layer_sizes, activate_final=True),
networks.NearZeroInitializedLinear(1),
])
return {
'policy': policy_network,
'critic': critic_network,
}
class SVG0Test(absltest.TestCase):
def test_svg0(self):
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(episode_length=10)
spec = specs.make_environment_spec(environment)
# Create the networks.
agent_networks = make_networks(spec.actions)
# Construct the agent.
agent = svg0_prior.SVG0(
environment_spec=spec,
policy_network=agent_networks['policy'],
critic_network=agent_networks['critic'],
batch_size=10,
samples_per_insert=2,
min_replay_size=10,
)
# Try running the environment loop. We have no assertions here because all
# we care about is that the agent runs without raising any errors.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=2)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/svg0_prior/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for SVG0 algorithm with priors."""
import collections
from typing import Tuple, Optional, Dict, Iterable
from acme import types
from acme.tf import utils as tf2_utils
import sonnet as snt
import tensorflow as tf
import tree
class OnlineTargetPiQ(snt.Module):
"""Core to unroll online and target policies and Q functions at once.
A core that runs online and target policies and Q functions. This can be more
efficient if the core needs to be unrolled across time and called many times.
"""
def __init__(self,
online_pi: snt.Module,
online_q: snt.Module,
target_pi: snt.Module,
target_q: snt.Module,
num_samples: int,
online_prior: Optional[snt.Module] = None,
target_prior: Optional[snt.Module] = None,
name='OnlineTargetPiQ'):
super().__init__(name)
self._online_pi = online_pi
self._target_pi = target_pi
self._online_q = online_q
self._target_q = target_q
self._online_prior = online_prior
self._target_prior = target_prior
self._num_samples = num_samples
output_list = [
'online_samples', 'target_samples', 'target_log_probs_behavior_actions',
'online_log_probs', 'online_q', 'target_q'
]
if online_prior is not None:
output_list += ['analytic_kl_divergence', 'analytic_kl_to_target']
self._output_tuple = collections.namedtuple(
'OnlineTargetPiQ', output_list)
def __call__(self, input_obs_and_action: Tuple[tf.Tensor, tf.Tensor]):
(obs, action) = input_obs_and_action
online_pi_dist = self._online_pi(obs)
target_pi_dist = self._target_pi(obs)
online_samples = online_pi_dist.sample(self._num_samples)
target_samples = target_pi_dist.sample(self._num_samples)
target_log_probs_behavior_actions = target_pi_dist.log_prob(action)
online_log_probs = online_pi_dist.log_prob(tf.stop_gradient(online_samples))
online_q_out = self._online_q(obs, action)
target_q_out = self._target_q(obs, action)
output_list = [
online_samples, target_samples, target_log_probs_behavior_actions,
online_log_probs, online_q_out, target_q_out
]
if self._online_prior is not None:
prior_dist = self._online_prior(obs)
target_prior_dist = self._target_prior(obs)
analytic_kl_divergence = online_pi_dist.kl_divergence(prior_dist)
analytic_kl_to_target = online_pi_dist.kl_divergence(target_prior_dist)
output_list += [analytic_kl_divergence, analytic_kl_to_target]
output = self._output_tuple(*output_list)
return output
def static_rnn(core: snt.Module, inputs: types.NestedTensor,
unroll_length: int):
"""Unroll core along inputs for unroll_length steps.
  Note: for time-major input tensors whose leading dimension is shorter than
  `unroll_length`, `None` is passed to the core for the missing steps.
Args:
core: an instance of snt.Module.
inputs: a `nest` of time-major input tensors.
unroll_length: number of time steps to unroll.
Returns:
step_outputs: a `nest` of time-major stacked output tensors of length
`unroll_length`.
"""
step_outputs = []
for time_dim in range(unroll_length):
inputs_t = tree.map_structure(
lambda t, i_=time_dim: t[i_] if i_ < t.shape[0] else None, inputs)
step_output = core(inputs_t)
step_outputs.append(step_output)
step_outputs = _nest_stack(step_outputs)
return step_outputs
def mask_out_restarting(tensor: tf.Tensor, start_of_episode: tf.Tensor):
"""Mask out `tensor` taken on the step that resets the environment.
Args:
tensor: a time-major 2-D `Tensor` of shape [T, B].
start_of_episode: a 2-D `Tensor` of shape [T, B] that contains the points
where the episode restarts.
Returns:
    tensor of shape [T, B] whose entries are zeroed out on the steps where
    `start_of_episode` is True.
"""
tensor.get_shape().assert_has_rank(2)
start_of_episode.get_shape().assert_has_rank(2)
weights = tf.cast(~start_of_episode, dtype=tf.float32)
masked_tensor = tensor * weights
return masked_tensor
def batch_concat_selection(observation_dict: Dict[str, types.NestedTensor],
concat_keys: Optional[Iterable[str]] = None,
output_dtype=tf.float32) -> tf.Tensor:
"""Concatenate a dict of observations into 2-D tensors."""
concat_keys = concat_keys or sorted(observation_dict.keys())
to_concat = []
for obs in concat_keys:
if obs not in observation_dict:
raise KeyError(
'Missing observation. Requested: {} (available: {})'.format(
obs, list(observation_dict.keys())))
to_concat.append(tf.cast(observation_dict[obs], output_dtype))
return tf2_utils.batch_concat(to_concat)
def _nest_stack(list_of_nests, axis=0):
"""Convert a list of nests to a nest of stacked lists."""
return tree.map_structure(lambda *ts: tf.stack(ts, axis=axis), *list_of_nests)
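# `batch_concat_selection` in isolation: pick two entries of an observation
# dict and concatenate them into a single [B, D] tensor. The key names and
# shapes are toy examples, not taken from a real environment.
def _example_batch_concat_selection() -> tf.Tensor:
  observation_dict = {
      'position': tf.zeros([4, 3]),
      'velocity': tf.zeros([4, 2]),
      'unused': tf.zeros([4, 7]),
  }
  flat = batch_concat_selection(
      observation_dict, concat_keys=('position', 'velocity'))
  assert flat.shape == (4, 5)  # Batch of 4, concatenated feature size 3 + 2.
  return flat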
|
acme-master
|
acme/agents/tf/svg0_prior/utils.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SVG0 agent implementation."""
import copy
import dataclasses
from typing import Iterator, List, Optional, Tuple
from acme import adders
from acme import core
from acme import datasets
from acme import specs
from acme.adders import reverb as reverb_adders
from acme.agents import agent
from acme.agents.tf.svg0_prior import acting
from acme.agents.tf.svg0_prior import learning
from acme.tf import utils
from acme.tf import variable_utils
from acme.utils import counting
from acme.utils import loggers
import reverb
import sonnet as snt
import tensorflow as tf
@dataclasses.dataclass
class SVG0Config:
"""Configuration options for the agent."""
discount: float = 0.99
batch_size: int = 256
prefetch_size: int = 4
target_update_period: int = 100
policy_optimizer: Optional[snt.Optimizer] = None
critic_optimizer: Optional[snt.Optimizer] = None
prior_optimizer: Optional[snt.Optimizer] = None
min_replay_size: int = 1000
max_replay_size: int = 1000000
samples_per_insert: Optional[float] = 32.0
sequence_length: int = 10
sigma: float = 0.3
replay_table_name: str = reverb_adders.DEFAULT_PRIORITY_TABLE
distillation_cost: Optional[float] = 1e-3
entropy_regularizer_cost: Optional[float] = 1e-3
@dataclasses.dataclass
class SVG0Networks:
"""Structure containing the networks for SVG0."""
policy_network: snt.Module
critic_network: snt.Module
prior_network: Optional[snt.Module]
def __init__(
self,
policy_network: snt.Module,
critic_network: snt.Module,
prior_network: Optional[snt.Module] = None
):
    # This constructor is written explicitly (rather than relying on the one
    # generated by the dataclass decorator) so that the prior network can be
    # omitted and default to None.
# TODO(mwhoffman): use Protocol rather than Module/TensorTransformation.
self.policy_network = policy_network
self.critic_network = critic_network
self.prior_network = prior_network
def init(self, environment_spec: specs.EnvironmentSpec):
"""Initialize the networks given an environment spec."""
# Get observation and action specs.
act_spec = environment_spec.actions
obs_spec = environment_spec.observations
# Create variables for the policy and critic nets.
_ = utils.create_variables(self.policy_network, [obs_spec])
_ = utils.create_variables(self.critic_network, [obs_spec, act_spec])
if self.prior_network is not None:
_ = utils.create_variables(self.prior_network, [obs_spec])
def make_policy(
self,
) -> snt.Module:
"""Create a single network which evaluates the policy."""
return self.policy_network
def make_prior(
self,
) -> snt.Module:
"""Create a single network which evaluates the prior."""
behavior_prior = self.prior_network
return behavior_prior
class SVG0Builder:
"""Builder for SVG0 which constructs individual components of the agent."""
def __init__(self, config: SVG0Config):
self._config = config
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
sequence_length: int,
) -> List[reverb.Table]:
"""Create tables to insert data into."""
if self._config.samples_per_insert is None:
# We will take a samples_per_insert ratio of None to mean that there is
# no limit, i.e. this only implies a min size limit.
limiter = reverb.rate_limiters.MinSize(self._config.min_replay_size)
else:
error_buffer = max(1, self._config.samples_per_insert)
limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._config.min_replay_size,
samples_per_insert=self._config.samples_per_insert,
error_buffer=error_buffer)
extras_spec = {
'log_prob': tf.ones(
shape=(), dtype=tf.float32)
}
replay_table = reverb.Table(
name=self._config.replay_table_name,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._config.max_replay_size,
rate_limiter=limiter,
signature=reverb_adders.SequenceAdder.signature(
environment_spec,
extras_spec=extras_spec,
sequence_length=sequence_length + 1))
return [replay_table]
def make_dataset_iterator(
self,
reverb_client: reverb.Client,
) -> Iterator[reverb.ReplaySample]:
"""Create a dataset iterator to use for learning/updating the agent."""
# The dataset provides an interface to sample from replay.
dataset = datasets.make_reverb_dataset(
table=self._config.replay_table_name,
server_address=reverb_client.server_address,
batch_size=self._config.batch_size,
prefetch_size=self._config.prefetch_size)
# TODO(b/155086959): Fix type stubs and remove.
return iter(dataset) # pytype: disable=wrong-arg-types
def make_adder(
self,
replay_client: reverb.Client,
) -> adders.Adder:
"""Create an adder which records data generated by the actor/environment."""
return reverb_adders.SequenceAdder(
client=replay_client,
sequence_length=self._config.sequence_length+1,
priority_fns={self._config.replay_table_name: lambda x: 1.},
period=self._config.sequence_length,
end_of_episode_behavior=reverb_adders.EndBehavior.CONTINUE,
)
def make_actor(
self,
policy_network: snt.Module,
adder: Optional[adders.Adder] = None,
variable_source: Optional[core.VariableSource] = None,
deterministic_policy: Optional[bool] = False,
):
"""Create an actor instance."""
if variable_source:
# Create the variable client responsible for keeping the actor up-to-date.
variable_client = variable_utils.VariableClient(
client=variable_source,
variables={'policy': policy_network.variables},
update_period=1000,
)
# Make sure not to use a random policy after checkpoint restoration by
# assigning variables before running the environment loop.
variable_client.update_and_wait()
else:
variable_client = None
# Create the actor which defines how we take actions.
return acting.SVG0Actor(
policy_network=policy_network,
adder=adder,
variable_client=variable_client,
deterministic_policy=deterministic_policy
)
def make_learner(
self,
networks: Tuple[SVG0Networks, SVG0Networks],
dataset: Iterator[reverb.ReplaySample],
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = False,
):
"""Creates an instance of the learner."""
online_networks, target_networks = networks
# The learner updates the parameters (and initializes them).
return learning.SVG0Learner(
policy_network=online_networks.policy_network,
critic_network=online_networks.critic_network,
target_policy_network=target_networks.policy_network,
target_critic_network=target_networks.critic_network,
prior_network=online_networks.prior_network,
target_prior_network=target_networks.prior_network,
policy_optimizer=self._config.policy_optimizer,
critic_optimizer=self._config.critic_optimizer,
prior_optimizer=self._config.prior_optimizer,
distillation_cost=self._config.distillation_cost,
entropy_regularizer_cost=self._config.entropy_regularizer_cost,
discount=self._config.discount,
target_update_period=self._config.target_update_period,
dataset_iterator=dataset,
counter=counter,
logger=logger,
checkpoint=checkpoint,
)
class SVG0(agent.Agent):
"""SVG0 Agent with prior.
  This implements a single-process SVG0 agent: an actor-critic algorithm that
  generates data via a behavior policy, inserts sequences of transitions into
  a replay buffer, and periodically updates the policy (and as a result the
behavior) by sampling uniformly from this buffer.
"""
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
policy_network: snt.Module,
critic_network: snt.Module,
discount: float = 0.99,
batch_size: int = 256,
prefetch_size: int = 4,
target_update_period: int = 100,
prior_network: Optional[snt.Module] = None,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
prior_optimizer: Optional[snt.Optimizer] = None,
distillation_cost: Optional[float] = 1e-3,
entropy_regularizer_cost: Optional[float] = 1e-3,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: float = 32.0,
sequence_length: int = 10,
sigma: float = 0.3,
replay_table_name: str = reverb_adders.DEFAULT_PRIORITY_TABLE,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = True,
):
"""Initialize the agent.
Args:
environment_spec: description of the actions, observations, etc.
policy_network: the online (optimized) policy.
critic_network: the online critic.
discount: discount to use for TD updates.
batch_size: batch size for updates.
prefetch_size: size to prefetch from replay.
target_update_period: number of learner steps to perform before updating
the target networks.
prior_network: an optional `behavior prior` to regularize against.
policy_optimizer: optimizer for the policy network updates.
critic_optimizer: optimizer for the critic network updates.
prior_optimizer: optimizer for the prior network updates.
distillation_cost: a multiplier to be used when adding distillation
against the prior to the losses.
entropy_regularizer_cost: a multiplier used for per state sample based
entropy added to the actor loss.
min_replay_size: minimum replay size before updating.
max_replay_size: maximum replay size.
samples_per_insert: number of samples to take from replay for every insert
that is made.
sequence_length: number of timesteps to store for each trajectory.
sigma: standard deviation of zero-mean, Gaussian exploration noise.
replay_table_name: string indicating what name to give the replay table.
counter: counter object used to keep track of steps.
logger: logger object to be used by learner.
checkpoint: boolean indicating whether to checkpoint the learner.
"""
# Create the Builder object which will internally create agent components.
builder = SVG0Builder(
# TODO(mwhoffman): pass the config dataclass in directly.
# TODO(mwhoffman): use the limiter rather than the workaround below.
# Right now this modifies min_replay_size and samples_per_insert so that
# they are not controlled by a limiter and are instead handled by the
# Agent base class (the above TODO directly references this behavior).
SVG0Config(
discount=discount,
batch_size=batch_size,
prefetch_size=prefetch_size,
target_update_period=target_update_period,
policy_optimizer=policy_optimizer,
critic_optimizer=critic_optimizer,
prior_optimizer=prior_optimizer,
distillation_cost=distillation_cost,
entropy_regularizer_cost=entropy_regularizer_cost,
min_replay_size=1, # Let the Agent class handle this.
max_replay_size=max_replay_size,
samples_per_insert=None, # Let the Agent class handle this.
sequence_length=sequence_length,
sigma=sigma,
replay_table_name=replay_table_name,
))
# TODO(mwhoffman): pass the network dataclass in directly.
online_networks = SVG0Networks(policy_network=policy_network,
critic_network=critic_network,
prior_network=prior_network,)
# Target networks are just a copy of the online networks.
target_networks = copy.deepcopy(online_networks)
# Initialize the networks.
online_networks.init(environment_spec)
target_networks.init(environment_spec)
# TODO(mwhoffman): either make this Dataclass or pass only one struct.
# The network struct passed to make_learner is just a tuple for the
# time-being (for backwards compatibility).
networks = (online_networks, target_networks)
# Create the behavior policy.
policy_network = online_networks.make_policy()
# Create the replay server and grab its address.
replay_tables = builder.make_replay_tables(environment_spec,
sequence_length)
replay_server = reverb.Server(replay_tables, port=None)
replay_client = reverb.Client(f'localhost:{replay_server.port}')
# Create actor, dataset, and learner for generating, storing, and consuming
# data respectively.
adder = builder.make_adder(replay_client)
actor = builder.make_actor(policy_network, adder)
dataset = builder.make_dataset_iterator(replay_client)
learner = builder.make_learner(networks, dataset, counter, logger,
checkpoint)
super().__init__(
actor=actor,
learner=learner,
min_observations=max(batch_size, min_replay_size),
observations_per_step=float(batch_size) / samples_per_insert)
# Save the replay so we don't garbage collect it.
self._replay_server = replay_server
|
acme-master
|
acme/agents/tf/svg0_prior/agent.py
|
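A quick numerical reading of the Agent superclass arguments at the end of the constructor above: with the default batch_size, samples_per_insert and min_replay_size, learning starts after 1000 environment steps, and one learner step is taken roughly every 8 environment steps. The sketch below only restates that arithmetic; the interpretation of observations_per_step follows the Agent base class convention and is stated here as an assumption rather than read from this file.

# Back-of-the-envelope check of the update cadence implied by the arguments
# passed to the Agent superclass above (default constructor values assumed).
batch_size = 256
samples_per_insert = 32.0
min_replay_size = 1000

min_observations = max(batch_size, min_replay_size)      # 1000 steps before learning starts.
observations_per_step = batch_size / samples_per_insert  # ~8 env steps per learner step.
print(min_observations, observations_per_step)           # 1000 8.0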
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SVG learner implementation."""
import time
from typing import Dict, Iterator, List, Optional
import acme
from acme.agents.tf.svg0_prior import utils as svg0_utils
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
from trfl import continuous_retrace_ops
_MIN_LOG_VAL = 1e-20
class SVG0Learner(acme.Learner):
"""SVG0 learner with optional prior.
This is the learning component of an SVG0 agent. IE it takes a dataset as
input and implements update functionality to learn from this dataset.
"""
def __init__(
self,
policy_network: snt.Module,
critic_network: snt.Module,
target_policy_network: snt.Module,
target_critic_network: snt.Module,
discount: float,
target_update_period: int,
dataset_iterator: Iterator[reverb.ReplaySample],
prior_network: Optional[snt.Module] = None,
target_prior_network: Optional[snt.Module] = None,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
prior_optimizer: Optional[snt.Optimizer] = None,
distillation_cost: Optional[float] = 1e-3,
entropy_regularizer_cost: Optional[float] = 1e-3,
num_action_samples: int = 10,
lambda_: float = 1.0,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = True,
):
"""Initializes the learner.
Args:
policy_network: the online (optimized) policy.
critic_network: the online critic.
target_policy_network: the target policy (which lags behind the online
policy).
target_critic_network: the target critic.
discount: discount to use for TD updates.
target_update_period: number of learner steps to perform before updating
the target networks.
dataset_iterator: dataset to learn from, whether fixed or from a replay
buffer (see `acme.datasets.reverb.make_reverb_dataset` documentation).
prior_network: the online (optimized) prior.
target_prior_network: the target prior (which lags behind the online
prior).
policy_optimizer: the optimizer to be applied to the SVG-0 (policy) loss.
critic_optimizer: the optimizer to be applied to the retrace-based critic
(Bellman) loss.
prior_optimizer: the optimizer to be applied to the prior (distillation)
loss.
distillation_cost: a multiplier applied to the distillation (KL-to-prior)
term added to the losses.
entropy_regularizer_cost: a multiplier applied to the per-state,
sample-based entropy term added to the actor loss.
num_action_samples: the number of action samples used to estimate the
value function and the sample-based entropy.
lambda_: the `lambda` value to be used with retrace.
counter: counter object used to keep track of steps.
logger: logger object to be used by learner.
checkpoint: boolean indicating whether to checkpoint the learner.
"""
# Store online and target networks.
self._policy_network = policy_network
self._critic_network = critic_network
self._target_policy_network = target_policy_network
self._target_critic_network = target_critic_network
self._prior_network = prior_network
self._target_prior_network = target_prior_network
self._lambda = lambda_
self._num_action_samples = num_action_samples
self._distillation_cost = distillation_cost
self._entropy_regularizer_cost = entropy_regularizer_cost
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger('learner')
# Other learner parameters.
self._discount = discount
# Necessary to track when to update target networks.
self._num_steps = tf.Variable(0, dtype=tf.int32)
self._target_update_period = target_update_period
# Batch dataset and create iterator.
self._iterator = dataset_iterator
# Create optimizers if they aren't given.
self._critic_optimizer = critic_optimizer or snt.optimizers.Adam(1e-4)
self._policy_optimizer = policy_optimizer or snt.optimizers.Adam(1e-4)
self._prior_optimizer = prior_optimizer or snt.optimizers.Adam(1e-4)
# Expose the variables.
self._variables = {
'critic': self._critic_network.variables,
'policy': self._policy_network.variables,
}
if self._prior_network is not None:
self._variables['prior'] = self._prior_network.variables
# Create a checkpointer and snapshotter objects.
self._checkpointer = None
self._snapshotter = None
if checkpoint:
objects_to_save = {
'counter': self._counter,
'policy': self._policy_network,
'critic': self._critic_network,
'target_policy': self._target_policy_network,
'target_critic': self._target_critic_network,
'policy_optimizer': self._policy_optimizer,
'critic_optimizer': self._critic_optimizer,
'num_steps': self._num_steps,
}
if self._prior_network is not None:
objects_to_save['prior'] = self._prior_network
objects_to_save['target_prior'] = self._target_prior_network
objects_to_save['prior_optimizer'] = self._prior_optimizer
self._checkpointer = tf2_savers.Checkpointer(
subdirectory='svg0_learner',
objects_to_save=objects_to_save)
objects_to_snapshot = {
'policy': self._policy_network,
'critic': self._critic_network,
}
if self._prior_network is not None:
objects_to_snapshot['prior'] = self._prior_network
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save=objects_to_snapshot)
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
@tf.function
def _step(self) -> Dict[str, tf.Tensor]:
# Update target network
online_variables = [
*self._critic_network.variables,
*self._policy_network.variables,
]
if self._prior_network is not None:
online_variables += [*self._prior_network.variables]
online_variables = tuple(online_variables)
target_variables = [
*self._target_critic_network.variables,
*self._target_policy_network.variables,
]
if self._prior_network is not None:
target_variables += [*self._target_prior_network.variables]
target_variables = tuple(target_variables)
# Make online -> target network update ops.
if tf.math.mod(self._num_steps, self._target_update_period) == 0:
for src, dest in zip(online_variables, target_variables):
dest.assign(src)
self._num_steps.assign_add(1)
# Get data from replay (dropping extras if any) and flip to `[T, B, ...]`.
sample: reverb.ReplaySample = next(self._iterator)
data = tf2_utils.batch_to_sequence(sample.data)
observations, actions, rewards, discounts, extra = (data.observation,
data.action,
data.reward,
data.discount,
data.extras)
online_target_pi_q = svg0_utils.OnlineTargetPiQ(
online_pi=self._policy_network,
online_q=self._critic_network,
target_pi=self._target_policy_network,
target_q=self._target_critic_network,
num_samples=self._num_action_samples,
online_prior=self._prior_network,
target_prior=self._target_prior_network,
)
with tf.GradientTape(persistent=True) as tape:
step_outputs = svg0_utils.static_rnn(
core=online_target_pi_q,
inputs=(observations, actions),
unroll_length=rewards.shape[0])
# Flip target samples to have shape [S, T+1, B, ...] where 'S' is the
# number of action samples taken.
target_pi_samples = tf2_utils.batch_to_sequence(
step_outputs.target_samples)
# Tile observations to have shape [S, T+1, B,..].
tiled_observations = tf2_utils.tile_nested(observations,
self._num_action_samples)
# Finally compute target Q values on the new action samples.
# Shape: [S, T+1, B, 1]
target_q_target_pi_samples = snt.BatchApply(self._target_critic_network,
3)(tiled_observations,
target_pi_samples)
# Compute the value estimate by averaging over the action dimension.
# Shape: [T+1, B, 1].
target_v_target_pi = tf.reduce_mean(target_q_target_pi_samples, axis=0)
# Split the target V's into the target for learning
# `value_function_target` and the bootstrap value. Shape: [T, B].
value_function_target = tf.squeeze(target_v_target_pi[:-1], axis=-1)
# Shape: [B].
bootstrap_value = tf.squeeze(target_v_target_pi[-1], axis=-1)
# When learning with a prior, add entropy terms to value targets.
if self._prior_network is not None:
value_function_target -= self._distillation_cost * tf.stop_gradient(
step_outputs.analytic_kl_to_target[:-1]
)
bootstrap_value -= self._distillation_cost * tf.stop_gradient(
step_outputs.analytic_kl_to_target[-1])
# Get target log probs and behavior log probs from rollout.
# Shape: [T+1, B].
target_log_probs_behavior_actions = (
step_outputs.target_log_probs_behavior_actions)
behavior_log_probs = extra['log_prob']
# Calculate importance weights. Shape: [T+1, B].
rhos = tf.exp(target_log_probs_behavior_actions - behavior_log_probs)
# Filter the importance weights to mask out episode restarts. Ignore the
# last action and consider the step type of the next step for masking.
# Shape: [T, B].
episode_start_mask = tf2_utils.batch_to_sequence(
sample.data.start_of_episode)[1:]
rhos = svg0_utils.mask_out_restarting(rhos[:-1], episode_start_mask)
# Compute the log importance weights with a small value added for
# stability.
# Shape: [T, B]
log_rhos = tf.math.log(rhos + _MIN_LOG_VAL)
# Retrieve the target and online Q values and throw away the last action.
# Shape: [T, B].
target_q_values = tf.squeeze(step_outputs.target_q[:-1], -1)
online_q_values = tf.squeeze(step_outputs.online_q[:-1], -1)
# Flip target samples to have shape [S, T+1, B, ...] where 'S' is the
# number of action samples taken.
online_pi_samples = tf2_utils.batch_to_sequence(
step_outputs.online_samples)
target_q_online_pi_samples = snt.BatchApply(self._target_critic_network,
3)(tiled_observations,
online_pi_samples)
expected_q = tf.reduce_mean(
tf.squeeze(target_q_online_pi_samples, -1), axis=0)
# Flip online_log_probs to be of shape [S, T+1, B] and then compute
# entropy by averaging over num samples. Final shape: [T+1, B].
online_log_probs = tf2_utils.batch_to_sequence(
step_outputs.online_log_probs)
sample_based_entropy = tf.reduce_mean(-online_log_probs, axis=0)
retrace_outputs = continuous_retrace_ops.retrace_from_importance_weights(
log_rhos=log_rhos,
discounts=self._discount * discounts[:-1],
rewards=rewards[:-1],
q_values=target_q_values,
values=value_function_target,
bootstrap_value=bootstrap_value,
lambda_=self._lambda,
)
# Critic loss. Shape: [T, B].
critic_loss = 0.5 * tf.math.squared_difference(
tf.stop_gradient(retrace_outputs.qs), online_q_values)
# Policy loss: SVG0 with sample-based entropy. Shape: [T, B].
policy_loss = -(
expected_q + self._entropy_regularizer_cost * sample_based_entropy)
policy_loss = policy_loss[:-1]
if self._prior_network is not None:
# When training the prior, also add the per-timestep KL cost.
policy_loss += (
self._distillation_cost * step_outputs.analytic_kl_to_target[:-1])
# Ensure episode restarts are masked out when computing the losses.
critic_loss = svg0_utils.mask_out_restarting(critic_loss,
episode_start_mask)
critic_loss = tf.reduce_mean(critic_loss)
policy_loss = svg0_utils.mask_out_restarting(policy_loss,
episode_start_mask)
policy_loss = tf.reduce_mean(policy_loss)
if self._prior_network is not None:
prior_loss = step_outputs.analytic_kl_divergence[:-1]
prior_loss = svg0_utils.mask_out_restarting(prior_loss,
episode_start_mask)
prior_loss = tf.reduce_mean(prior_loss)
# Get trainable variables.
policy_variables = self._policy_network.trainable_variables
critic_variables = self._critic_network.trainable_variables
# Compute gradients.
policy_gradients = tape.gradient(policy_loss, policy_variables)
critic_gradients = tape.gradient(critic_loss, critic_variables)
if self._prior_network is not None:
prior_variables = self._prior_network.trainable_variables
prior_gradients = tape.gradient(prior_loss, prior_variables)
# Delete the tape manually because of the persistent=True flag.
del tape
# Apply gradients.
self._policy_optimizer.apply(policy_gradients, policy_variables)
self._critic_optimizer.apply(critic_gradients, critic_variables)
losses = {
'critic_loss': critic_loss,
'policy_loss': policy_loss,
}
if self._prior_network is not None:
self._prior_optimizer.apply(prior_gradients, prior_variables)
losses['prior_loss'] = prior_loss
# Losses to track.
return losses
def step(self):
# Run the learning step.
fetches = self._step()
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Update our counts and record it.
counts = self._counter.increment(steps=1, walltime=elapsed_time)
fetches.update(counts)
# Checkpoint and attempt to write the logs.
if self._checkpointer is not None:
self._checkpointer.save()
if self._snapshotter is not None:
self._snapshotter.save()
self._logger.write(fetches)
def get_variables(self, names: List[str]) -> List[List[np.ndarray]]:
return [tf2_utils.to_numpy(self._variables[name]) for name in names]
|
acme-master
|
acme/agents/tf/svg0_prior/learning.py
|
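The critic update above regresses the online Q-values towards retrace targets built from truncated importance weights. The toy sketch below shows the same trfl call in isolation with shapes [T=3, B=1]; only the shapes mirror the learner, the numerical values are made up for illustration.

# Toy, self-contained illustration of the retrace target computation used in
# SVG0Learner._step(); all numerical values below are arbitrary.
import tensorflow as tf
from trfl import continuous_retrace_ops

log_rhos = tf.math.log(tf.constant([[0.9], [1.1], [0.7]]))  # [T, B] importance weights.
discounts = 0.99 * tf.ones([3, 1])                          # [T, B]
rewards = tf.constant([[0.0], [1.0], [0.0]])                # [T, B]
q_values = tf.constant([[1.0], [0.5], [0.2]])               # [T, B] target Q(s, a).
values = tf.constant([[0.8], [0.6], [0.3]])                 # [T, B] value targets.
bootstrap_value = tf.constant([0.1])                        # [B]

outputs = continuous_retrace_ops.retrace_from_importance_weights(
    log_rhos=log_rhos,
    discounts=discounts,
    rewards=rewards,
    q_values=q_values,
    values=values,
    bootstrap_value=bootstrap_value,
    lambda_=1.0)

# outputs.qs are the regression targets used in the critic loss above.
print(outputs.qs)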
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SVG0 actor implementation."""
from typing import Optional
from acme import adders
from acme import types
from acme.agents.tf import actors
from acme.tf import utils as tf2_utils
from acme.tf import variable_utils as tf2_variable_utils
import dm_env
import sonnet as snt
class SVG0Actor(actors.FeedForwardActor):
"""An actor that also returns `log_prob`."""
def __init__(
self,
policy_network: snt.Module,
adder: Optional[adders.Adder] = None,
variable_client: Optional[tf2_variable_utils.VariableClient] = None,
deterministic_policy: Optional[bool] = False,
):
super().__init__(policy_network, adder, variable_client)
self._log_prob = None
self._deterministic_policy = deterministic_policy
def select_action(self, observation: types.NestedArray) -> types.NestedArray:
# Add a dummy batch dimension and as a side effect convert numpy to TF.
batched_observation = tf2_utils.add_batch_dim(observation)
# Compute the policy, conditioned on the observation.
policy = self._policy_network(batched_observation)
if self._deterministic_policy:
action = policy.mean()
else:
action = policy.sample()
self._log_prob = policy.log_prob(action)
return tf2_utils.to_numpy_squeeze(action)
def observe(
self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
):
if not self._adder:
return
extras = {'log_prob': self._log_prob}
extras = tf2_utils.to_numpy_squeeze(extras)
self._adder.add(action, next_timestep, extras)
|
acme-master
|
acme/agents/tf/svg0_prior/acting.py
|
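SVG0Actor expects the policy network to return a distribution so it can record log-probabilities for the off-policy correction. The sketch below builds a throwaway Gaussian policy head to show that interaction; the architecture is purely illustrative and not one of the networks shipped with the agent.

# Illustrative-only policy head and actor call; not the agent's real networks.
import numpy as np
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
from acme.agents.tf.svg0_prior.acting import SVG0Actor

tfd = tfp.distributions


class ToyGaussianPolicy(snt.Module):
  """Maps an observation to a diagonal Gaussian over a 2-d action."""

  def __init__(self):
    super().__init__(name='toy_gaussian_policy')
    self._torso = snt.nets.MLP([32, 4])

  def __call__(self, observation):
    mean, log_std = tf.split(self._torso(observation), 2, axis=-1)
    return tfd.MultivariateNormalDiag(loc=mean, scale_diag=tf.exp(log_std))


actor = SVG0Actor(policy_network=ToyGaussianPolicy())
action = actor.select_action(np.zeros(3, dtype=np.float32))
# The actor now holds the log-probability of `action`, which is written to the
# 'log_prob' extra the next time observe() is called with an adder attached.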
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test for the distributed agent."""
import acme
from acme.agents.tf import dqn
from acme.testing import fakes
from acme.tf import networks
import launchpad as lp
from absl.testing import absltest
class DistributedAgentTest(absltest.TestCase):
"""Simple integration/smoke test for the distributed agent."""
def test_atari(self):
"""Tests that the agent can run for some steps without crashing."""
env_factory = lambda x: fakes.fake_atari_wrapped()
net_factory = lambda spec: networks.DQNAtariNetwork(spec.num_values)
agent = dqn.DistributedDQN(
environment_factory=env_factory,
network_factory=net_factory,
num_actors=2,
batch_size=32,
min_replay_size=32,
max_replay_size=1000,
)
program = agent.build()
(learner_node,) = program.groups['learner']
learner_node.disable_run()
lp.launch(program, launch_type='test_mt')
learner: acme.Learner = learner_node.create_handle().dereference()
for _ in range(5):
learner.step()
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/dqn/agent_distributed_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the DQN agent class."""
import copy
from typing import Callable, Optional
import acme
from acme import datasets
from acme import specs
from acme.adders import reverb as adders
from acme.agents.tf import actors
from acme.agents.tf.dqn import learning
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.tf import variable_utils as tf2_variable_utils
from acme.utils import counting
from acme.utils import loggers
from acme.utils import lp_utils
import dm_env
import launchpad as lp
import numpy as np
import reverb
import sonnet as snt
import trfl
class DistributedDQN:
"""Distributed DQN agent."""
def __init__(
self,
environment_factory: Callable[[bool], dm_env.Environment],
network_factory: Callable[[specs.DiscreteArray], snt.Module],
num_actors: int,
num_caches: int = 1,
batch_size: int = 256,
prefetch_size: int = 4,
target_update_period: int = 100,
samples_per_insert: float = 32.0,
min_replay_size: int = 1000,
max_replay_size: int = 1_000_000,
importance_sampling_exponent: float = 0.2,
priority_exponent: float = 0.6,
n_step: int = 5,
learning_rate: float = 1e-3,
evaluator_epsilon: float = 0.,
max_actor_steps: Optional[int] = None,
discount: float = 0.99,
environment_spec: Optional[specs.EnvironmentSpec] = None,
variable_update_period: int = 1000,
):
assert num_caches >= 1
if environment_spec is None:
environment_spec = specs.make_environment_spec(environment_factory(False))
self._environment_factory = environment_factory
self._network_factory = network_factory
self._num_actors = num_actors
self._num_caches = num_caches
self._env_spec = environment_spec
self._batch_size = batch_size
self._prefetch_size = prefetch_size
self._target_update_period = target_update_period
self._samples_per_insert = samples_per_insert
self._min_replay_size = min_replay_size
self._max_replay_size = max_replay_size
self._importance_sampling_exponent = importance_sampling_exponent
self._priority_exponent = priority_exponent
self._n_step = n_step
self._learning_rate = learning_rate
self._evaluator_epsilon = evaluator_epsilon
self._max_actor_steps = max_actor_steps
self._discount = discount
self._variable_update_period = variable_update_period
def replay(self):
"""The replay storage."""
if self._samples_per_insert:
limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._min_replay_size,
samples_per_insert=self._samples_per_insert,
error_buffer=self._batch_size)
else:
limiter = reverb.rate_limiters.MinSize(self._min_replay_size)
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Prioritized(self._priority_exponent),
remover=reverb.selectors.Fifo(),
max_size=self._max_replay_size,
rate_limiter=limiter,
signature=adders.NStepTransitionAdder.signature(self._env_spec))
return [replay_table]
def counter(self):
"""Creates the master counter process."""
return tf2_savers.CheckpointingRunner(
counting.Counter(), time_delta_minutes=1, subdirectory='counter')
def coordinator(self, counter: counting.Counter, max_actor_steps: int):
return lp_utils.StepsLimiter(counter, max_actor_steps)
def learner(self, replay: reverb.Client, counter: counting.Counter):
"""The Learning part of the agent."""
# Create the networks.
network = self._network_factory(self._env_spec.actions)
target_network = copy.deepcopy(network)
tf2_utils.create_variables(network, [self._env_spec.observations])
tf2_utils.create_variables(target_network, [self._env_spec.observations])
# The dataset object to learn from.
replay_client = reverb.Client(replay.server_address)
dataset = datasets.make_reverb_dataset(
server_address=replay.server_address,
batch_size=self._batch_size,
prefetch_size=self._prefetch_size)
logger = loggers.make_default_logger('learner', steps_key='learner_steps')
# Return the learning agent.
counter = counting.Counter(counter, 'learner')
learner = learning.DQNLearner(
network=network,
target_network=target_network,
discount=self._discount,
importance_sampling_exponent=self._importance_sampling_exponent,
learning_rate=self._learning_rate,
target_update_period=self._target_update_period,
dataset=dataset,
replay_client=replay_client,
counter=counter,
logger=logger)
return tf2_savers.CheckpointingRunner(
learner, subdirectory='dqn_learner', time_delta_minutes=60)
def actor(
self,
replay: reverb.Client,
variable_source: acme.VariableSource,
counter: counting.Counter,
epsilon: float,
) -> acme.EnvironmentLoop:
"""The actor process."""
environment = self._environment_factory(False)
network = self._network_factory(self._env_spec.actions)
# Just inline the policy network here.
policy_network = snt.Sequential([
network,
lambda q: trfl.epsilon_greedy(q, epsilon=epsilon).sample(),
])
tf2_utils.create_variables(policy_network, [self._env_spec.observations])
variable_client = tf2_variable_utils.VariableClient(
client=variable_source,
variables={'policy': policy_network.trainable_variables},
update_period=self._variable_update_period)
# Make sure not to use a random policy after checkpoint restoration by
# assigning variables before running the environment loop.
variable_client.update_and_wait()
# Component to add things into replay.
adder = adders.NStepTransitionAdder(
client=replay,
n_step=self._n_step,
discount=self._discount,
)
# Create the agent.
actor = actors.FeedForwardActor(policy_network, adder, variable_client)
# Create the loop to connect environment and agent.
counter = counting.Counter(counter, 'actor')
logger = loggers.make_default_logger(
'actor', save_data=False, steps_key='actor_steps')
return acme.EnvironmentLoop(environment, actor, counter, logger)
def evaluator(
self,
variable_source: acme.VariableSource,
counter: counting.Counter,
):
"""The evaluation process."""
environment = self._environment_factory(True)
network = self._network_factory(self._env_spec.actions)
# Just inline the policy network here.
policy_network = snt.Sequential([
network,
lambda q: trfl.epsilon_greedy(q, self._evaluator_epsilon).sample(),
])
tf2_utils.create_variables(policy_network, [self._env_spec.observations])
variable_client = tf2_variable_utils.VariableClient(
client=variable_source,
variables={'policy': policy_network.trainable_variables},
update_period=self._variable_update_period)
# Make sure not to use a random policy after checkpoint restoration by
# assigning variables before running the environment loop.
variable_client.update_and_wait()
# Create the agent.
actor = actors.FeedForwardActor(
policy_network, variable_client=variable_client)
# Create the run loop and return it.
logger = loggers.make_default_logger(
'evaluator', steps_key='evaluator_steps')
counter = counting.Counter(counter, 'evaluator')
return acme.EnvironmentLoop(
environment, actor, counter=counter, logger=logger)
def build(self, name='dqn'):
"""Build the distributed agent topology."""
program = lp.Program(name=name)
with program.group('replay'):
replay = program.add_node(lp.ReverbNode(self.replay))
with program.group('counter'):
counter = program.add_node(lp.CourierNode(self.counter))
if self._max_actor_steps:
program.add_node(
lp.CourierNode(self.coordinator, counter, self._max_actor_steps))
with program.group('learner'):
learner = program.add_node(lp.CourierNode(self.learner, replay, counter))
with program.group('evaluator'):
program.add_node(lp.CourierNode(self.evaluator, learner, counter))
# Generate an epsilon for each actor.
epsilons = np.flip(np.logspace(1, 8, self._num_actors, base=0.4), axis=0)
with program.group('cacher'):
# Create a set of learner caches.
sources = []
for _ in range(self._num_caches):
cacher = program.add_node(
lp.CacherNode(
learner, refresh_interval_ms=2000, stale_after_ms=4000))
sources.append(cacher)
with program.group('actor'):
# Add actors which pull round-robin from our variable sources.
for actor_id, epsilon in enumerate(epsilons):
source = sources[actor_id % len(sources)]
program.add_node(
lp.CourierNode(self.actor, replay, source, counter, epsilon))
return program
|
acme-master
|
acme/agents/tf/dqn/agent_distributed.py
|
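The per-actor exploration schedule in build() is easy to misread, so the short check below evaluates it for four actors. The printed numbers are approximate and only meant to show that, after the flip, actor 0 gets the greediest epsilon and the last actor the most exploratory one.

# Numerical sanity check of the epsilon schedule used in DistributedDQN.build().
import numpy as np

num_actors = 4
epsilons = np.flip(np.logspace(1, 8, num_actors, base=0.4), axis=0)
print(epsilons)  # approx. [0.00066, 0.0055, 0.047, 0.4]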
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of a deep Q-networks (DQN) agent."""
from acme.agents.tf.dqn.agent import DQN
from acme.agents.tf.dqn.agent_distributed import DistributedDQN
from acme.agents.tf.dqn.learning import DQNLearner
|
acme-master
|
acme/agents/tf/dqn/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for DQN agent."""
import acme
from acme import specs
from acme.agents.tf import dqn
from acme.testing import fakes
import numpy as np
import sonnet as snt
from absl.testing import absltest
def _make_network(action_spec: specs.DiscreteArray) -> snt.Module:
return snt.Sequential([
snt.Flatten(),
snt.nets.MLP([50, 50, action_spec.num_values]),
])
class DQNTest(absltest.TestCase):
def test_dqn(self):
# Create a fake environment to test with.
environment = fakes.DiscreteEnvironment(
num_actions=5,
num_observations=10,
obs_dtype=np.float32,
episode_length=10)
spec = specs.make_environment_spec(environment)
# Construct the agent.
agent = dqn.DQN(
environment_spec=spec,
network=_make_network(spec.actions),
batch_size=10,
samples_per_insert=2,
min_replay_size=10)
# Try running the environment loop. We have no assertions here because all
# we care about is that the agent runs without raising any errors.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=2)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/dqn/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN agent implementation."""
import copy
from typing import Optional
from acme import datasets
from acme import specs
from acme.adders import reverb as adders
from acme.agents import agent
from acme.agents.tf import actors
from acme.agents.tf.dqn import learning
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import loggers
import reverb
import sonnet as snt
import tensorflow as tf
import trfl
class DQN(agent.Agent):
"""DQN agent.
This implements a single-process DQN agent. This is a simple Q-learning
algorithm that inserts N-step transitions into a replay buffer, and
periodically updates its policy by sampling these transitions using
prioritization.
"""
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
network: snt.Module,
batch_size: int = 256,
prefetch_size: int = 4,
target_update_period: int = 100,
samples_per_insert: float = 32.0,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
importance_sampling_exponent: float = 0.2,
priority_exponent: float = 0.6,
n_step: int = 5,
epsilon: Optional[tf.Variable] = None,
learning_rate: float = 1e-3,
discount: float = 0.99,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = True,
checkpoint_subpath: str = '~/acme',
policy_network: Optional[snt.Module] = None,
max_gradient_norm: Optional[float] = None,
):
"""Initialize the agent.
Args:
environment_spec: description of the actions, observations, etc.
network: the online Q network (the one being optimized)
batch_size: batch size for updates.
prefetch_size: size to prefetch from replay.
target_update_period: number of learner steps to perform before updating
the target networks.
samples_per_insert: number of samples to take from replay for every insert
that is made.
min_replay_size: minimum replay size before updating.
max_replay_size: maximum replay size.
importance_sampling_exponent: power to which importance weights are raised
before normalizing.
priority_exponent: exponent used in prioritized sampling.
n_step: number of steps to squash into a single transition.
epsilon: probability of taking a random action; ignored if a policy
network is given.
learning_rate: learning rate for the q-network update.
discount: discount to use for TD updates.
logger: logger object to be used by learner.
checkpoint: boolean indicating whether to checkpoint the learner.
checkpoint_subpath: string indicating where the agent should save
checkpoints and snapshots.
policy_network: if given, this will be used as the policy network.
Otherwise, an epsilon-greedy policy using the online Q network will be
created. The policy network is used by the actor to sample actions.
max_gradient_norm: used for gradient clipping.
"""
# Create a replay server to add data to. This uses no limiter behavior in
# order to allow the Agent interface to handle it.
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Prioritized(priority_exponent),
remover=reverb.selectors.Fifo(),
max_size=max_replay_size,
rate_limiter=reverb.rate_limiters.MinSize(1),
signature=adders.NStepTransitionAdder.signature(environment_spec))
self._server = reverb.Server([replay_table], port=None)
# The adder is used to insert observations into replay.
address = f'localhost:{self._server.port}'
adder = adders.NStepTransitionAdder(
client=reverb.Client(address),
n_step=n_step,
discount=discount)
# The dataset provides an interface to sample from replay.
replay_client = reverb.Client(address)
dataset = datasets.make_reverb_dataset(
server_address=address,
batch_size=batch_size,
prefetch_size=prefetch_size)
# Create an epsilon-greedy policy network by default.
if policy_network is None:
# Use a constant 0.05 epsilon-greedy policy by default.
if epsilon is None:
epsilon = tf.Variable(0.05, trainable=False)
policy_network = snt.Sequential([
network,
lambda q: trfl.epsilon_greedy(q, epsilon=epsilon).sample(),
])
# Create a target network.
target_network = copy.deepcopy(network)
# Ensure that we create the variables before proceeding (maybe not needed).
tf2_utils.create_variables(network, [environment_spec.observations])
tf2_utils.create_variables(target_network, [environment_spec.observations])
# Create the actor which defines how we take actions.
actor = actors.FeedForwardActor(policy_network, adder)
# The learner updates the parameters (and initializes them).
learner = learning.DQNLearner(
network=network,
target_network=target_network,
discount=discount,
importance_sampling_exponent=importance_sampling_exponent,
learning_rate=learning_rate,
target_update_period=target_update_period,
dataset=dataset,
replay_client=replay_client,
max_gradient_norm=max_gradient_norm,
logger=logger,
checkpoint=checkpoint,
save_directory=checkpoint_subpath)
if checkpoint:
self._checkpointer = tf2_savers.Checkpointer(
directory=checkpoint_subpath,
objects_to_save=learner.state,
subdirectory='dqn_learner',
time_delta_minutes=60.)
else:
self._checkpointer = None
super().__init__(
actor=actor,
learner=learner,
min_observations=max(batch_size, min_replay_size),
observations_per_step=float(batch_size) / samples_per_insert)
def update(self):
super().update()
if self._checkpointer is not None:
self._checkpointer.save()
|
acme-master
|
acme/agents/tf/dqn/agent.py
|
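Because the DQN constructor above accepts `epsilon` as a tf.Variable, exploration can be annealed from outside the agent without rebuilding the policy network. A rough sketch follows; the decay schedule and call site are arbitrary assumptions, not something the agent prescribes.

# Sketch only: the decay constants and the commented-out agent construction are
# assumptions; `spec` and `network` are assumed defined as in the test above.
import tensorflow as tf

epsilon = tf.Variable(1.0, trainable=False)
# agent = DQN(environment_spec=spec, network=network, epsilon=epsilon)


def decay_epsilon(eps: tf.Variable, decay: float = 0.995,
                  minimum: float = 0.05) -> None:
  """Multiplicatively decays epsilon towards a floor, e.g. after each episode."""
  eps.assign(tf.maximum(eps * decay, minimum))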
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN learner implementation."""
import time
from typing import Dict, List, Optional, Union
import acme
from acme import types
from acme.adders import reverb as adders
from acme.tf import losses
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
import trfl
class DQNLearner(acme.Learner, tf2_savers.TFSaveable):
"""DQN learner.
This is the learning component of a DQN agent. It takes a dataset as input
and implements update functionality to learn from this dataset. Optionally
it takes a replay client as well to allow for updating of priorities.
"""
def __init__(
self,
network: snt.Module,
target_network: snt.Module,
discount: float,
importance_sampling_exponent: float,
learning_rate: float,
target_update_period: int,
dataset: tf.data.Dataset,
max_abs_reward: Optional[float] = 1.,
huber_loss_parameter: float = 1.,
replay_client: Optional[Union[reverb.Client, reverb.TFClient]] = None,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = True,
save_directory: str = '~/acme',
max_gradient_norm: Optional[float] = None,
):
"""Initializes the learner.
Args:
network: the online Q network (the one being optimized)
target_network: the target Q critic (which lags behind the online net).
discount: discount to use for TD updates.
importance_sampling_exponent: power to which importance weights are raised
before normalizing.
learning_rate: learning rate for the q-network update.
target_update_period: number of learner steps to perform before updating
the target networks.
dataset: dataset to learn from, whether fixed or from a replay buffer (see
`acme.datasets.reverb.make_reverb_dataset` documentation).
max_abs_reward: Optional maximum absolute value for the reward.
huber_loss_parameter: Quadratic-linear boundary for Huber loss.
replay_client: client to replay to allow for updating priorities.
counter: Counter object for (potentially distributed) counting.
logger: Logger object for writing logs to.
checkpoint: boolean indicating whether to checkpoint the learner.
save_directory: string indicating where the learner should save
checkpoints and snapshots.
max_gradient_norm: used for gradient clipping.
"""
# TODO(mwhoffman): stop allowing replay_client to be passed as a TFClient.
# This is just here for backwards compatibility for agents which reuse this
# Learner and still pass a TFClient instance.
if isinstance(replay_client, reverb.TFClient):
# TODO(b/170419518): open source pytype does not understand this
# isinstance() check because it does not have a way of getting precise
# type information for pip-installed packages.
replay_client = reverb.Client(replay_client._server_address) # pytype: disable=attribute-error
# Internalise agent components (replay buffer, networks, optimizer).
# TODO(b/155086959): Fix type stubs and remove.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
self._network = network
self._target_network = target_network
self._optimizer = snt.optimizers.Adam(learning_rate)
self._replay_client = replay_client
# Make sure to initialize the optimizer so that its variables (e.g. the Adam
# moments) are included in the state returned by the learner (which can then
# be checkpointed and restored).
self._optimizer._initialize(network.trainable_variables) # pylint: disable= protected-access
# Internalise the hyperparameters.
self._discount = discount
self._target_update_period = target_update_period
self._importance_sampling_exponent = importance_sampling_exponent
self._max_abs_reward = max_abs_reward
self._huber_loss_parameter = huber_loss_parameter
if max_gradient_norm is None:
max_gradient_norm = 1e10 # A very large number. Infinity results in NaNs.
self._max_gradient_norm = tf.convert_to_tensor(max_gradient_norm)
# Learner state.
self._variables: List[List[tf.Tensor]] = [network.trainable_variables]
self._num_steps = tf.Variable(0, dtype=tf.int32)
# Internalise logging/counting objects.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.TerminalLogger('learner', time_delta=1.)
# Create a snapshotter object.
if checkpoint:
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={'network': network},
directory=save_directory,
time_delta_minutes=60.)
else:
self._snapshotter = None
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
@tf.function
def _step(self) -> Dict[str, tf.Tensor]:
"""Do a step of SGD and update the priorities."""
# Pull out the data needed for updates/priorities.
inputs = next(self._iterator)
transitions: types.Transition = inputs.data
keys, probs = inputs.info[:2]
with tf.GradientTape() as tape:
# Evaluate our networks.
q_tm1 = self._network(transitions.observation)
q_t_value = self._target_network(transitions.next_observation)
q_t_selector = self._network(transitions.next_observation)
# The rewards and discounts have to have the same type as network values.
r_t = tf.cast(transitions.reward, q_tm1.dtype)
if self._max_abs_reward:
r_t = tf.clip_by_value(r_t, -self._max_abs_reward, self._max_abs_reward)
d_t = tf.cast(transitions.discount, q_tm1.dtype) * tf.cast(
self._discount, q_tm1.dtype)
# Compute the loss.
_, extra = trfl.double_qlearning(q_tm1, transitions.action, r_t, d_t,
q_t_value, q_t_selector)
loss = losses.huber(extra.td_error, self._huber_loss_parameter)
# Get the importance weights.
importance_weights = 1. / probs # [B]
importance_weights **= self._importance_sampling_exponent
importance_weights /= tf.reduce_max(importance_weights)
# Reweight.
loss *= tf.cast(importance_weights, loss.dtype) # [B]
loss = tf.reduce_mean(loss, axis=[0]) # []
# Do a step of SGD.
gradients = tape.gradient(loss, self._network.trainable_variables)
gradients, _ = tf.clip_by_global_norm(gradients, self._max_gradient_norm)
self._optimizer.apply(gradients, self._network.trainable_variables)
# Get the priorities that we'll use to update.
priorities = tf.abs(extra.td_error)
# Periodically update the target network.
if tf.math.mod(self._num_steps, self._target_update_period) == 0:
for src, dest in zip(self._network.variables,
self._target_network.variables):
dest.assign(src)
self._num_steps.assign_add(1)
# Report loss & statistics for logging.
fetches = {
'loss': loss,
'keys': keys,
'priorities': priorities,
}
return fetches
def step(self):
# Do a batch of SGD.
result = self._step()
# Get the keys and priorities.
keys = result.pop('keys')
priorities = result.pop('priorities')
# Update the priorities in the replay buffer.
if self._replay_client:
self._replay_client.mutate_priorities(
table=adders.DEFAULT_PRIORITY_TABLE,
updates=dict(zip(keys.numpy(), priorities.numpy())))
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Update our counts and record it.
counts = self._counter.increment(steps=1, walltime=elapsed_time)
result.update(counts)
# Snapshot and attempt to write logs.
if self._snapshotter is not None:
self._snapshotter.save()
self._logger.write(result)
def get_variables(self, names: List[str]) -> List[np.ndarray]:
return tf2_utils.to_numpy(self._variables)
@property
def state(self):
"""Returns the stateful parts of the learner for checkpointing."""
return {
'network': self._network,
'target_network': self._target_network,
'optimizer': self._optimizer,
'num_steps': self._num_steps
}
|
acme-master
|
acme/agents/tf/dqn/learning.py
|
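The importance-weight correction in _step() is worth seeing on toy numbers: transitions that Reverb samples with high probability are down-weighted, and the exponent tempers the correction. The probabilities below are invented for illustration.

# Toy illustration of the prioritized-replay importance weights from
# DQNLearner._step(); the sampling probabilities are made up.
import tensorflow as tf

probs = tf.constant([0.5, 0.25, 0.125, 0.125])
importance_sampling_exponent = 0.2

importance_weights = 1.0 / probs
importance_weights **= importance_sampling_exponent
importance_weights /= tf.reduce_max(importance_weights)
print(importance_weights)  # approx. [0.76, 0.87, 1.0, 1.0]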
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test for the distributed agent."""
import acme
from acme.agents.tf import r2d2
from acme.testing import fakes
from acme.tf import networks
import launchpad as lp
from absl.testing import absltest
class DistributedAgentTest(absltest.TestCase):
"""Simple integration/smoke test for the distributed agent."""
def test_agent(self):
env_factory = lambda x: fakes.fake_atari_wrapped(oar_wrapper=True)
net_factory = lambda spec: networks.R2D2AtariNetwork(spec.num_values)
agent = r2d2.DistributedR2D2(
environment_factory=env_factory,
network_factory=net_factory,
num_actors=2,
batch_size=32,
min_replay_size=32,
max_replay_size=1000,
replay_period=1,
burn_in_length=1,
trace_length=10,
)
program = agent.build()
(learner_node,) = program.groups['learner']
learner_node.disable_run()
lp.launch(program, launch_type='test_mt')
learner: acme.Learner = learner_node.create_handle().dereference()
for _ in range(5):
learner.step()
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/r2d2/agent_distributed_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the Recurrent DQN Launchpad program."""
import copy
from typing import Callable, List, Optional
import acme
from acme import datasets
from acme import specs
from acme.adders import reverb as adders
from acme.agents.tf import actors
from acme.agents.tf.r2d2 import learning
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.tf import variable_utils as tf2_variable_utils
from acme.utils import counting
from acme.utils import loggers
import dm_env
import launchpad as lp
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
import trfl
class DistributedR2D2:
"""Program definition for Recurrent Replay Distributed DQN (R2D2)."""
def __init__(self,
environment_factory: Callable[[bool], dm_env.Environment],
network_factory: Callable[[specs.DiscreteArray], snt.RNNCore],
num_actors: int,
burn_in_length: int,
trace_length: int,
replay_period: int,
environment_spec: Optional[specs.EnvironmentSpec] = None,
batch_size: int = 256,
prefetch_size: int = tf.data.experimental.AUTOTUNE,
min_replay_size: int = 1000,
max_replay_size: int = 100_000,
samples_per_insert: float = 32.0,
discount: float = 0.99,
priority_exponent: float = 0.6,
importance_sampling_exponent: float = 0.2,
variable_update_period: int = 1000,
learning_rate: float = 1e-3,
evaluator_epsilon: float = 0.,
target_update_period: int = 100,
save_logs: bool = False):
if environment_spec is None:
environment_spec = specs.make_environment_spec(environment_factory(False))
self._environment_factory = environment_factory
self._network_factory = network_factory
self._environment_spec = environment_spec
self._num_actors = num_actors
self._batch_size = batch_size
self._prefetch_size = prefetch_size
self._min_replay_size = min_replay_size
self._max_replay_size = max_replay_size
self._samples_per_insert = samples_per_insert
self._burn_in_length = burn_in_length
self._trace_length = trace_length
self._replay_period = replay_period
self._discount = discount
self._target_update_period = target_update_period
self._variable_update_period = variable_update_period
self._save_logs = save_logs
self._priority_exponent = priority_exponent
self._learning_rate = learning_rate
self._evaluator_epsilon = evaluator_epsilon
self._importance_sampling_exponent = importance_sampling_exponent
self._obs_spec = environment_spec.observations
def replay(self) -> List[reverb.Table]:
"""The replay storage."""
network = self._network_factory(self._environment_spec.actions)
extra_spec = {
'core_state': network.initial_state(1),
}
# Remove batch dimensions.
extra_spec = tf2_utils.squeeze_batch_dim(extra_spec)
if self._samples_per_insert:
limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._min_replay_size,
samples_per_insert=self._samples_per_insert,
error_buffer=self._batch_size)
else:
limiter = reverb.rate_limiters.MinSize(self._min_replay_size)
table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Prioritized(self._priority_exponent),
remover=reverb.selectors.Fifo(),
max_size=self._max_replay_size,
rate_limiter=limiter,
signature=adders.SequenceAdder.signature(
self._environment_spec,
extra_spec,
sequence_length=self._burn_in_length + self._trace_length + 1))
return [table]
def counter(self):
"""Creates the master counter process."""
return tf2_savers.CheckpointingRunner(
counting.Counter(), time_delta_minutes=1, subdirectory='counter')
def learner(self, replay: reverb.Client, counter: counting.Counter):
"""The Learning part of the agent."""
# Create the networks.
network = self._network_factory(self._environment_spec.actions)
target_network = copy.deepcopy(network)
tf2_utils.create_variables(network, [self._obs_spec])
tf2_utils.create_variables(target_network, [self._obs_spec])
# The dataset object to learn from.
reverb_client = reverb.TFClient(replay.server_address)
sequence_length = self._burn_in_length + self._trace_length + 1
dataset = datasets.make_reverb_dataset(
server_address=replay.server_address,
batch_size=self._batch_size,
prefetch_size=self._prefetch_size)
counter = counting.Counter(counter, 'learner')
logger = loggers.make_default_logger(
'learner', save_data=True, steps_key='learner_steps')
# Return the learning agent.
learner = learning.R2D2Learner(
environment_spec=self._environment_spec,
network=network,
target_network=target_network,
burn_in_length=self._burn_in_length,
sequence_length=sequence_length,
dataset=dataset,
reverb_client=reverb_client,
counter=counter,
logger=logger,
discount=self._discount,
target_update_period=self._target_update_period,
importance_sampling_exponent=self._importance_sampling_exponent,
learning_rate=self._learning_rate,
max_replay_size=self._max_replay_size)
return tf2_savers.CheckpointingRunner(
wrapped=learner, time_delta_minutes=60, subdirectory='r2d2_learner')
def actor(
self,
replay: reverb.Client,
variable_source: acme.VariableSource,
counter: counting.Counter,
epsilon: float,
) -> acme.EnvironmentLoop:
"""The actor process."""
environment = self._environment_factory(False)
network = self._network_factory(self._environment_spec.actions)
tf2_utils.create_variables(network, [self._obs_spec])
policy_network = snt.DeepRNN([
network,
lambda qs: tf.cast(trfl.epsilon_greedy(qs, epsilon).sample(), tf.int32),
])
# Component to add things into replay.
sequence_length = self._burn_in_length + self._trace_length + 1
adder = adders.SequenceAdder(
client=replay,
period=self._replay_period,
sequence_length=sequence_length,
delta_encoded=True,
)
variable_client = tf2_variable_utils.VariableClient(
client=variable_source,
variables={'policy': policy_network.variables},
update_period=self._variable_update_period)
# Make sure not to use a random policy after checkpoint restoration by
# assigning variables before running the environment loop.
variable_client.update_and_wait()
# Create the agent.
actor = actors.RecurrentActor(
policy_network=policy_network,
variable_client=variable_client,
adder=adder)
counter = counting.Counter(counter, 'actor')
logger = loggers.make_default_logger(
'actor', save_data=False, steps_key='actor_steps')
# Create the loop to connect environment and agent.
return acme.EnvironmentLoop(environment, actor, counter, logger)
def evaluator(
self,
variable_source: acme.VariableSource,
counter: counting.Counter,
):
"""The evaluation process."""
environment = self._environment_factory(True)
network = self._network_factory(self._environment_spec.actions)
tf2_utils.create_variables(network, [self._obs_spec])
policy_network = snt.DeepRNN([
network,
lambda qs: tf.cast(tf.argmax(qs, axis=-1), tf.int32),
])
variable_client = tf2_variable_utils.VariableClient(
client=variable_source,
variables={'policy': policy_network.variables},
update_period=self._variable_update_period)
# Make sure not to use a random policy after checkpoint restoration by
# assigning variables before running the environment loop.
variable_client.update_and_wait()
# Create the agent.
actor = actors.RecurrentActor(
policy_network=policy_network, variable_client=variable_client)
# Create the run loop and return it.
logger = loggers.make_default_logger(
'evaluator', save_data=True, steps_key='evaluator_steps')
counter = counting.Counter(counter, 'evaluator')
return acme.EnvironmentLoop(environment, actor, counter, logger)
def build(self, name='r2d2'):
"""Build the distributed agent topology."""
program = lp.Program(name=name)
with program.group('replay'):
replay = program.add_node(lp.ReverbNode(self.replay))
with program.group('counter'):
counter = program.add_node(lp.CourierNode(self.counter))
with program.group('learner'):
learner = program.add_node(lp.CourierNode(self.learner, replay, counter))
with program.group('cacher'):
cacher = program.add_node(
lp.CacherNode(learner, refresh_interval_ms=2000, stale_after_ms=4000))
with program.group('evaluator'):
program.add_node(lp.CourierNode(self.evaluator, cacher, counter))
# Generate an epsilon for each actor.
epsilons = np.flip(np.logspace(1, 8, self._num_actors, base=0.4), axis=0)
with program.group('actor'):
for epsilon in epsilons:
program.add_node(
lp.CourierNode(self.actor, replay, cacher, counter, epsilon))
return program
|
acme-master
|
acme/agents/tf/r2d2/agent_distributed.py
|
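A quick back-of-the-envelope check of the sequence bookkeeping above: stored sequences are burn_in_length + trace_length + 1 steps long, and the SequenceAdder writes one every replay_period steps, so consecutive sequences overlap. The parameter values below are assumptions chosen for illustration, not values taken from this file.

# Illustrative arithmetic only; parameter values are assumed.
burn_in_length = 40
trace_length = 80
replay_period = 40

sequence_length = burn_in_length + trace_length + 1
overlap = sequence_length - replay_period
print(sequence_length, overlap)  # 121 81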
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for Recurrent DQN (R2D2)."""
from acme.agents.tf.r2d2.agent import R2D2
from acme.agents.tf.r2d2.agent_distributed import DistributedR2D2
|
acme-master
|
acme/agents/tf/r2d2/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RDQN agent."""
import acme
from acme import specs
from acme.agents.tf import r2d2
from acme.testing import fakes
from acme.tf import networks
import numpy as np
import sonnet as snt
from absl.testing import absltest
from absl.testing import parameterized
class SimpleNetwork(networks.RNNCore):
def __init__(self, action_spec: specs.DiscreteArray):
super().__init__(name='r2d2_test_network')
self._net = snt.DeepRNN([
snt.Flatten(),
snt.LSTM(20),
snt.nets.MLP([50, 50, action_spec.num_values])
])
def __call__(self, inputs, state):
return self._net(inputs, state)
def initial_state(self, batch_size: int, **kwargs):
return self._net.initial_state(batch_size)
def unroll(self, inputs, state, sequence_length):
return snt.static_unroll(self._net, inputs, state, sequence_length)
class R2D2Test(parameterized.TestCase):
@parameterized.parameters(True, False)
def test_r2d2(self, store_lstm_state: bool):
# Create a fake environment to test with.
# TODO(b/152596848): Allow R2D2 to deal with integer observations.
environment = fakes.DiscreteEnvironment(
num_actions=5,
num_observations=10,
obs_shape=(10, 4),
obs_dtype=np.float32,
episode_length=10)
spec = specs.make_environment_spec(environment)
# Construct the agent.
agent = r2d2.R2D2(
environment_spec=spec,
network=SimpleNetwork(spec.actions),
batch_size=10,
samples_per_insert=2,
min_replay_size=10,
store_lstm_state=store_lstm_state,
burn_in_length=2,
trace_length=6,
replay_period=4,
checkpoint=False,
)
# Try running the environment loop. We have no assertions here because all
# we care about is that the agent runs without raising any errors.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=5)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/r2d2/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recurrent DQN (R2D2) agent implementation."""
import copy
from typing import Optional
from acme import datasets
from acme import specs
from acme.adders import reverb as adders
from acme.agents import agent
from acme.agents.tf import actors
from acme.agents.tf.r2d2 import learning
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import reverb
import sonnet as snt
import tensorflow as tf
import trfl
class R2D2(agent.Agent):
"""R2D2 Agent.
This implements a single-process R2D2 agent. This is a Q-learning algorithm
  that generates data via an (epsilon-greedy) behavior policy, inserts
trajectories into a replay buffer, and periodically updates the policy (and
as a result the behavior) by sampling from this buffer.
"""
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
network: snt.RNNCore,
burn_in_length: int,
trace_length: int,
replay_period: int,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
discount: float = 0.99,
batch_size: int = 32,
prefetch_size: int = tf.data.experimental.AUTOTUNE,
target_update_period: int = 100,
importance_sampling_exponent: float = 0.2,
priority_exponent: float = 0.6,
epsilon: float = 0.01,
learning_rate: float = 1e-3,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: float = 32.0,
store_lstm_state: bool = True,
max_priority_weight: float = 0.9,
checkpoint: bool = True,
):
if store_lstm_state:
extra_spec = {
'core_state': tf2_utils.squeeze_batch_dim(network.initial_state(1)),
}
else:
extra_spec = ()
sequence_length = burn_in_length + trace_length + 1
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Prioritized(priority_exponent),
remover=reverb.selectors.Fifo(),
max_size=max_replay_size,
rate_limiter=reverb.rate_limiters.MinSize(min_size_to_sample=1),
signature=adders.SequenceAdder.signature(
environment_spec, extra_spec, sequence_length=sequence_length))
self._server = reverb.Server([replay_table], port=None)
address = f'localhost:{self._server.port}'
# Component to add things into replay.
adder = adders.SequenceAdder(
client=reverb.Client(address),
period=replay_period,
sequence_length=sequence_length,
)
# The dataset object to learn from.
dataset = datasets.make_reverb_dataset(
server_address=address,
batch_size=batch_size,
prefetch_size=prefetch_size)
target_network = copy.deepcopy(network)
tf2_utils.create_variables(network, [environment_spec.observations])
tf2_utils.create_variables(target_network, [environment_spec.observations])
learner = learning.R2D2Learner(
environment_spec=environment_spec,
network=network,
target_network=target_network,
burn_in_length=burn_in_length,
sequence_length=sequence_length,
dataset=dataset,
reverb_client=reverb.TFClient(address),
counter=counter,
logger=logger,
discount=discount,
target_update_period=target_update_period,
importance_sampling_exponent=importance_sampling_exponent,
max_replay_size=max_replay_size,
learning_rate=learning_rate,
store_lstm_state=store_lstm_state,
max_priority_weight=max_priority_weight,
)
self._checkpointer = tf2_savers.Checkpointer(
subdirectory='r2d2_learner',
time_delta_minutes=60,
objects_to_save=learner.state,
enable_checkpointing=checkpoint,
)
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={'network': network}, time_delta_minutes=60.)
policy_network = snt.DeepRNN([
network,
lambda qs: trfl.epsilon_greedy(qs, epsilon=epsilon).sample(),
])
actor = actors.RecurrentActor(
policy_network, adder, store_recurrent_state=store_lstm_state)
observations_per_step = (
float(replay_period * batch_size) / samples_per_insert)
super().__init__(
actor=actor,
learner=learner,
min_observations=replay_period * max(batch_size, min_replay_size),
observations_per_step=observations_per_step)
def update(self):
super().update()
self._snapshotter.save()
self._checkpointer.save()
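# --- Illustrative note (not part of the original file). ---
# The two numbers passed to agent.Agent above control the actor/learner
# interleaving. With the values used in the R2D2 unit test (agent_test.py)
# earlier in this dump (replay_period=4, batch_size=10, samples_per_insert=2,
# min_replay_size=10):
#
#   min_observations      = 4 * max(10, 10)  = 40 environment steps
#   observations_per_step = 4 * 10 / 2       = 20 environment steps
#
# i.e. learning starts after roughly 40 observed steps, and thereafter about
# one learner update is performed for every 20 additional observations.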
|
acme-master
|
acme/agents/tf/r2d2/agent.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recurrent Replay Distributed DQN (R2D2) learner implementation."""
import functools
import time
from typing import Dict, Iterator, List, Mapping, Union, Optional
import acme
from acme import specs
from acme.adders import reverb as adders
from acme.tf import losses
from acme.tf import networks
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
import tree
Variables = List[np.ndarray]
class R2D2Learner(acme.Learner, tf2_savers.TFSaveable):
"""R2D2 learner.
This is the learning component of the R2D2 agent. It takes a dataset as input
and implements update functionality to learn from this dataset.
"""
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
network: Union[networks.RNNCore, snt.RNNCore],
target_network: Union[networks.RNNCore, snt.RNNCore],
burn_in_length: int,
sequence_length: int,
dataset: tf.data.Dataset,
reverb_client: Optional[reverb.TFClient] = None,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
discount: float = 0.99,
target_update_period: int = 100,
importance_sampling_exponent: float = 0.2,
max_replay_size: int = 1_000_000,
learning_rate: float = 1e-3,
# TODO(sergomez): rename to use_core_state for consistency with JAX agent.
store_lstm_state: bool = True,
max_priority_weight: float = 0.9,
n_step: int = 5,
clip_grad_norm: Optional[float] = None,
):
if not isinstance(network, networks.RNNCore):
network.unroll = functools.partial(snt.static_unroll, network)
target_network.unroll = functools.partial(snt.static_unroll,
target_network)
# Internalise agent components (replay buffer, networks, optimizer).
# TODO(b/155086959): Fix type stubs and remove.
self._iterator: Iterator[reverb.ReplaySample] = iter(dataset) # pytype: disable=wrong-arg-types
self._network = network
self._target_network = target_network
self._optimizer = snt.optimizers.Adam(learning_rate, epsilon=1e-3)
self._reverb_client = reverb_client
# Internalise the hyperparameters.
self._store_lstm_state = store_lstm_state
self._burn_in_length = burn_in_length
self._discount = discount
self._max_replay_size = max_replay_size
self._importance_sampling_exponent = importance_sampling_exponent
self._max_priority_weight = max_priority_weight
self._target_update_period = target_update_period
self._num_actions = environment_spec.actions.num_values
self._sequence_length = sequence_length
self._n_step = n_step
self._clip_grad_norm = clip_grad_norm
if burn_in_length:
self._burn_in = lambda o, s: self._network.unroll(o, s, burn_in_length)
else:
self._burn_in = lambda o, s: (o, s) # pylint: disable=unnecessary-lambda
# Learner state.
self._variables = network.variables
self._num_steps = tf.Variable(
0., dtype=tf.float32, trainable=False, name='step')
# Internalise logging/counting objects.
self._counter = counting.Counter(counter, 'learner')
self._logger = logger or loggers.TerminalLogger('learner', time_delta=100.)
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp = None
@tf.function
def _step(self) -> Dict[str, tf.Tensor]:
# Draw a batch of data from replay.
sample: reverb.ReplaySample = next(self._iterator)
data = tf2_utils.batch_to_sequence(sample.data)
observations, actions, rewards, discounts, extra = (data.observation,
data.action,
data.reward,
data.discount,
data.extras)
unused_sequence_length, batch_size = actions.shape
# Get initial state for the LSTM, either from replay or simply use zeros.
if self._store_lstm_state:
core_state = tree.map_structure(lambda x: x[0], extra['core_state'])
else:
core_state = self._network.initial_state(batch_size)
target_core_state = tree.map_structure(tf.identity, core_state)
# Before training, optionally unroll the LSTM for a fixed warmup period.
burn_in_obs = tree.map_structure(lambda x: x[:self._burn_in_length],
observations)
_, core_state = self._burn_in(burn_in_obs, core_state)
_, target_core_state = self._burn_in(burn_in_obs, target_core_state)
# Don't train on the warmup period.
observations, actions, rewards, discounts, extra = tree.map_structure(
lambda x: x[self._burn_in_length:],
(observations, actions, rewards, discounts, extra))
with tf.GradientTape() as tape:
# Unroll the online and target Q-networks on the sequences.
q_values, _ = self._network.unroll(observations, core_state,
self._sequence_length)
target_q_values, _ = self._target_network.unroll(observations,
target_core_state,
self._sequence_length)
# Compute the target policy distribution (greedy).
greedy_actions = tf.argmax(q_values, output_type=tf.int32, axis=-1)
target_policy_probs = tf.one_hot(
greedy_actions, depth=self._num_actions, dtype=q_values.dtype)
# Compute the transformed n-step loss.
rewards = tree.map_structure(lambda x: x[:-1], rewards)
discounts = tree.map_structure(lambda x: x[:-1], discounts)
loss, extra = losses.transformed_n_step_loss(
qs=q_values,
targnet_qs=target_q_values,
actions=actions,
rewards=rewards,
pcontinues=discounts * self._discount,
target_policy_probs=target_policy_probs,
bootstrap_n=self._n_step,
)
# Calculate importance weights and use them to scale the loss.
sample_info = sample.info
keys, probs = sample_info.key, sample_info.probability
importance_weights = 1. / (self._max_replay_size * probs) # [T, B]
importance_weights **= self._importance_sampling_exponent
importance_weights /= tf.reduce_max(importance_weights)
loss *= tf.cast(importance_weights, tf.float32) # [T, B]
loss = tf.reduce_mean(loss) # []
# Apply gradients via optimizer.
gradients = tape.gradient(loss, self._network.trainable_variables)
# Clip and apply gradients.
if self._clip_grad_norm is not None:
gradients, _ = tf.clip_by_global_norm(gradients, self._clip_grad_norm)
self._optimizer.apply(gradients, self._network.trainable_variables)
# Periodically update the target network.
if tf.math.mod(self._num_steps, self._target_update_period) == 0:
for src, dest in zip(self._network.variables,
self._target_network.variables):
dest.assign(src)
self._num_steps.assign_add(1)
if self._reverb_client:
# Compute updated priorities.
priorities = compute_priority(extra.errors, self._max_priority_weight)
# Compute priorities and add an op to update them on the reverb side.
self._reverb_client.update_priorities(
table=adders.DEFAULT_PRIORITY_TABLE,
keys=keys,
priorities=tf.cast(priorities, tf.float64))
return {'loss': loss}
def step(self):
# Run the learning step.
results = self._step()
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Update our counts and record it.
counts = self._counter.increment(steps=1, walltime=elapsed_time)
results.update(counts)
self._logger.write(results)
def get_variables(self, names: List[str]) -> List[Variables]:
return [tf2_utils.to_numpy(self._variables)]
@property
def state(self) -> Mapping[str, tf2_savers.Checkpointable]:
"""Returns the stateful parts of the learner for checkpointing."""
return {
'network': self._network,
'target_network': self._target_network,
'optimizer': self._optimizer,
'num_steps': self._num_steps,
}
def compute_priority(errors: tf.Tensor, alpha: float):
"""Compute priority as mixture of max and mean sequence errors."""
abs_errors = tf.abs(errors)
mean_priority = tf.reduce_mean(abs_errors, axis=0)
max_priority = tf.reduce_max(abs_errors, axis=0)
return alpha * max_priority + (1 - alpha) * mean_priority
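# --- Illustrative sketch (not part of the original file). ---
# `compute_priority` mixes the max and mean absolute sequence errors,
#   priority = alpha * max_t |e_t| + (1 - alpha) * mean_t |e_t|,
# as used for prioritized replay in R2D2. A quick check with made-up numbers
# and the default max_priority_weight of 0.9:
if __name__ == '__main__':
  _demo_errors = tf.constant([[0.5], [1.0], [2.0], [0.5]])  # [T=4, B=1].
  print(compute_priority(_demo_errors, alpha=0.9))  # 0.9*2.0 + 0.1*1.0 = 1.9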
|
acme-master
|
acme/agents/tf/r2d2/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for Recurrent DQfD (R2D3)."""
from acme.agents.tf.r2d3.agent import R2D3
|
acme-master
|
acme/agents/tf/r2d3/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for R2D3 agent."""
import acme
from acme import specs
from acme.agents.tf import r2d3
from acme.agents.tf.dqfd import bsuite_demonstrations
from acme.testing import fakes
from acme.tf import networks
import dm_env
import numpy as np
import sonnet as snt
from absl.testing import absltest
class SimpleNetwork(networks.RNNCore):
def __init__(self, action_spec: specs.DiscreteArray):
super().__init__(name='r2d2_test_network')
self._net = snt.DeepRNN([
snt.Flatten(),
snt.LSTM(20),
snt.nets.MLP([50, 50, action_spec.num_values])
])
def __call__(self, inputs, state):
return self._net(inputs, state)
def initial_state(self, batch_size: int, **kwargs):
return self._net.initial_state(batch_size)
def unroll(self, inputs, state, sequence_length):
return snt.static_unroll(self._net, inputs, state, sequence_length)
class R2D3Test(absltest.TestCase):
def test_r2d3(self):
# Create a fake environment to test with.
environment = fakes.DiscreteEnvironment(
num_actions=5,
num_observations=10,
obs_dtype=np.float32,
episode_length=10)
spec = specs.make_environment_spec(environment)
# Build demonstrations.
dummy_action = np.zeros((), dtype=np.int32)
recorder = bsuite_demonstrations.DemonstrationRecorder()
timestep = environment.reset()
while timestep.step_type is not dm_env.StepType.LAST:
recorder.step(timestep, dummy_action)
timestep = environment.step(dummy_action)
recorder.step(timestep, dummy_action)
recorder.record_episode()
# Construct the agent.
agent = r2d3.R2D3(
environment_spec=spec,
network=SimpleNetwork(spec.actions),
target_network=SimpleNetwork(spec.actions),
demonstration_dataset=recorder.make_tf_dataset(),
demonstration_ratio=0.5,
batch_size=10,
samples_per_insert=2,
min_replay_size=10,
burn_in_length=2,
trace_length=6,
replay_period=4,
checkpoint=False,
)
# Try running the environment loop. We have no assertions here because all
# we care about is that the agent runs without raising any errors.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=5)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/r2d3/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recurrent DQfD (R2D3) agent implementation."""
import functools
from typing import Optional
from acme import datasets
from acme import specs
from acme import types as acme_types
from acme.adders import reverb as adders
from acme.agents import agent
from acme.agents.tf import actors
from acme.agents.tf.r2d2 import learning
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import reverb
import sonnet as snt
import tensorflow as tf
import tree
import trfl
class R2D3(agent.Agent):
"""R2D3 Agent.
This implements a single-process R2D2 agent that mixes demonstrations with
actor experience.
"""
def __init__(self,
environment_spec: specs.EnvironmentSpec,
network: snt.RNNCore,
target_network: snt.RNNCore,
burn_in_length: int,
trace_length: int,
replay_period: int,
demonstration_dataset: tf.data.Dataset,
demonstration_ratio: float,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
discount: float = 0.99,
batch_size: int = 32,
target_update_period: int = 100,
importance_sampling_exponent: float = 0.2,
epsilon: float = 0.01,
learning_rate: float = 1e-3,
save_logs: bool = False,
log_name: str = 'agent',
checkpoint: bool = True,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: float = 32.0):
sequence_length = burn_in_length + trace_length + 1
extra_spec = {
'core_state': network.initial_state(1),
}
# Remove batch dimensions.
extra_spec = tf2_utils.squeeze_batch_dim(extra_spec)
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=max_replay_size,
rate_limiter=reverb.rate_limiters.MinSize(min_size_to_sample=1),
signature=adders.SequenceAdder.signature(
environment_spec, extra_spec, sequence_length=sequence_length))
self._server = reverb.Server([replay_table], port=None)
address = f'localhost:{self._server.port}'
# Component to add things into replay.
sequence_kwargs = dict(
period=replay_period,
sequence_length=sequence_length,
)
adder = adders.SequenceAdder(client=reverb.Client(address),
**sequence_kwargs)
# The dataset object to learn from.
dataset = datasets.make_reverb_dataset(
server_address=address)
# Combine with demonstration dataset.
transition = functools.partial(_sequence_from_episode,
extra_spec=extra_spec,
**sequence_kwargs)
dataset_demos = demonstration_dataset.map(transition)
dataset = tf.data.experimental.sample_from_datasets(
[dataset, dataset_demos],
[1 - demonstration_ratio, demonstration_ratio])
# Batch and prefetch.
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
tf2_utils.create_variables(network, [environment_spec.observations])
tf2_utils.create_variables(target_network, [environment_spec.observations])
learner = learning.R2D2Learner(
environment_spec=environment_spec,
network=network,
target_network=target_network,
burn_in_length=burn_in_length,
dataset=dataset,
reverb_client=reverb.TFClient(address),
counter=counter,
logger=logger,
sequence_length=sequence_length,
discount=discount,
target_update_period=target_update_period,
importance_sampling_exponent=importance_sampling_exponent,
max_replay_size=max_replay_size,
learning_rate=learning_rate,
store_lstm_state=False,
)
self._checkpointer = tf2_savers.Checkpointer(
subdirectory='r2d2_learner',
time_delta_minutes=60,
objects_to_save=learner.state,
enable_checkpointing=checkpoint,
)
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={'network': network}, time_delta_minutes=60.)
policy_network = snt.DeepRNN([
network,
lambda qs: trfl.epsilon_greedy(qs, epsilon=epsilon).sample(),
])
actor = actors.RecurrentActor(policy_network, adder)
observations_per_step = (float(replay_period * batch_size) /
samples_per_insert)
super().__init__(
actor=actor,
learner=learner,
min_observations=replay_period * max(batch_size, min_replay_size),
observations_per_step=observations_per_step)
def update(self):
super().update()
self._snapshotter.save()
self._checkpointer.save()
def _sequence_from_episode(observations: acme_types.NestedTensor,
actions: tf.Tensor,
rewards: tf.Tensor,
discounts: tf.Tensor,
extra_spec: acme_types.NestedSpec,
period: int,
sequence_length: int):
"""Produce Reverb-like sequence from a full episode.
Observations, actions, rewards and discounts have the same length. This
function will ignore the first reward and discount and the last action.
This function generates fake (all-zero) extras.
See docs for reverb.SequenceAdder() for more details.
Args:
observations: [L, ...] Tensor.
actions: [L, ...] Tensor.
rewards: [L] Tensor.
discounts: [L] Tensor.
extra_spec: A possibly nested structure of specs for extras. This function
will generate fake (all-zero) extras.
period: The period with which we add sequences.
sequence_length: The fixed length of sequences we wish to add.
Returns:
    A reverb.ReplaySample whose data is an adders.Step holding the padded
    sequence (o_t, a_t, r_t, d_t) together with all-zero extras e_t.
"""
length = tf.shape(rewards)[0]
first = tf.random.uniform(shape=(), minval=0, maxval=length, dtype=tf.int32)
first = first // period * period # Get a multiple of `period`.
to = tf.minimum(first + sequence_length, length)
def _slice_and_pad(x):
pad_length = sequence_length + first - to
padding_shape = tf.concat([[pad_length], tf.shape(x)[1:]], axis=0)
result = tf.concat([x[first:to], tf.zeros(padding_shape, x.dtype)], axis=0)
result.set_shape([sequence_length] + x.shape.as_list()[1:])
return result
o_t = tree.map_structure(_slice_and_pad, observations)
a_t = tree.map_structure(_slice_and_pad, actions)
r_t = _slice_and_pad(rewards)
d_t = _slice_and_pad(discounts)
start_of_episode = tf.equal(first, 0)
start_of_episode = tf.expand_dims(start_of_episode, axis=0)
start_of_episode = tf.tile(start_of_episode, [sequence_length])
def _sequence_zeros(spec):
return tf.zeros([sequence_length] + spec.shape, spec.dtype)
e_t = tree.map_structure(_sequence_zeros, extra_spec)
info = tree.map_structure(lambda dtype: tf.ones([], dtype),
reverb.SampleInfo.tf_dtypes())
return reverb.ReplaySample(
info=info,
data=adders.Step(
observation=o_t,
action=a_t,
reward=r_t,
discount=d_t,
start_of_episode=start_of_episode,
extras=e_t))
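# --- Illustrative sketch (not part of the original file). ---
# A toy call showing the slice-and-pad behaviour of `_sequence_from_episode`:
# a window of `sequence_length` steps is taken starting at a random multiple
# of `period`, and zero-padded if it runs past the end of the episode.
# Assumes eager TensorFlow 2 execution.
if __name__ == '__main__':
  _demo_sample = _sequence_from_episode(
      observations=tf.range(6, dtype=tf.float32),  # Episode of length 6.
      actions=tf.zeros([6], dtype=tf.int32),
      rewards=tf.ones([6]),
      discounts=tf.ones([6]),
      extra_spec={},
      period=2,
      sequence_length=4)
  # E.g. [2., 3., 4., 5.], or a zero-padded tail such as [4., 5., 0., 0.].
  print(_demo_sample.data.observation)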
|
acme-master
|
acme/agents/tf/r2d3/agent.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batch-Constrained Deep Q-learning (BCQ)."""
from acme.agents.tf.bcq.discrete_learning import DiscreteBCQLearner
|
acme-master
|
acme/agents/tf/bcq/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Discrete BCQ learner implementation.
As described in https://arxiv.org/pdf/1910.01708.pdf.
"""
import copy
from typing import Dict, List, Optional
from acme import core
from acme import types
from acme.adders import reverb as adders
from acme.agents.tf import bc
from acme.tf import losses
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.tf.networks import discrete as discrete_networks
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
import trfl
class _InternalBCQLearner(core.Learner, tf2_savers.TFSaveable):
"""Internal BCQ learner.
This implements the Q-learning component in the discrete BCQ algorithm.
"""
def __init__(
self,
network: discrete_networks.DiscreteFilteredQNetwork,
discount: float,
importance_sampling_exponent: float,
learning_rate: float,
target_update_period: int,
dataset: tf.data.Dataset,
huber_loss_parameter: float = 1.,
replay_client: Optional[reverb.TFClient] = None,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = False,
):
"""Initializes the learner.
Args:
network: BCQ network
discount: discount to use for TD updates.
importance_sampling_exponent: power to which importance weights are raised
before normalizing.
learning_rate: learning rate for the q-network update.
target_update_period: number of learner steps to perform before updating
the target networks.
dataset: dataset to learn from, whether fixed or from a replay buffer (see
`acme.datasets.reverb.make_reverb_dataset` documentation).
huber_loss_parameter: Quadratic-linear boundary for Huber loss.
replay_client: client to replay to allow for updating priorities.
counter: Counter object for (potentially distributed) counting.
logger: Logger object for writing logs to.
checkpoint: boolean indicating whether to checkpoint the learner.
"""
# Internalise agent components (replay buffer, networks, optimizer).
# TODO(b/155086959): Fix type stubs and remove.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
self._network = network
self._q_network = network.q_network
self._target_q_network = copy.deepcopy(network.q_network)
self._optimizer = snt.optimizers.Adam(learning_rate)
self._replay_client = replay_client
# Internalise the hyperparameters.
self._discount = discount
self._target_update_period = target_update_period
self._importance_sampling_exponent = importance_sampling_exponent
self._huber_loss_parameter = huber_loss_parameter
# Learner state.
self._variables = [self._network.trainable_variables]
self._num_steps = tf.Variable(0, dtype=tf.int32)
# Internalise logging/counting objects.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger('learner',
save_data=False)
# Create a snapshotter object.
if checkpoint:
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={'network': network}, time_delta_minutes=60.)
else:
self._snapshotter = None
@tf.function
def _step(self) -> Dict[str, tf.Tensor]:
"""Do a step of SGD and update the priorities."""
# Pull out the data needed for updates/priorities.
inputs = next(self._iterator)
transitions: types.Transition = inputs.data
keys, probs = inputs.info[:2]
with tf.GradientTape() as tape:
# Evaluate our networks.
q_tm1 = self._q_network(transitions.observation)
q_t_value = self._target_q_network(transitions.next_observation)
q_t_selector = self._network(transitions.next_observation)
# The rewards and discounts have to have the same type as network values.
r_t = tf.cast(transitions.reward, q_tm1.dtype)
r_t = tf.clip_by_value(r_t, -1., 1.)
d_t = tf.cast(transitions.discount, q_tm1.dtype) * tf.cast(
self._discount, q_tm1.dtype)
# Compute the loss.
_, extra = trfl.double_qlearning(q_tm1, transitions.action, r_t, d_t,
q_t_value, q_t_selector)
loss = losses.huber(extra.td_error, self._huber_loss_parameter)
# Get the importance weights.
importance_weights = 1. / probs # [B]
importance_weights **= self._importance_sampling_exponent
importance_weights /= tf.reduce_max(importance_weights)
# Reweight.
loss *= tf.cast(importance_weights, loss.dtype) # [B]
loss = tf.reduce_mean(loss, axis=[0]) # []
# Do a step of SGD.
gradients = tape.gradient(loss, self._network.trainable_variables)
self._optimizer.apply(gradients, self._network.trainable_variables)
# Update the priorities in the replay buffer.
if self._replay_client:
priorities = tf.cast(tf.abs(extra.td_error), tf.float64)
self._replay_client.update_priorities(
table=adders.DEFAULT_PRIORITY_TABLE, keys=keys, priorities=priorities)
# Periodically update the target network.
if tf.math.mod(self._num_steps, self._target_update_period) == 0:
for src, dest in zip(self._q_network.variables,
self._target_q_network.variables):
dest.assign(src)
self._num_steps.assign_add(1)
# Compute the global norm of the gradients for logging.
global_gradient_norm = tf.linalg.global_norm(gradients)
# Compute statistics of the Q-values for logging.
max_q = tf.reduce_max(q_t_value)
min_q = tf.reduce_min(q_t_value)
mean_q, var_q = tf.nn.moments(q_t_value, [0, 1])
# Report loss & statistics for logging.
fetches = {
'gradient_norm': global_gradient_norm,
'loss': loss,
'max_q': max_q,
'mean_q': mean_q,
'min_q': min_q,
'var_q': var_q,
}
return fetches
def step(self):
# Do a batch of SGD.
result = self._step()
# Update our counts and record it.
counts = self._counter.increment(steps=1)
result.update(counts)
# Snapshot and attempt to write logs.
if self._snapshotter is not None:
self._snapshotter.save()
self._logger.write(result)
def get_variables(self, names: List[str]) -> List[np.ndarray]:
return tf2_utils.to_numpy(self._variables)
@property
def state(self):
"""Returns the stateful parts of the learner for checkpointing."""
return {
'network': self._network,
'target_q_network': self._target_q_network,
'optimizer': self._optimizer,
'num_steps': self._num_steps
}
class DiscreteBCQLearner(core.Learner, tf2_savers.TFSaveable):
"""Discrete BCQ learner.
This learner combines supervised BC learning and Q learning to implement the
discrete BCQ algorithm as described in https://arxiv.org/pdf/1910.01708.pdf.
"""
def __init__(self,
network: discrete_networks.DiscreteFilteredQNetwork,
dataset: tf.data.Dataset,
learning_rate: float,
counter: Optional[counting.Counter] = None,
bc_logger: Optional[loggers.Logger] = None,
bcq_logger: Optional[loggers.Logger] = None,
**bcq_learner_kwargs):
counter = counter or counting.Counter()
self._bc_logger = bc_logger or loggers.TerminalLogger('bc_learner',
time_delta=1.)
self._bcq_logger = bcq_logger or loggers.TerminalLogger('bcq_learner',
time_delta=1.)
self._bc_learner = bc.BCLearner(
network=network.g_network,
learning_rate=learning_rate,
dataset=dataset,
counter=counting.Counter(counter, 'bc'),
logger=self._bc_logger,
checkpoint=False)
self._bcq_learner = _InternalBCQLearner(
network=network,
learning_rate=learning_rate,
dataset=dataset,
counter=counting.Counter(counter, 'bcq'),
logger=self._bcq_logger,
**bcq_learner_kwargs)
def get_variables(self, names):
return self._bcq_learner.get_variables(names)
@property
def state(self):
bc_state = self._bc_learner.state
bc_state.pop('network') # No need to checkpoint the BC network.
bcq_state = self._bcq_learner.state
state = dict()
state.update({f'bc_{k}': v for k, v in bc_state.items()})
state.update({f'bcq_{k}': v for k, v in bcq_state.items()})
return state
def step(self):
self._bc_learner.step()
self._bcq_learner.step()
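# --- Illustrative note (not part of the original file). ---
# Each DiscreteBCQLearner.step() advances both sub-learners once, so after N
# steps the shared counter holds bc_steps == bcq_steps == N, and the composite
# `state` property exposes the prefix-merged keys, e.g. with hypothetical
# sub-states {'num_steps': a} and {'network': n, 'num_steps': b} it yields
# {'bc_num_steps': a, 'bcq_network': n, 'bcq_num_steps': b}. This is exactly
# what the unit test below asserts via 'bc_num_steps' and 'bcq_num_steps'.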
|
acme-master
|
acme/agents/tf/bcq/discrete_learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for discrete BCQ learner."""
from acme import specs
from acme.agents.tf import bcq
from acme.testing import fakes
from acme.tf import utils as tf2_utils
from acme.tf.networks import discrete as discrete_networks
from acme.utils import counting
import numpy as np
import sonnet as snt
from absl.testing import absltest
def _make_network(action_spec: specs.DiscreteArray) -> snt.Module:
return snt.Sequential([
snt.Flatten(),
snt.nets.MLP([50, 50, action_spec.num_values]),
])
class DiscreteBCQLearnerTest(absltest.TestCase):
def test_full_learner(self):
# Create dataset.
environment = fakes.DiscreteEnvironment(
num_actions=5,
num_observations=10,
obs_dtype=np.float32,
episode_length=10)
spec = specs.make_environment_spec(environment)
dataset = fakes.transition_dataset(environment).batch(2)
# Build network.
g_network = _make_network(spec.actions)
q_network = _make_network(spec.actions)
network = discrete_networks.DiscreteFilteredQNetwork(g_network=g_network,
q_network=q_network,
threshold=0.5)
tf2_utils.create_variables(network, [spec.observations])
# Build learner.
counter = counting.Counter()
learner = bcq.DiscreteBCQLearner(
network=network,
dataset=dataset,
learning_rate=1e-4,
discount=0.99,
importance_sampling_exponent=0.2,
target_update_period=100,
counter=counter)
# Run a learner step.
learner.step()
# Check counts from BC and BCQ learners.
counts = counter.get_counts()
self.assertEqual(1, counts['bc_steps'])
self.assertEqual(1, counts['bcq_steps'])
# Check learner state.
self.assertEqual(1, learner.state['bc_num_steps'].numpy())
self.assertEqual(1, learner.state['bcq_num_steps'].numpy())
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/bcq/discrete_learning_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for IQN learner."""
import copy
from acme import specs
from acme.agents.tf import iqn
from acme.testing import fakes
from acme.tf import networks
from acme.tf import utils as tf2_utils
from acme.utils import counting
import numpy as np
import sonnet as snt
from absl.testing import absltest
def _make_torso_network(num_outputs: int) -> snt.Module:
"""Create torso network (outputs intermediate representation)."""
return snt.Sequential([
snt.Flatten(),
snt.nets.MLP([num_outputs])
])
def _make_head_network(num_outputs: int) -> snt.Module:
"""Create head network (outputs Q-values)."""
return snt.nets.MLP([num_outputs])
class IQNLearnerTest(absltest.TestCase):
def test_full_learner(self):
# Create dataset.
environment = fakes.DiscreteEnvironment(
num_actions=5,
num_observations=10,
obs_dtype=np.float32,
episode_length=10)
spec = specs.make_environment_spec(environment)
dataset = fakes.transition_dataset(environment).batch(
2, drop_remainder=True)
# Build network.
network = networks.IQNNetwork(
torso=_make_torso_network(num_outputs=2),
head=_make_head_network(num_outputs=spec.actions.num_values),
latent_dim=2,
num_quantile_samples=1)
tf2_utils.create_variables(network, [spec.observations])
# Build learner.
counter = counting.Counter()
learner = iqn.IQNLearner(
network=network,
target_network=copy.deepcopy(network),
dataset=dataset,
learning_rate=1e-4,
discount=0.99,
importance_sampling_exponent=0.2,
target_update_period=1,
counter=counter)
# Run a learner step.
learner.step()
# Check counts from IQN learner.
counts = counter.get_counts()
self.assertEqual(1, counts['steps'])
# Check learner state.
self.assertEqual(1, learner.state['num_steps'].numpy())
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/iqn/learning_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of an IQN agent."""
from acme.agents.tf.iqn.learning import IQNLearner
|
acme-master
|
acme/agents/tf/iqn/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implicit Quantile Network (IQN) learner implementation."""
from typing import Dict, List, Optional, Tuple
from acme import core
from acme import types
from acme.adders import reverb as adders
from acme.tf import losses
from acme.tf import networks
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
class IQNLearner(core.Learner, tf2_savers.TFSaveable):
"""Distributional DQN learner."""
def __init__(
self,
network: networks.IQNNetwork,
target_network: snt.Module,
discount: float,
importance_sampling_exponent: float,
learning_rate: float,
target_update_period: int,
dataset: tf.data.Dataset,
huber_loss_parameter: float = 1.,
replay_client: Optional[reverb.TFClient] = None,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = True,
):
"""Initializes the learner.
Args:
network: the online Q network (the one being optimized) that outputs
(q_values, q_logits, atoms).
target_network: the target Q critic (which lags behind the online net).
discount: discount to use for TD updates.
importance_sampling_exponent: power to which importance weights are raised
before normalizing.
learning_rate: learning rate for the q-network update.
target_update_period: number of learner steps to perform before updating
the target networks.
dataset: dataset to learn from, whether fixed or from a replay buffer
(see `acme.datasets.reverb.make_reverb_dataset` documentation).
huber_loss_parameter: Quadratic-linear boundary for Huber loss.
replay_client: client to replay to allow for updating priorities.
counter: Counter object for (potentially distributed) counting.
logger: Logger object for writing logs to.
checkpoint: boolean indicating whether to checkpoint the learner or not.
"""
# Internalise agent components (replay buffer, networks, optimizer).
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
self._network = network
self._target_network = target_network
self._optimizer = snt.optimizers.Adam(learning_rate)
self._replay_client = replay_client
# Internalise the hyperparameters.
self._discount = discount
self._target_update_period = target_update_period
self._importance_sampling_exponent = importance_sampling_exponent
self._huber_loss_parameter = huber_loss_parameter
# Learner state.
self._variables: List[List[tf.Tensor]] = [network.trainable_variables]
self._num_steps = tf.Variable(0, dtype=tf.int32)
# Internalise logging/counting objects.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.TerminalLogger('learner', time_delta=1.)
# Create a snapshotter object.
if checkpoint:
self._checkpointer = tf2_savers.Checkpointer(
time_delta_minutes=5,
objects_to_save={
'network': self._network,
'target_network': self._target_network,
'optimizer': self._optimizer,
'num_steps': self._num_steps
})
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={'network': network}, time_delta_minutes=60.)
else:
self._checkpointer = None
self._snapshotter = None
@tf.function
def _step(self) -> Dict[str, tf.Tensor]:
"""Do a step of SGD and update the priorities."""
# Pull out the data needed for updates/priorities.
inputs = next(self._iterator)
transitions: types.Transition = inputs.data
keys, probs, *_ = inputs.info
with tf.GradientTape() as tape:
loss, fetches = self._loss_and_fetches(transitions.observation,
transitions.action,
transitions.reward,
transitions.discount,
transitions.next_observation)
# Get the importance weights.
importance_weights = 1. / probs # [B]
importance_weights **= self._importance_sampling_exponent
importance_weights /= tf.reduce_max(importance_weights)
# Reweight.
loss *= tf.cast(importance_weights, loss.dtype) # [B]
loss = tf.reduce_mean(loss, axis=[0]) # []
# Do a step of SGD.
gradients = tape.gradient(loss, self._network.trainable_variables)
self._optimizer.apply(gradients, self._network.trainable_variables)
# Update the priorities in the replay buffer.
if self._replay_client:
priorities = tf.clip_by_value(tf.abs(loss), -100, 100)
priorities = tf.cast(priorities, tf.float64)
self._replay_client.update_priorities(
table=adders.DEFAULT_PRIORITY_TABLE, keys=keys, priorities=priorities)
# Periodically update the target network.
if tf.math.mod(self._num_steps, self._target_update_period) == 0:
for src, dest in zip(self._network.variables,
self._target_network.variables):
dest.assign(src)
self._num_steps.assign_add(1)
# Report gradient norms.
fetches.update(
loss=loss,
gradient_norm=tf.linalg.global_norm(gradients))
return fetches
def step(self):
# Do a batch of SGD.
result = self._step()
# Update our counts and record it.
counts = self._counter.increment(steps=1)
result.update(counts)
# Checkpoint and attempt to write logs.
if self._checkpointer is not None:
self._checkpointer.save()
if self._snapshotter is not None:
self._snapshotter.save()
self._logger.write(result)
def get_variables(self, names: List[str]) -> List[np.ndarray]:
return tf2_utils.to_numpy(self._variables)
def _loss_and_fetches(
self,
o_tm1: tf.Tensor,
a_tm1: tf.Tensor,
r_t: tf.Tensor,
d_t: tf.Tensor,
o_t: tf.Tensor,
) -> Tuple[tf.Tensor, Dict[str, tf.Tensor]]:
# Evaluate our networks.
_, dist_tm1, tau = self._network(o_tm1)
q_tm1 = _index_embs_with_actions(dist_tm1, a_tm1)
q_selector, _, _ = self._target_network(o_t)
a_t = tf.argmax(q_selector, axis=1)
_, dist_t, _ = self._target_network(o_t)
q_t = _index_embs_with_actions(dist_t, a_t)
q_tm1 = losses.QuantileDistribution(values=q_tm1,
logits=tf.zeros_like(q_tm1))
q_t = losses.QuantileDistribution(values=q_t, logits=tf.zeros_like(q_t))
# The rewards and discounts have to have the same type as network values.
r_t = tf.cast(r_t, tf.float32)
r_t = tf.clip_by_value(r_t, -1., 1.)
d_t = tf.cast(d_t, tf.float32) * tf.cast(self._discount, tf.float32)
# Compute the loss.
loss_module = losses.NonUniformQuantileRegression(
self._huber_loss_parameter)
loss = loss_module(q_tm1, r_t, d_t, q_t, tau)
# Compute statistics of the Q-values for logging.
max_q = tf.reduce_max(q_t.values)
min_q = tf.reduce_min(q_t.values)
mean_q, var_q = tf.nn.moments(q_t.values, [0, 1])
fetches = {
'max_q': max_q,
'mean_q': mean_q,
'min_q': min_q,
'var_q': var_q,
}
return loss, fetches
@property
def state(self):
"""Returns the stateful parts of the learner for checkpointing."""
return {
'network': self._network,
'target_network': self._target_network,
'optimizer': self._optimizer,
'num_steps': self._num_steps
}
def _index_embs_with_actions(
embeddings: tf.Tensor,
actions: tf.Tensor,
) -> tf.Tensor:
"""Slice an embedding Tensor with action indices.
  Takes embeddings of shape [batch_size, num_actions, embed_dim] and actions
  of shape [batch_size], and returns, for each batch element b, the row
  embeddings[b, actions[b], :]. The slicing is implemented with a boolean
  mask rather than a gather-style op.
  Args:
    embeddings: Tensor of embeddings to index.
    actions: int Tensor to use as an index into embeddings.
  Returns:
    Tensor of embeddings indexed by actions.
"""
batch_size, num_actions, _ = embeddings.shape.as_list()
# Values are the 'values' in a sparse tensor we will be setting
act_indx = tf.cast(actions, tf.int64)[:, None]
values = tf.ones([tf.size(actions)], dtype=tf.bool)
# Create a range for each index into the batch
act_range = tf.range(0, batch_size, dtype=tf.int64)[:, None]
# Combine this into coordinates with the action indices
indices = tf.concat([act_range, act_indx], 1)
actions_mask = tf.SparseTensor(indices, values, [batch_size, num_actions])
actions_mask = tf.stop_gradient(
tf.sparse.to_dense(actions_mask, default_value=False))
sliced_emb = tf.boolean_mask(embeddings, actions_mask)
return sliced_emb
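# --- Illustrative sketch (not part of the original file). ---
# `_index_embs_with_actions` picks one [embed_dim] row per batch element; with
# a single action per row it behaves like tf.gather(..., batch_dims=1).
if __name__ == '__main__':
  _demo_embs = tf.reshape(tf.range(12, dtype=tf.float32), [2, 3, 2])  # [B,A,D]
  _demo_actions = tf.constant([2, 0])
  print(_index_embs_with_actions(_demo_embs, _demo_actions))
  # [[4., 5.], [6., 7.]]  i.e. row b is embeddings[b, actions[b], :].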
|
acme-master
|
acme/agents/tf/iqn/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for DQfD."""
from acme.agents.tf.dqfd.agent import DQfD
from acme.agents.tf.dqfd.bsuite_demonstrations import DemonstrationRecorder
|
acme-master
|
acme/agents/tf/dqfd/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""bsuite demonstrations."""
from typing import Any, List
from absl import flags
from bsuite.environments import deep_sea
import dm_env
import numpy as np
import tensorflow as tf
import tree
FLAGS = flags.FLAGS
def _nested_stack(sequence: List[Any]):
"""Stack nested elements in a sequence."""
return tree.map_structure(lambda *x: np.stack(x), *sequence)
class DemonstrationRecorder:
"""Records demonstrations.
A demonstration is a (observation, action, reward, discount) tuple where
every element is a numpy array corresponding to a full episode.
"""
def __init__(self):
self._demos = []
self._reset_episode()
def step(self, timestep: dm_env.TimeStep, action: np.ndarray):
reward = np.array(timestep.reward or 0, np.float32)
self._episode_reward += reward
self._episode.append((timestep.observation, action, reward,
np.array(timestep.discount or 0, np.float32)))
def record_episode(self):
self._demos.append(_nested_stack(self._episode))
self._reset_episode()
def discard_episode(self):
self._reset_episode()
def _reset_episode(self):
self._episode = []
self._episode_reward = 0
@property
def episode_reward(self):
return self._episode_reward
def make_tf_dataset(self):
types = tree.map_structure(lambda x: x.dtype, self._demos[0])
shapes = tree.map_structure(lambda x: x.shape, self._demos[0])
ds = tf.data.Dataset.from_generator(lambda: self._demos, types, shapes)
return ds.repeat().shuffle(len(self._demos))
def _optimal_deep_sea_policy(environment: deep_sea.DeepSea,
timestep: dm_env.TimeStep):
action = environment._action_mapping[np.where(timestep.observation)] # pylint: disable=protected-access
return action[0].astype(np.int32)
def _run_optimal_deep_sea_episode(environment: deep_sea.DeepSea,
recorder: DemonstrationRecorder):
timestep = environment.reset()
while timestep.step_type is not dm_env.StepType.LAST:
action = _optimal_deep_sea_policy(environment, timestep)
recorder.step(timestep, action)
timestep = environment.step(action)
recorder.step(timestep, np.zeros_like(action))
def _make_deep_sea_dataset(environment: deep_sea.DeepSea):
"""Make DeepSea demonstration dataset."""
recorder = DemonstrationRecorder()
_run_optimal_deep_sea_episode(environment, recorder)
assert recorder.episode_reward > 0
recorder.record_episode()
return recorder.make_tf_dataset()
def _make_deep_sea_stochastic_dataset(environment: deep_sea.DeepSea):
"""Make stochastic DeepSea demonstration dataset."""
recorder = DemonstrationRecorder()
# Use 10*size demos, 80% success, 20% failure.
num_demos = environment._size * 10 # pylint: disable=protected-access
num_failures = num_demos // 5
num_successes = num_demos - num_failures
successes_saved = 0
failures_saved = 0
while (successes_saved < num_successes) or (failures_saved < num_failures):
_run_optimal_deep_sea_episode(environment, recorder)
if recorder.episode_reward > 0 and successes_saved < num_successes:
recorder.record_episode()
successes_saved += 1
elif recorder.episode_reward <= 0 and failures_saved < num_failures:
recorder.record_episode()
failures_saved += 1
else:
recorder.discard_episode()
return recorder.make_tf_dataset()
def make_dataset(environment: dm_env.Environment, stochastic: bool):
"""Make bsuite demos for the current task."""
if not stochastic:
assert isinstance(environment, deep_sea.DeepSea)
return _make_deep_sea_dataset(environment)
else:
assert isinstance(environment, deep_sea.DeepSea)
return _make_deep_sea_stochastic_dataset(environment)
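# --- Illustrative sketch (not part of the original file). ---
# Typical use, assuming a bsuite DeepSea environment is available:
#
#   from bsuite.environments import deep_sea
#   env = deep_sea.DeepSea(size=10)
#   demos = make_dataset(env, stochastic=False)  # Infinite, shuffled episodes.
#   observations, actions, rewards, discounts = next(iter(demos))
#
# Each element is a full episode, so every array has a leading time dimension.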
|
acme-master
|
acme/agents/tf/dqfd/bsuite_demonstrations.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for DQN agent."""
import acme
from acme import specs
from acme.agents.tf.dqfd import agent as dqfd
from acme.agents.tf.dqfd import bsuite_demonstrations
from acme.testing import fakes
import dm_env
import numpy as np
import sonnet as snt
from absl.testing import absltest
def _make_network(action_spec: specs.DiscreteArray) -> snt.Module:
return snt.Sequential([
snt.Flatten(),
snt.nets.MLP([50, 50, action_spec.num_values]),
])
class DQfDTest(absltest.TestCase):
def test_dqfd(self):
# Create a fake environment to test with.
# TODO(b/152596848): Allow DQN to deal with integer observations.
environment = fakes.DiscreteEnvironment(
num_actions=5,
num_observations=10,
obs_dtype=np.float32,
episode_length=10)
spec = specs.make_environment_spec(environment)
# Build demonstrations.
dummy_action = np.zeros((), dtype=np.int32)
recorder = bsuite_demonstrations.DemonstrationRecorder()
timestep = environment.reset()
while timestep.step_type is not dm_env.StepType.LAST:
recorder.step(timestep, dummy_action)
timestep = environment.step(dummy_action)
recorder.step(timestep, dummy_action)
recorder.record_episode()
# Construct the agent.
agent = dqfd.DQfD(
environment_spec=spec,
network=_make_network(spec.actions),
demonstration_dataset=recorder.make_tf_dataset(),
demonstration_ratio=0.5,
batch_size=10,
samples_per_insert=2,
min_replay_size=10)
# Try running the environment loop. We have no assertions here because all
# we care about is that the agent runs without raising any errors.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=10)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/dqfd/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQfD Agent implementation."""
import copy
import functools
import operator
from typing import Optional
from acme import datasets
from acme import specs
from acme import types as acme_types
from acme.adders import reverb as adders
from acme.agents import agent
from acme.agents.tf import actors
from acme.agents.tf import dqn
from acme.tf import utils as tf2_utils
import reverb
import sonnet as snt
import tensorflow as tf
import tree
import trfl
class DQfD(agent.Agent):
"""DQfD agent.
This implements a single-process DQN agent that mixes demonstrations with
actor experience.
"""
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
network: snt.Module,
demonstration_dataset: tf.data.Dataset,
demonstration_ratio: float,
batch_size: int = 256,
prefetch_size: int = 4,
target_update_period: int = 100,
samples_per_insert: float = 32.0,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
importance_sampling_exponent: float = 0.2,
n_step: int = 5,
epsilon: Optional[tf.Tensor] = None,
learning_rate: float = 1e-3,
discount: float = 0.99,
):
"""Initialize the agent.
Args:
environment_spec: description of the actions, observations, etc.
network: the online Q network (the one being optimized)
demonstration_dataset: tf.data.Dataset producing (timestep, action)
tuples containing full episodes.
demonstration_ratio: Ratio of transitions coming from demonstrations.
batch_size: batch size for updates.
prefetch_size: size to prefetch from replay.
target_update_period: number of learner steps to perform before updating
the target networks.
samples_per_insert: number of samples to take from replay for every insert
that is made.
      min_replay_size: minimum replay size before updating.
max_replay_size: maximum replay size.
importance_sampling_exponent: power to which importance weights are raised
before normalizing.
n_step: number of steps to squash into a single transition.
      epsilon: probability of taking a random action; if not given, a constant
        0.05 epsilon-greedy policy is used.
learning_rate: learning rate for the q-network update.
discount: discount to use for TD updates.
"""
# Create a replay server to add data to. This uses no limiter behavior in
# order to allow the Agent interface to handle it.
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=max_replay_size,
rate_limiter=reverb.rate_limiters.MinSize(1),
signature=adders.NStepTransitionAdder.signature(environment_spec))
self._server = reverb.Server([replay_table], port=None)
# The adder is used to insert observations into replay.
address = f'localhost:{self._server.port}'
adder = adders.NStepTransitionAdder(
client=reverb.Client(address),
n_step=n_step,
discount=discount)
# The dataset provides an interface to sample from replay.
replay_client = reverb.TFClient(address)
dataset = datasets.make_reverb_dataset(server_address=address)
# Combine with demonstration dataset.
transition = functools.partial(_n_step_transition_from_episode,
n_step=n_step,
discount=discount)
dataset_demos = demonstration_dataset.map(transition)
dataset = tf.data.experimental.sample_from_datasets(
[dataset, dataset_demos],
[1 - demonstration_ratio, demonstration_ratio])
# Batch and prefetch.
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(prefetch_size)
# Use constant 0.05 epsilon greedy policy by default.
if epsilon is None:
epsilon = tf.Variable(0.05, trainable=False)
policy_network = snt.Sequential([
network,
lambda q: trfl.epsilon_greedy(q, epsilon=epsilon).sample(),
])
# Create a target network.
target_network = copy.deepcopy(network)
# Ensure that we create the variables before proceeding (maybe not needed).
tf2_utils.create_variables(network, [environment_spec.observations])
tf2_utils.create_variables(target_network, [environment_spec.observations])
# Create the actor which defines how we take actions.
actor = actors.FeedForwardActor(policy_network, adder)
# The learner updates the parameters (and initializes them).
learner = dqn.DQNLearner(
network=network,
target_network=target_network,
discount=discount,
importance_sampling_exponent=importance_sampling_exponent,
learning_rate=learning_rate,
target_update_period=target_update_period,
dataset=dataset,
replay_client=replay_client)
super().__init__(
actor=actor,
learner=learner,
min_observations=max(batch_size, min_replay_size),
observations_per_step=float(batch_size) / samples_per_insert)
def _n_step_transition_from_episode(observations: acme_types.NestedTensor,
actions: tf.Tensor,
rewards: tf.Tensor,
discounts: tf.Tensor,
n_step: int,
discount: float):
"""Produce Reverb-like N-step transition from a full episode.
Observations, actions, rewards and discounts have the same length. This
function will ignore the first reward and discount and the last action.
Args:
observations: [L, ...] Tensor.
actions: [L, ...] Tensor.
rewards: [L] Tensor.
discounts: [L] Tensor.
n_step: number of steps to squash into a single transition.
discount: discount to use for TD updates.
  Returns:
    A reverb.ReplaySample whose data is a Transition(o_t, a_t, r_t, d_t, o_tp1).
  """
max_index = tf.shape(rewards)[0] - 1
first = tf.random.uniform(shape=(), minval=0, maxval=max_index - 1,
dtype=tf.int32)
last = tf.minimum(first + n_step, max_index)
o_t = tree.map_structure(operator.itemgetter(first), observations)
a_t = tree.map_structure(operator.itemgetter(first), actions)
o_tp1 = tree.map_structure(operator.itemgetter(last), observations)
# 0, 1, ..., n-1.
discount_range = tf.cast(tf.range(last - first), tf.float32)
# 1, g, ..., g^{n-1}.
additional_discounts = tf.pow(discount, discount_range)
# 1, d_t, d_t * d_{t+1}, ..., d_t * ... * d_{t+n-2}.
discounts = tf.concat([[1.], tf.math.cumprod(discounts[first:last-1])], 0)
# 1, g * d_t, ..., g^{n-1} * d_t * ... * d_{t+n-2}.
discounts *= additional_discounts
# r_t + g * d_t * r_{t+1} + ... + g^{n-1} * d_t * ... * d_{t+n-2} * r_{t+n-1}
# We have to shift rewards by one so last=max_index corresponds to transitions
# that include the last reward.
r_t = tf.reduce_sum(rewards[first+1:last+1] * discounts)
  # g^{n-1} * d_t * ... * d_{t+n-2}.
d_t = discounts[-1]
info = tree.map_structure(lambda dtype: tf.ones([], dtype),
reverb.SampleInfo.tf_dtypes())
return reverb.ReplaySample(
info=info, data=acme_types.Transition(o_t, a_t, r_t, d_t, o_tp1))
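# The block below is an illustrative sketch only and is not used by the agent:
# it mirrors the discount/reward accumulation in _n_step_transition_from_episode
# with plain NumPy so the n-step arithmetic can be checked on concrete, made-up
# numbers (gamma, rewards and discounts here are arbitrary example values).
if __name__ == '__main__':
  import numpy as np  # Local import; only needed for this demo.
  gamma = 0.99
  rewards = np.array([0., 1., 0., 1., 1.], dtype=np.float32)
  env_discounts = np.array([1., 1., 1., 1., 0.], dtype=np.float32)
  first, last = 0, 3  # Mimics a sampled (first, last) pair with n_step=3.
  # 1, g, ..., g^{n-1}.
  additional = gamma ** np.arange(last - first, dtype=np.float32)
  # 1, d_t, d_t * d_{t+1}, ...
  cumulative = np.concatenate([[1.], np.cumprod(env_discounts[first:last - 1])])
  weights = additional * cumulative
  r_t = np.sum(rewards[first + 1:last + 1] * weights)
  d_t = weights[-1]
  print('n-step reward:', r_t, 'n-step discount:', d_t)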
|
acme-master
|
acme/agents/tf/dqfd/agent.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recurrent CRR learner implementation."""
import operator
import time
from typing import Dict, List, Optional
from acme import core
from acme.tf import losses
from acme.tf import networks
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
import tree
class RCRRLearner(core.Learner):
"""Recurrent CRR learner.
This is the learning component of a RCRR agent. It takes a dataset as
input and implements update functionality to learn from this dataset.
"""
def __init__(self,
policy_network: snt.RNNCore,
critic_network: networks.CriticDeepRNN,
target_policy_network: snt.RNNCore,
target_critic_network: networks.CriticDeepRNN,
dataset: tf.data.Dataset,
accelerator_strategy: Optional[tf.distribute.Strategy] = None,
behavior_network: Optional[snt.Module] = None,
cwp_network: Optional[snt.Module] = None,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
discount: float = 0.99,
target_update_period: int = 100,
num_action_samples_td_learning: int = 1,
num_action_samples_policy_weight: int = 4,
baseline_reduce_function: str = 'mean',
clipping: bool = True,
policy_improvement_modes: str = 'exp',
ratio_upper_bound: float = 20.,
beta: float = 1.0,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = False):
"""Initializes the learner.
Args:
policy_network: the online (optimized) policy.
critic_network: the online critic.
target_policy_network: the target policy (which lags behind the online
policy).
target_critic_network: the target critic.
dataset: dataset to learn from, whether fixed or from a replay buffer
(see `acme.datasets.reverb.make_reverb_dataset` documentation).
accelerator_strategy: the strategy used to distribute computation,
whether on a single, or multiple, GPU or TPU; as supported by
tf.distribute.
behavior_network: The network to snapshot under `policy` name. If None,
snapshots `policy_network` instead.
      cwp_network: CWP (critic-weighted policy) network to snapshot. It samples
        actions from the policy, weights them with the critic, and returns an
        action sampled from the softmax distribution that uses the critic
        values as logits. Used only for snapshotting, not training.
policy_optimizer: the optimizer to be applied to the policy loss.
critic_optimizer: the optimizer to be applied to the distributional
Bellman loss.
discount: discount to use for TD updates.
target_update_period: number of learner steps to perform before updating
the target networks.
num_action_samples_td_learning: number of action samples to use to
estimate expected value of the critic loss w.r.t. stochastic policy.
num_action_samples_policy_weight: number of action samples to use to
estimate the advantage function for the CRR weighting of the policy
loss.
baseline_reduce_function: one of 'mean', 'max', 'min'. Way of aggregating
values from `num_action_samples` estimates of the value function.
clipping: whether to clip gradients by global norm.
policy_improvement_modes: one of 'exp', 'binary', 'all'. CRR mode which
determines how the advantage function is processed before being
multiplied by the policy loss.
      ratio_upper_bound: if policy_improvement_modes is 'exp', determines the
        upper bound of the weight, i.e. the weight is
        min(exp(advantage / beta), ratio_upper_bound).
      beta: if policy_improvement_modes is 'exp', the temperature used in
        exp(advantage / beta).
counter: counter object used to keep track of steps.
logger: logger object to be used by learner.
checkpoint: boolean indicating whether to checkpoint the learner.
"""
if accelerator_strategy is None:
accelerator_strategy = snt.distribute.Replicator()
self._accelerator_strategy = accelerator_strategy
self._policy_improvement_modes = policy_improvement_modes
self._ratio_upper_bound = ratio_upper_bound
self._num_action_samples_td_learning = num_action_samples_td_learning
self._num_action_samples_policy_weight = num_action_samples_policy_weight
self._baseline_reduce_function = baseline_reduce_function
self._beta = beta
# When running on TPUs we have to know the amount of memory required (and
# thus the sequence length) at the graph compilation stage. At the moment,
# the only way to get it is to sample from the dataset, since the dataset
# does not have any metadata, see b/160672927 to track this upcoming
# feature.
sample = next(dataset.as_numpy_iterator())
self._sequence_length = sample.action.shape[1]
self._counter = counter or counting.Counter()
self._logger = logger or loggers.TerminalLogger('learner', time_delta=1.)
self._discount = discount
self._clipping = clipping
self._target_update_period = target_update_period
with self._accelerator_strategy.scope():
# Necessary to track when to update target networks.
self._num_steps = tf.Variable(0, dtype=tf.int32)
# (Maybe) distributing the dataset across multiple accelerators.
distributed_dataset = self._accelerator_strategy.experimental_distribute_dataset(
dataset)
self._iterator = iter(distributed_dataset)
# Create the optimizers.
self._critic_optimizer = critic_optimizer or snt.optimizers.Adam(1e-4)
self._policy_optimizer = policy_optimizer or snt.optimizers.Adam(1e-4)
# Store online and target networks.
self._policy_network = policy_network
self._critic_network = critic_network
self._target_policy_network = target_policy_network
self._target_critic_network = target_critic_network
# Expose the variables.
self._variables = {
'critic': self._target_critic_network.variables,
'policy': self._target_policy_network.variables,
}
# Create a checkpointer object.
self._checkpointer = None
self._snapshotter = None
if checkpoint:
self._checkpointer = tf2_savers.Checkpointer(
objects_to_save={
'counter': self._counter,
'policy': self._policy_network,
'critic': self._critic_network,
'target_policy': self._target_policy_network,
'target_critic': self._target_critic_network,
'policy_optimizer': self._policy_optimizer,
'critic_optimizer': self._critic_optimizer,
'num_steps': self._num_steps,
},
time_delta_minutes=30.)
raw_policy = snt.DeepRNN(
[policy_network, networks.StochasticSamplingHead()])
critic_mean = networks.CriticDeepRNN(
[critic_network, networks.StochasticMeanHead()])
objects_to_save = {
'raw_policy': raw_policy,
'critic': critic_mean,
}
if behavior_network is not None:
objects_to_save['policy'] = behavior_network
if cwp_network is not None:
objects_to_save['cwp_policy'] = cwp_network
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save=objects_to_save, time_delta_minutes=30)
# Timestamp to keep track of the wall time.
self._walltime_timestamp = time.time()
def _step(self, sample: reverb.ReplaySample) -> Dict[str, tf.Tensor]:
# Transpose batch and sequence axes, i.e. [B, T, ...] to [T, B, ...].
sample = tf2_utils.batch_to_sequence(sample)
observations = sample.observation
actions = sample.action
rewards = sample.reward
discounts = sample.discount
dtype = rewards.dtype
# Cast the additional discount to match the environment discount dtype.
discount = tf.cast(self._discount, dtype=discounts.dtype)
# Loss cumulants across time. These cannot be python mutable objects.
critic_loss = 0.
policy_loss = 0.
# Each transition induces a policy loss, which we then weight using
# the `policy_loss_coef_t`; shape [B], see https://arxiv.org/abs/2006.15134.
# `policy_loss_coef` is a scalar average of these coefficients across
# the batch and sequence length dimensions.
policy_loss_coef = 0.
per_device_batch_size = actions.shape[1]
# Initialize recurrent states.
critic_state = self._critic_network.initial_state(per_device_batch_size)
target_critic_state = critic_state
policy_state = self._policy_network.initial_state(per_device_batch_size)
target_policy_state = policy_state
with tf.GradientTape(persistent=True) as tape:
for t in range(1, self._sequence_length):
o_tm1 = tree.map_structure(operator.itemgetter(t - 1), observations)
a_tm1 = tree.map_structure(operator.itemgetter(t - 1), actions)
r_t = tree.map_structure(operator.itemgetter(t - 1), rewards)
d_t = tree.map_structure(operator.itemgetter(t - 1), discounts)
o_t = tree.map_structure(operator.itemgetter(t), observations)
if t != 1:
# By only updating the target critic state here we are forcing
# the target critic to ignore observations[0]. Otherwise, the
# target_critic will be unrolled for one more timestep than critic.
# The smaller the sequence length, the more problematic this is: if
# you use RNN on sequences of length 2, you would expect the code to
# never use recurrent connections. But if you don't skip updating the
# target_critic_state on observation[0] here, it won't be the case.
_, target_critic_state = self._target_critic_network(
o_tm1, a_tm1, target_critic_state)
# ========================= Critic learning ============================
q_tm1, next_critic_state = self._critic_network(o_tm1, a_tm1,
critic_state)
target_action_distribution, target_policy_state = self._target_policy_network(
o_t, target_policy_state)
sampled_actions_t = target_action_distribution.sample(
self._num_action_samples_td_learning)
# [N, B, ...]
tiled_o_t = tf2_utils.tile_nested(
o_t, self._num_action_samples_td_learning)
tiled_target_critic_state = tf2_utils.tile_nested(
target_critic_state, self._num_action_samples_td_learning)
# Compute the target critic's Q-value of the sampled actions.
sampled_q_t, _ = snt.BatchApply(self._target_critic_network)(
tiled_o_t, sampled_actions_t, tiled_target_critic_state)
# Compute average logits by first reshaping them to [N, B, A] and then
# normalizing them across atoms.
new_shape = [self._num_action_samples_td_learning, r_t.shape[0], -1]
sampled_logits = tf.reshape(sampled_q_t.logits, new_shape)
sampled_logprobs = tf.math.log_softmax(sampled_logits, axis=-1)
averaged_logits = tf.reduce_logsumexp(sampled_logprobs, axis=0)
# Construct the expected distributional value for bootstrapping.
q_t = networks.DiscreteValuedDistribution(
values=sampled_q_t.values, logits=averaged_logits)
critic_loss_t = losses.categorical(q_tm1, r_t, discount * d_t, q_t)
critic_loss_t = tf.reduce_mean(critic_loss_t)
# ========================= Actor learning =============================
action_distribution_tm1, policy_state = self._policy_network(
o_tm1, policy_state)
q_tm1_mean = q_tm1.mean()
# Compute the estimate of the value function based on
# self._num_action_samples_policy_weight samples from the policy.
tiled_o_tm1 = tf2_utils.tile_nested(
o_tm1, self._num_action_samples_policy_weight)
tiled_critic_state = tf2_utils.tile_nested(
critic_state, self._num_action_samples_policy_weight)
action_tm1 = action_distribution_tm1.sample(
self._num_action_samples_policy_weight)
tiled_z_tm1, _ = snt.BatchApply(self._critic_network)(
tiled_o_tm1, action_tm1, tiled_critic_state)
tiled_v_tm1 = tf.reshape(tiled_z_tm1.mean(),
[self._num_action_samples_policy_weight, -1])
# Use mean, min, or max to aggregate Q(s, a_i), a_i ~ pi(s) into the
# final estimate of the value function.
if self._baseline_reduce_function == 'mean':
v_tm1_estimate = tf.reduce_mean(tiled_v_tm1, axis=0)
elif self._baseline_reduce_function == 'max':
v_tm1_estimate = tf.reduce_max(tiled_v_tm1, axis=0)
elif self._baseline_reduce_function == 'min':
v_tm1_estimate = tf.reduce_min(tiled_v_tm1, axis=0)
# Assert that action_distribution_tm1 is a batch of multivariate
# distributions (in contrast to e.g. a [batch, action_size] collection
# of 1d distributions).
assert len(action_distribution_tm1.batch_shape) == 1
policy_loss_batch = -action_distribution_tm1.log_prob(a_tm1)
advantage = q_tm1_mean - v_tm1_estimate
if self._policy_improvement_modes == 'exp':
policy_loss_coef_t = tf.math.minimum(
tf.math.exp(advantage / self._beta), self._ratio_upper_bound)
elif self._policy_improvement_modes == 'binary':
policy_loss_coef_t = tf.cast(advantage > 0, dtype=dtype)
elif self._policy_improvement_modes == 'all':
# Regress against all actions (effectively pure BC).
policy_loss_coef_t = 1.
policy_loss_coef_t = tf.stop_gradient(policy_loss_coef_t)
policy_loss_batch *= policy_loss_coef_t
policy_loss_t = tf.reduce_mean(policy_loss_batch)
critic_state = next_critic_state
critic_loss += critic_loss_t
policy_loss += policy_loss_t
policy_loss_coef += tf.reduce_mean(policy_loss_coef_t) # For logging.
# Divide by sequence length to get mean losses.
critic_loss /= tf.cast(self._sequence_length, dtype=dtype)
policy_loss /= tf.cast(self._sequence_length, dtype=dtype)
policy_loss_coef /= tf.cast(self._sequence_length, dtype=dtype)
# Compute gradients.
critic_gradients = tape.gradient(critic_loss,
self._critic_network.trainable_variables)
policy_gradients = tape.gradient(policy_loss,
self._policy_network.trainable_variables)
# Delete the tape manually because of the persistent=True flag.
del tape
# Sync gradients across GPUs or TPUs.
ctx = tf.distribute.get_replica_context()
critic_gradients = ctx.all_reduce('mean', critic_gradients)
policy_gradients = ctx.all_reduce('mean', policy_gradients)
# Maybe clip gradients.
if self._clipping:
policy_gradients = tf.clip_by_global_norm(policy_gradients, 40.)[0]
critic_gradients = tf.clip_by_global_norm(critic_gradients, 40.)[0]
# Apply gradients.
self._critic_optimizer.apply(critic_gradients,
self._critic_network.trainable_variables)
self._policy_optimizer.apply(policy_gradients,
self._policy_network.trainable_variables)
source_variables = (
self._critic_network.variables + self._policy_network.variables)
target_variables = (
self._target_critic_network.variables +
self._target_policy_network.variables)
# Make online -> target network update ops.
if tf.math.mod(self._num_steps, self._target_update_period) == 0:
for src, dest in zip(source_variables, target_variables):
dest.assign(src)
self._num_steps.assign_add(1)
return {
'critic_loss': critic_loss,
'policy_loss': policy_loss,
'policy_loss_coef': policy_loss_coef,
}
@tf.function
def _replicated_step(self) -> Dict[str, tf.Tensor]:
sample = next(self._iterator)
fetches = self._accelerator_strategy.run(self._step, args=(sample,))
mean = tf.distribute.ReduceOp.MEAN
return {
k: self._accelerator_strategy.reduce(mean, fetches[k], axis=None)
for k in fetches
}
def step(self):
# Run the learning step.
with self._accelerator_strategy.scope():
fetches = self._replicated_step()
# Update our counts and record it.
new_timestamp = time.time()
time_passed = new_timestamp - self._walltime_timestamp
self._walltime_timestamp = new_timestamp
counts = self._counter.increment(steps=1, wall_time=time_passed)
fetches.update(counts)
# Checkpoint and attempt to write the logs.
if self._checkpointer is not None:
self._checkpointer.save()
self._snapshotter.save()
self._logger.write(fetches)
def get_variables(self, names: List[str]) -> List[List[np.ndarray]]:
return [tf2_utils.to_numpy(self._variables[name]) for name in names]
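# The block below is an illustrative sketch only and is not used by the learner:
# it reproduces the CRR policy-loss coefficient for each supported value of
# `policy_improvement_modes` on a made-up advantage vector; the beta and upper
# bound used here are arbitrary example values.
if __name__ == '__main__':
  advantage = np.array([-1.0, 0.0, 2.0], dtype=np.float32)
  demo_beta, demo_upper_bound = 1.0, 20.0
  coef_exp = np.minimum(np.exp(advantage / demo_beta), demo_upper_bound)
  coef_binary = (advantage > 0).astype(np.float32)
  coef_all = np.ones_like(advantage)  # Regress against all actions (pure BC).
  print('exp:', coef_exp, 'binary:', coef_binary, 'all:', coef_all)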
|
acme-master
|
acme/agents/tf/crr/recurrent_learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of a CRR agent."""
from acme.agents.tf.crr.recurrent_learning import RCRRLearner
|
acme-master
|
acme/agents/tf/crr/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the distributed MCTS agent topology via Launchpad."""
from typing import Callable, Optional
import acme
from acme import datasets
from acme import specs
from acme.adders import reverb as adders
from acme.agents.tf.mcts import acting
from acme.agents.tf.mcts import learning
from acme.agents.tf.mcts import models
from acme.tf import utils as tf2_utils
from acme.tf import variable_utils as tf2_variable_utils
from acme.utils import counting
from acme.utils import loggers
import dm_env
import launchpad as lp
import reverb
import sonnet as snt
class DistributedMCTS:
"""Distributed MCTS agent."""
def __init__(
self,
environment_factory: Callable[[], dm_env.Environment],
network_factory: Callable[[specs.DiscreteArray], snt.Module],
model_factory: Callable[[specs.EnvironmentSpec], models.Model],
num_actors: int,
num_simulations: int = 50,
batch_size: int = 256,
prefetch_size: int = 4,
target_update_period: int = 100,
samples_per_insert: float = 32.0,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
importance_sampling_exponent: float = 0.2,
priority_exponent: float = 0.6,
n_step: int = 5,
learning_rate: float = 1e-3,
discount: float = 0.99,
environment_spec: Optional[specs.EnvironmentSpec] = None,
save_logs: bool = False,
variable_update_period: int = 1000,
):
if environment_spec is None:
environment_spec = specs.make_environment_spec(environment_factory())
# These 'factories' create the relevant components on the workers.
self._environment_factory = environment_factory
self._network_factory = network_factory
self._model_factory = model_factory
# Internalize hyperparameters.
self._num_actors = num_actors
self._num_simulations = num_simulations
self._env_spec = environment_spec
self._batch_size = batch_size
self._prefetch_size = prefetch_size
self._target_update_period = target_update_period
self._samples_per_insert = samples_per_insert
self._min_replay_size = min_replay_size
self._max_replay_size = max_replay_size
self._importance_sampling_exponent = importance_sampling_exponent
self._priority_exponent = priority_exponent
self._n_step = n_step
self._learning_rate = learning_rate
self._discount = discount
self._save_logs = save_logs
self._variable_update_period = variable_update_period
def replay(self):
"""The replay storage worker."""
limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._min_replay_size,
samples_per_insert=self._samples_per_insert,
error_buffer=self._batch_size)
extra_spec = {
'pi':
specs.Array(
shape=(self._env_spec.actions.num_values,), dtype='float32')
}
signature = adders.NStepTransitionAdder.signature(self._env_spec,
extra_spec)
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._max_replay_size,
rate_limiter=limiter,
signature=signature)
return [replay_table]
def learner(self, replay: reverb.Client, counter: counting.Counter):
"""The learning part of the agent."""
# Create the networks.
network = self._network_factory(self._env_spec.actions)
tf2_utils.create_variables(network, [self._env_spec.observations])
# The dataset object to learn from.
dataset = datasets.make_reverb_dataset(
server_address=replay.server_address,
batch_size=self._batch_size,
prefetch_size=self._prefetch_size)
# Create the optimizer.
optimizer = snt.optimizers.Adam(self._learning_rate)
# Return the learning agent.
return learning.AZLearner(
network=network,
discount=self._discount,
dataset=dataset,
optimizer=optimizer,
counter=counter,
)
def actor(
self,
replay: reverb.Client,
variable_source: acme.VariableSource,
counter: counting.Counter,
) -> acme.EnvironmentLoop:
"""The actor process."""
# Build environment, model, network.
environment = self._environment_factory()
network = self._network_factory(self._env_spec.actions)
model = self._model_factory(self._env_spec)
# Create variable client for communicating with the learner.
tf2_utils.create_variables(network, [self._env_spec.observations])
variable_client = tf2_variable_utils.VariableClient(
client=variable_source,
variables={'network': network.trainable_variables},
update_period=self._variable_update_period)
# Component to add things into replay.
adder = adders.NStepTransitionAdder(
client=replay,
n_step=self._n_step,
discount=self._discount,
)
# Create the agent.
actor = acting.MCTSActor(
environment_spec=self._env_spec,
model=model,
network=network,
discount=self._discount,
adder=adder,
variable_client=variable_client,
num_simulations=self._num_simulations,
)
# Create the loop to connect environment and agent.
return acme.EnvironmentLoop(environment, actor, counter)
def evaluator(
self,
variable_source: acme.VariableSource,
counter: counting.Counter,
):
"""The evaluation process."""
# Build environment, model, network.
environment = self._environment_factory()
network = self._network_factory(self._env_spec.actions)
model = self._model_factory(self._env_spec)
# Create variable client for communicating with the learner.
tf2_utils.create_variables(network, [self._env_spec.observations])
variable_client = tf2_variable_utils.VariableClient(
client=variable_source,
variables={'policy': network.trainable_variables},
update_period=self._variable_update_period)
# Create the agent.
actor = acting.MCTSActor(
environment_spec=self._env_spec,
model=model,
network=network,
discount=self._discount,
variable_client=variable_client,
num_simulations=self._num_simulations,
)
# Create the run loop and return it.
logger = loggers.make_default_logger('evaluator')
return acme.EnvironmentLoop(
environment, actor, counter=counter, logger=logger)
def build(self, name='MCTS'):
"""Builds the distributed agent topology."""
program = lp.Program(name=name)
with program.group('replay'):
replay = program.add_node(lp.ReverbNode(self.replay), label='replay')
with program.group('counter'):
counter = program.add_node(
lp.CourierNode(counting.Counter), label='counter')
with program.group('learner'):
learner = program.add_node(
lp.CourierNode(self.learner, replay, counter), label='learner')
with program.group('evaluator'):
program.add_node(
lp.CourierNode(self.evaluator, learner, counter), label='evaluator')
with program.group('actor'):
program.add_node(
lp.CourierNode(self.actor, replay, learner, counter), label='actor')
return program
|
acme-master
|
acme/agents/tf/mcts/agent_distributed.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Monte-Carlo tree search (MCTS) agent."""
from acme.agents.tf.mcts.agent import MCTS
from acme.agents.tf.mcts.agent_distributed import DistributedMCTS
|
acme-master
|
acme/agents/tf/mcts/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Type aliases and assumptions that are specific to the MCTS agent."""
from typing import Callable, Tuple, Union
import numpy as np
# pylint: disable=invalid-name
# Assumption: actions are scalar and discrete (integral).
Action = Union[int, np.int32, np.int64]
# Assumption: observations are array-like.
Observation = np.ndarray
# Assumption: rewards and discounts are scalar.
Reward = Union[float, np.float32, np.float64]
Discount = Union[float, np.float32, np.float64]
# Notation: policy logits/probabilities are simply a vector of floats.
Probs = np.ndarray
# Notation: the value function is scalar-valued.
Value = float
# Notation: the 'evaluation function' maps observations -> (probs, value).
EvaluationFn = Callable[[Observation], Tuple[Probs, Value]]
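# Illustration only: a trivial function satisfying `EvaluationFn`. It ignores
# the observation and returns a uniform prior over a hypothetical 5 actions
# together with a zero value estimate (the same pattern used in search_test.py).
if __name__ == '__main__':
  example_eval_fn: EvaluationFn = lambda obs: (np.ones(5) / 5, 0.)
  print(example_eval_fn(np.zeros(3)))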
|
acme-master
|
acme/agents/tf/mcts/types.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the MCTS agent."""
import acme
from acme import specs
from acme.agents.tf import mcts
from acme.agents.tf.mcts.models import simulator
from acme.testing import fakes
from acme.tf import networks
import numpy as np
import sonnet as snt
from absl.testing import absltest
class MCTSTest(absltest.TestCase):
def test_mcts(self):
# Create a fake environment to test with.
num_actions = 5
environment = fakes.DiscreteEnvironment(
num_actions=num_actions,
num_observations=10,
obs_dtype=np.float32,
episode_length=10)
spec = specs.make_environment_spec(environment)
network = snt.Sequential([
snt.Flatten(),
snt.nets.MLP([50, 50]),
networks.PolicyValueHead(spec.actions.num_values),
])
model = simulator.Simulator(environment)
optimizer = snt.optimizers.Adam(1e-3)
# Construct the agent.
agent = mcts.MCTS(
environment_spec=spec,
network=network,
model=model,
optimizer=optimizer,
n_step=1,
discount=1.,
replay_capacity=100,
num_simulations=10,
batch_size=10)
# Try running the environment loop. We have no assertions here because all
# we care about is that the agent runs without raising any errors.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=2)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/mcts/agent_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A single-process MCTS agent."""
from acme import datasets
from acme import specs
from acme.adders import reverb as adders
from acme.agents import agent
from acme.agents.tf.mcts import acting
from acme.agents.tf.mcts import learning
from acme.agents.tf.mcts import models
from acme.tf import utils as tf2_utils
import numpy as np
import reverb
import sonnet as snt
class MCTS(agent.Agent):
"""A single-process MCTS agent."""
def __init__(
self,
network: snt.Module,
model: models.Model,
optimizer: snt.Optimizer,
n_step: int,
discount: float,
replay_capacity: int,
num_simulations: int,
environment_spec: specs.EnvironmentSpec,
batch_size: int,
):
extra_spec = {
'pi':
specs.Array(
shape=(environment_spec.actions.num_values,), dtype=np.float32)
}
# Create a replay server for storing transitions.
replay_table = reverb.Table(
name=adders.DEFAULT_PRIORITY_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=replay_capacity,
rate_limiter=reverb.rate_limiters.MinSize(1),
signature=adders.NStepTransitionAdder.signature(
environment_spec, extra_spec))
self._server = reverb.Server([replay_table], port=None)
# The adder is used to insert observations into replay.
address = f'localhost:{self._server.port}'
adder = adders.NStepTransitionAdder(
client=reverb.Client(address),
n_step=n_step,
discount=discount)
# The dataset provides an interface to sample from replay.
dataset = datasets.make_reverb_dataset(server_address=address)
dataset = dataset.batch(batch_size, drop_remainder=True)
tf2_utils.create_variables(network, [environment_spec.observations])
# Now create the agent components: actor & learner.
actor = acting.MCTSActor(
environment_spec=environment_spec,
model=model,
network=network,
discount=discount,
adder=adder,
num_simulations=num_simulations,
)
learner = learning.AZLearner(
network=network,
optimizer=optimizer,
dataset=dataset,
discount=discount,
)
# The parent class combines these together into one 'agent'.
super().__init__(
actor=actor,
learner=learner,
min_observations=10,
observations_per_step=1,
)
|
acme-master
|
acme/agents/tf/mcts/agent.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for search.py."""
from typing import Text
from acme.agents.tf.mcts import search
from acme.agents.tf.mcts.models import simulator
from bsuite.environments import catch
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
class TestSearch(parameterized.TestCase):
@parameterized.parameters([
'puct',
'bfs',
])
def test_catch(self, policy_type: Text):
env = catch.Catch(rows=2, seed=1)
num_actions = env.action_spec().num_values
model = simulator.Simulator(env)
eval_fn = lambda _: (np.ones(num_actions) / num_actions, 0.)
timestep = env.reset()
model.reset()
search_policy = search.bfs if policy_type == 'bfs' else search.puct
root = search.mcts(
observation=timestep.observation,
model=model,
search_policy=search_policy,
evaluation=eval_fn,
num_simulations=100,
num_actions=num_actions)
values = np.array([c.value for c in root.children.values()])
best_action = search.argmax(values)
if env._paddle_x > env._ball_x:
self.assertEqual(best_action, 0)
if env._paddle_x == env._ball_x:
self.assertEqual(best_action, 1)
if env._paddle_x < env._ball_x:
self.assertEqual(best_action, 2)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/mcts/search_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Monte Carlo Tree Search implementation."""
import dataclasses
from typing import Callable, Dict
from acme.agents.tf.mcts import models
from acme.agents.tf.mcts import types
import numpy as np
@dataclasses.dataclass
class Node:
"""A MCTS node."""
reward: float = 0.
visit_count: int = 0
terminal: bool = False
prior: float = 1.
total_value: float = 0.
children: Dict[types.Action, 'Node'] = dataclasses.field(default_factory=dict)
def expand(self, prior: np.ndarray):
"""Expands this node, adding child nodes."""
assert prior.ndim == 1 # Prior should be a flat vector.
for a, p in enumerate(prior):
self.children[a] = Node(prior=p)
@property
def value(self) -> types.Value: # Q(s, a)
"""Returns the value from this node."""
if self.visit_count:
return self.total_value / self.visit_count
return 0.
@property
def children_visits(self) -> np.ndarray:
"""Return array of visit counts of visited children."""
return np.array([c.visit_count for c in self.children.values()])
@property
def children_values(self) -> np.ndarray:
"""Return array of values of visited children."""
return np.array([c.value for c in self.children.values()])
SearchPolicy = Callable[[Node], types.Action]
def mcts(
observation: types.Observation,
model: models.Model,
search_policy: SearchPolicy,
evaluation: types.EvaluationFn,
num_simulations: int,
num_actions: int,
discount: float = 1.,
dirichlet_alpha: float = 1,
exploration_fraction: float = 0.,
) -> Node:
"""Does Monte Carlo tree search (MCTS), AlphaZero style."""
# Evaluate the prior policy for this state.
prior, value = evaluation(observation)
assert prior.shape == (num_actions,)
# Add exploration noise to the prior.
noise = np.random.dirichlet(alpha=[dirichlet_alpha] * num_actions)
prior = prior * (1 - exploration_fraction) + noise * exploration_fraction
# Create a fresh tree search.
root = Node()
root.expand(prior)
# Save the model state so that we can reset it for each simulation.
model.save_checkpoint()
for _ in range(num_simulations):
# Start a new simulation from the top.
trajectory = [root]
node = root
# Generate a trajectory.
timestep = None
while node.children:
# Select an action according to the search policy.
action = search_policy(node)
# Point the node at the corresponding child.
node = node.children[action]
# Step the simulator and add this timestep to the node.
timestep = model.step(action)
node.reward = timestep.reward or 0.
node.terminal = timestep.last()
trajectory.append(node)
if timestep is None:
raise ValueError('Generated an empty rollout; this should not happen.')
# Calculate the bootstrap for leaf nodes.
if node.terminal:
# If terminal, there is no bootstrap value.
value = 0.
else:
# Otherwise, bootstrap from this node with our value function.
prior, value = evaluation(timestep.observation)
# We also want to expand this node for next time.
node.expand(prior)
# Load the saved model state.
model.load_checkpoint()
# Monte Carlo back-up with bootstrap from value function.
ret = value
while trajectory:
# Pop off the latest node in the trajectory.
node = trajectory.pop()
# Accumulate the discounted return
ret *= discount
ret += node.reward
# Update the node.
node.total_value += ret
node.visit_count += 1
return root
def bfs(node: Node) -> types.Action:
"""Breadth-first search policy."""
visit_counts = np.array([c.visit_count for c in node.children.values()])
return argmax(-visit_counts)
def puct(node: Node, ucb_scaling: float = 1.) -> types.Action:
"""PUCT search policy, i.e. UCT with 'prior' policy."""
# Action values Q(s,a).
value_scores = np.array([child.value for child in node.children.values()])
check_numerics(value_scores)
# Policy prior P(s,a).
priors = np.array([child.prior for child in node.children.values()])
check_numerics(priors)
# Visit ratios.
visit_ratios = np.array([
np.sqrt(node.visit_count) / (child.visit_count + 1)
for child in node.children.values()
])
check_numerics(visit_ratios)
# Combine.
puct_scores = value_scores + ucb_scaling * priors * visit_ratios
return argmax(puct_scores)
def visit_count_policy(root: Node, temperature: float = 1.) -> types.Probs:
"""Probability weighted by visit^{1/temp} of children nodes."""
visits = root.children_visits
if np.sum(visits) == 0: # uniform policy for zero visits
visits += 1
rescaled_visits = visits**(1 / temperature)
probs = rescaled_visits / np.sum(rescaled_visits)
check_numerics(probs)
return probs
def argmax(values: np.ndarray) -> types.Action:
"""Argmax with random tie-breaking."""
check_numerics(values)
max_value = np.max(values)
return np.int32(np.random.choice(np.flatnonzero(values == max_value)))
def check_numerics(values: np.ndarray):
"""Raises a ValueError if any of the inputs are NaN or Inf."""
if not np.isfinite(values).all():
raise ValueError('check_numerics failed. Inputs: {}. '.format(values))
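# The block below is an illustrative demo only and is not used by the search:
# it builds a tiny root node with made-up priors, visit counts and values, then
# prints the PUCT decision and the visit-count policy defined above.
if __name__ == '__main__':
  toy_root = Node()
  toy_root.expand(np.array([0.5, 0.3, 0.2]))
  toy_root.visit_count = 10
  for toy_action, (visits, value_sum) in enumerate([(6, 3.0), (3, 2.4), (1, 0.1)]):
    toy_root.children[toy_action].visit_count = visits
    toy_root.children[toy_action].total_value = value_sum
  print('puct action:', puct(toy_root))
  print('visit-count policy:', visit_count_policy(toy_root, temperature=1.))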
|
acme-master
|
acme/agents/tf/mcts/search.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A MCTS "AlphaZero-style" learner."""
from typing import List, Optional
import acme
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import sonnet as snt
import tensorflow as tf
class AZLearner(acme.Learner):
"""AlphaZero-style learning."""
def __init__(
self,
network: snt.Module,
optimizer: snt.Optimizer,
dataset: tf.data.Dataset,
discount: float,
logger: Optional[loggers.Logger] = None,
counter: Optional[counting.Counter] = None,
):
# Logger and counter for tracking statistics / writing out to terminal.
self._counter = counting.Counter(counter, 'learner')
self._logger = logger or loggers.TerminalLogger('learner', time_delta=30.)
# Internalize components.
# TODO(b/155086959): Fix type stubs and remove.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
self._optimizer = optimizer
self._network = network
self._variables = network.trainable_variables
self._discount = np.float32(discount)
@tf.function
def _step(self) -> tf.Tensor:
"""Do a step of SGD on the loss."""
inputs = next(self._iterator)
o_t, _, r_t, d_t, o_tp1, extras = inputs.data
pi_t = extras['pi']
with tf.GradientTape() as tape:
# Forward the network on the two states in the transition.
logits, value = self._network(o_t)
_, target_value = self._network(o_tp1)
target_value = tf.stop_gradient(target_value)
# Value loss is simply on-policy TD learning.
value_loss = tf.square(r_t + self._discount * d_t * target_value - value)
# Policy loss distills MCTS policy into the policy network.
policy_loss = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=pi_t)
# Compute gradients.
loss = tf.reduce_mean(value_loss + policy_loss)
gradients = tape.gradient(loss, self._network.trainable_variables)
self._optimizer.apply(gradients, self._network.trainable_variables)
return loss
def step(self):
"""Does a step of SGD and logs the results."""
loss = self._step()
self._logger.write({'loss': loss})
def get_variables(self, names: List[str]) -> List[List[np.ndarray]]:
"""Exposes the variables for actors to update from."""
return tf2_utils.to_numpy(self._variables)
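# The block below is an illustrative sketch only and is not used by the learner:
# it reproduces the AlphaZero-style loss from `_step` with plain NumPy on
# made-up numbers, i.e. a squared TD error for the value head plus a softmax
# cross-entropy between the MCTS visit-count policy `pi` and the logits.
if __name__ == '__main__':
  logits = np.array([1.0, 0.0, -1.0], dtype=np.float32)
  pi = np.array([0.6, 0.3, 0.1], dtype=np.float32)
  r_t, d_t, value, target_value, discount = 1.0, 1.0, 0.4, 0.5, 0.99
  value_loss = np.square(r_t + discount * d_t * target_value - value)
  log_probs = logits - np.log(np.sum(np.exp(logits)))
  policy_loss = -np.sum(pi * log_probs)
  print('example loss:', value_loss + policy_loss)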
|
acme-master
|
acme/agents/tf/mcts/learning.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A MCTS actor."""
from typing import Optional, Tuple
import acme
from acme import adders
from acme import specs
from acme.agents.tf.mcts import models
from acme.agents.tf.mcts import search
from acme.agents.tf.mcts import types
from acme.tf import variable_utils as tf2_variable_utils
import dm_env
import numpy as np
from scipy import special
import sonnet as snt
import tensorflow as tf
class MCTSActor(acme.Actor):
"""Executes a policy- and value-network guided MCTS search."""
_prev_timestep: dm_env.TimeStep
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
model: models.Model,
network: snt.Module,
discount: float,
num_simulations: int,
adder: Optional[adders.Adder] = None,
variable_client: Optional[tf2_variable_utils.VariableClient] = None,
):
# Internalize components: model, network, data sink and variable source.
self._model = model
self._network = tf.function(network)
self._variable_client = variable_client
self._adder = adder
# Internalize hyperparameters.
self._num_actions = environment_spec.actions.num_values
self._num_simulations = num_simulations
self._actions = list(range(self._num_actions))
self._discount = discount
# We need to save the policy so as to add it to replay on the next step.
self._probs = np.ones(
shape=(self._num_actions,), dtype=np.float32) / self._num_actions
def _forward(
self, observation: types.Observation) -> Tuple[types.Probs, types.Value]:
"""Performs a forward pass of the policy-value network."""
logits, value = self._network(tf.expand_dims(observation, axis=0))
# Convert to numpy & take softmax.
logits = logits.numpy().squeeze(axis=0)
value = value.numpy().item()
probs = special.softmax(logits)
return probs, value
def select_action(self, observation: types.Observation) -> types.Action:
"""Computes the agent's policy via MCTS."""
if self._model.needs_reset:
self._model.reset(observation)
# Compute a fresh MCTS plan.
root = search.mcts(
observation,
model=self._model,
search_policy=search.puct,
evaluation=self._forward,
num_simulations=self._num_simulations,
num_actions=self._num_actions,
discount=self._discount,
)
# The agent's policy is softmax w.r.t. the *visit counts* as in AlphaZero.
probs = search.visit_count_policy(root)
action = np.int32(np.random.choice(self._actions, p=probs))
# Save the policy probs so that we can add them to replay in `observe()`.
self._probs = probs.astype(np.float32)
return action
def update(self, wait: bool = False):
"""Fetches the latest variables from the variable source, if needed."""
if self._variable_client:
self._variable_client.update(wait)
def observe_first(self, timestep: dm_env.TimeStep):
self._prev_timestep = timestep
if self._adder:
self._adder.add_first(timestep)
def observe(self, action: types.Action, next_timestep: dm_env.TimeStep):
"""Updates the agent's internal model and adds the transition to replay."""
self._model.update(self._prev_timestep, action, next_timestep)
self._prev_timestep = next_timestep
if self._adder:
self._adder.add(action, next_timestep, extras={'pi': self._probs})
|
acme-master
|
acme/agents/tf/mcts/acting.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for planning via MCTS."""
# pylint: disable=unused-import
from acme.agents.tf.mcts.models.base import Model
|
acme-master
|
acme/agents/tf/mcts/models/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple (deterministic) environment transition model from pixels."""
from typing import Optional, Tuple
from acme import specs
from acme.agents.tf.mcts import types
from acme.agents.tf.mcts.models import base
from acme.tf import utils as tf2_utils
from bsuite.baselines.utils import replay
import dm_env
import numpy as np
from scipy import special
import sonnet as snt
import tensorflow as tf
class MLPTransitionModel(snt.Module):
"""This uses MLPs to model (s, a) -> (r, d, s')."""
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
hidden_sizes: Tuple[int, ...],
):
super(MLPTransitionModel, self).__init__(name='mlp_transition_model')
# Get num actions/observation shape.
self._num_actions = environment_spec.actions.num_values
self._input_shape = environment_spec.observations.shape
self._flat_shape = int(np.prod(self._input_shape))
# Prediction networks.
self._state_network = snt.Sequential([
snt.nets.MLP(hidden_sizes + (self._flat_shape,)),
snt.Reshape(self._input_shape)
])
self._reward_network = snt.Sequential([
snt.nets.MLP(hidden_sizes + (1,)),
lambda r: tf.squeeze(r, axis=-1),
])
self._discount_network = snt.Sequential([
snt.nets.MLP(hidden_sizes + (1,)),
lambda d: tf.squeeze(d, axis=-1),
])
def __call__(self, state: tf.Tensor,
action: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
embedded_state = snt.Flatten()(state)
embedded_action = tf.one_hot(action, depth=self._num_actions)
embedding = tf.concat([embedded_state, embedded_action], axis=-1)
# Predict the next state, reward, and termination.
next_state = self._state_network(embedding)
reward = self._reward_network(embedding)
discount_logits = self._discount_network(embedding)
return next_state, reward, discount_logits
class MLPModel(base.Model):
"""A simple environment model."""
_checkpoint: types.Observation
_state: types.Observation
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
replay_capacity: int,
batch_size: int,
hidden_sizes: Tuple[int, ...],
learning_rate: float = 1e-3,
terminal_tol: float = 1e-3,
):
self._obs_spec = environment_spec.observations
self._action_spec = environment_spec.actions
# Hyperparameters.
self._batch_size = batch_size
self._terminal_tol = terminal_tol
# Modelling
self._replay = replay.Replay(replay_capacity)
self._transition_model = MLPTransitionModel(environment_spec, hidden_sizes)
self._optimizer = snt.optimizers.Adam(learning_rate)
self._forward = tf.function(self._transition_model)
tf2_utils.create_variables(
self._transition_model, [self._obs_spec, self._action_spec])
self._variables = self._transition_model.trainable_variables
# Model state.
self._needs_reset = True
@tf.function
def _step(
self,
o_t: tf.Tensor,
a_t: tf.Tensor,
r_t: tf.Tensor,
d_t: tf.Tensor,
o_tp1: tf.Tensor,
) -> tf.Tensor:
with tf.GradientTape() as tape:
next_state, reward, discount = self._transition_model(o_t, a_t)
state_loss = tf.square(next_state - o_tp1)
reward_loss = tf.square(reward - r_t)
discount_loss = tf.nn.sigmoid_cross_entropy_with_logits(d_t, discount)
loss = sum([
tf.reduce_mean(state_loss),
tf.reduce_mean(reward_loss),
tf.reduce_mean(discount_loss),
])
gradients = tape.gradient(loss, self._variables)
self._optimizer.apply(gradients, self._variables)
return loss
def step(self, action: types.Action):
# Reset if required.
if self._needs_reset:
raise ValueError('Model must be reset with an initial timestep.')
# Step the model.
state, action = tf2_utils.add_batch_dim([self._state, action])
new_state, reward, discount_logits = [
x.numpy().squeeze(axis=0) for x in self._forward(state, action)
]
    # Use the sigmoid (not softmax) since the discount head is a single logit
    # trained with sigmoid cross-entropy in `_step`.
    discount = special.expit(discount_logits)
# Save the resulting state for the next step.
self._state = new_state
# We threshold discount on a given tolerance.
if discount < self._terminal_tol:
self._needs_reset = True
return dm_env.termination(reward=reward, observation=self._state.copy())
return dm_env.transition(reward=reward, observation=self._state.copy())
def reset(self, initial_state: Optional[types.Observation] = None):
if initial_state is None:
raise ValueError('Model must be reset with an initial state.')
# We reset to an initial state that we are explicitly given.
# This allows us to handle environments with stochastic resets (e.g. Catch).
self._state = initial_state.copy()
self._needs_reset = False
return dm_env.restart(self._state)
def update(
self,
timestep: dm_env.TimeStep,
action: types.Action,
next_timestep: dm_env.TimeStep,
) -> dm_env.TimeStep:
# Add the true transition to replay.
transition = [
timestep.observation,
action,
next_timestep.reward,
next_timestep.discount,
next_timestep.observation,
]
self._replay.add(transition)
# Step the model to generate a synthetic transition.
ts = self.step(action)
# Copy the *true* state on update.
self._state = next_timestep.observation.copy()
if ts.last() or next_timestep.last():
# Model believes that a termination has happened.
# This will result in a crash during planning if the true environment
# didn't terminate here as well. So, we indicate that we need a reset.
self._needs_reset = True
# Sample from replay and do SGD.
if self._replay.size >= self._batch_size:
batch = self._replay.sample(self._batch_size)
self._step(*batch)
return ts
def save_checkpoint(self):
if self._needs_reset:
raise ValueError('Cannot save checkpoint: model must be reset first.')
self._checkpoint = self._state.copy()
def load_checkpoint(self):
self._needs_reset = False
self._state = self._checkpoint.copy()
def action_spec(self):
return self._action_spec
def observation_spec(self):
return self._obs_spec
@property
def needs_reset(self) -> bool:
return self._needs_reset
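# Illustration only: with a single termination logit, `scipy.special.softmax`
# always returns 1.0, whereas `scipy.special.expit` (the sigmoid that matches
# the sigmoid cross-entropy loss in `_step`) yields a probability that can be
# meaningfully compared against `terminal_tol`. The logit value is made up.
if __name__ == '__main__':
  demo_logit = -4.0
  print('softmax of a single logit:', special.softmax([demo_logit]))
  print('sigmoid of the same logit:', special.expit(demo_logit))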
|
acme-master
|
acme/agents/tf/mcts/models/mlp.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simulator model, which wraps a copy of the true environment."""
import copy
import dataclasses
from acme.agents.tf.mcts import types
from acme.agents.tf.mcts.models import base
import dm_env
@dataclasses.dataclass
class Checkpoint:
"""Holds the checkpoint state for the environment simulator."""
needs_reset: bool
environment: dm_env.Environment
class Simulator(base.Model):
"""A simulator model, which wraps a copy of the true environment.
Assumptions:
- The environment (including RNG) is fully copyable via `deepcopy`.
- Environment dynamics (modulo episode resets) are deterministic.
"""
_checkpoint: Checkpoint
_env: dm_env.Environment
def __init__(self, env: dm_env.Environment):
    # Make a 'checkpoint' copy of the env to save/load from when doing rollouts.
self._env = copy.deepcopy(env)
self._needs_reset = True
self.save_checkpoint()
def update(
self,
timestep: dm_env.TimeStep,
action: types.Action,
next_timestep: dm_env.TimeStep,
) -> dm_env.TimeStep:
# Call update() once per 'real' experience to keep this env in sync.
return self.step(action)
def save_checkpoint(self):
self._checkpoint = Checkpoint(
needs_reset=self._needs_reset,
environment=copy.deepcopy(self._env),
)
def load_checkpoint(self):
self._env = copy.deepcopy(self._checkpoint.environment)
self._needs_reset = self._checkpoint.needs_reset
def step(self, action: types.Action) -> dm_env.TimeStep:
if self._needs_reset:
raise ValueError('This model needs to be explicitly reset.')
timestep = self._env.step(action)
self._needs_reset = timestep.last()
return timestep
def reset(self, *unused_args, **unused_kwargs):
self._needs_reset = False
return self._env.reset()
def observation_spec(self):
return self._env.observation_spec()
def action_spec(self):
return self._env.action_spec()
@property
def needs_reset(self) -> bool:
return self._needs_reset
|
acme-master
|
acme/agents/tf/mcts/models/simulator.py
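A hedged usage sketch of the Simulator above, mirroring the test file that follows; Catch is only an example of a fully copyable, deterministic environment.
from acme.agents.tf.mcts.models import simulator
from bsuite.environments import catch

# Illustrative only: any deepcopy-able dm_env.Environment works here.
model = simulator.Simulator(catch.Catch())
model.reset()
model.save_checkpoint()   # snapshot the wrapped environment
model.step(1)             # imagined step during planning
model.load_checkpoint()   # rewind to the snapshot before the next simulation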
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base model class, specifying the interface.."""
import abc
from typing import Optional
from acme.agents.tf.mcts import types
import dm_env
class Model(dm_env.Environment, abc.ABC):
"""Base (abstract) class for models used for planning via MCTS."""
@abc.abstractmethod
def load_checkpoint(self):
"""Loads a saved model state, if it exists."""
@abc.abstractmethod
def save_checkpoint(self):
"""Saves the model state so that we can reset it after a rollout."""
@abc.abstractmethod
def update(
self,
timestep: dm_env.TimeStep,
action: types.Action,
next_timestep: dm_env.TimeStep,
) -> dm_env.TimeStep:
"""Updates the model given an observation, action, reward, and discount."""
@abc.abstractmethod
def reset(self, initial_state: Optional[types.Observation] = None):
"""Resets the model, optionally to an initial state."""
@property
@abc.abstractmethod
def needs_reset(self) -> bool:
"""Returns whether or not the model needs to be reset."""
|
acme-master
|
acme/agents/tf/mcts/models/base.py
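A small hedged sketch of the contract a planner relies on when driving any Model implementation; `model`, `timestep`, `action` and `next_timestep` are assumed to come from the surrounding agent code.
# Hedged sketch: keep the model in sync with real experience and reset it
# from a true observation whenever it signals needs_reset.
if model.needs_reset:
  model.reset(timestep.observation)
model.update(timestep, action, next_timestep)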
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for simulator.py."""
from acme.agents.tf.mcts.models import simulator
from bsuite.environments import catch
import dm_env
import numpy as np
from absl.testing import absltest
class SimulatorTest(absltest.TestCase):
def _check_equal(self, a: dm_env.TimeStep, b: dm_env.TimeStep):
self.assertEqual(a.reward, b.reward)
self.assertEqual(a.discount, b.discount)
self.assertEqual(a.step_type, b.step_type)
np.testing.assert_array_equal(a.observation, b.observation)
def test_simulator_fidelity(self):
"""Tests whether the simulator match the ground truth."""
# Given an environment.
env = catch.Catch()
# If we instantiate a simulator 'model' of this environment.
model = simulator.Simulator(env)
# Then the model and environment should always agree as we step them.
num_actions = env.action_spec().num_values
for _ in range(10):
true_timestep = env.reset()
self.assertTrue(model.needs_reset)
model_timestep = model.reset()
self.assertFalse(model.needs_reset)
self._check_equal(true_timestep, model_timestep)
while not true_timestep.last():
action = np.random.randint(num_actions)
true_timestep = env.step(action)
model_timestep = model.step(action)
self._check_equal(true_timestep, model_timestep)
def test_checkpointing(self):
"""Tests whether checkpointing restores the state correctly."""
# Given an environment, and a model based on this environment.
model = simulator.Simulator(catch.Catch())
num_actions = model.action_spec().num_values
model.reset()
# Now, we save a checkpoint.
model.save_checkpoint()
ts = model.step(1)
# Step the model once and load the checkpoint.
timestep = model.step(np.random.randint(num_actions))
model.load_checkpoint()
self._check_equal(ts, model.step(1))
while not timestep.last():
timestep = model.step(np.random.randint(num_actions))
# The model should require a reset.
self.assertTrue(model.needs_reset)
# Once we load checkpoint, the model should no longer require reset.
model.load_checkpoint()
self.assertFalse(model.needs_reset)
# Further steps should agree with the original environment state.
self._check_equal(ts, model.step(1))
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/agents/tf/mcts/models/simulator_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility definitions for Acme experiments."""
from typing import Optional
from acme.utils import loggers
def make_experiment_logger(label: str,
steps_key: Optional[str] = None,
task_instance: int = 0) -> loggers.Logger:
del task_instance
if steps_key is None:
steps_key = f'{label}_steps'
return loggers.make_default_logger(label=label, steps_key=steps_key)
def create_experiment_logger_factory() -> loggers.LoggerFactory:
return make_experiment_logger
|
acme-master
|
acme/utils/experiment_utils.py
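A hedged example of using the factory above; the metric names and values are illustrative only.
from acme.utils import experiment_utils

logger_factory = experiment_utils.create_experiment_logger_factory()
learner_logger = logger_factory('learner', 'learner_steps', 0)
learner_logger.write({'learner_steps': 1, 'loss': 0.5})  # example values only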
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module does nothing and exists solely for the sake of OS compatibility."""
from typing import Type, TypeVar
T = TypeVar('T')
def record_class_usage(cls: Type[T]) -> Type[T]:
return cls
|
acme-master
|
acme/utils/metrics.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for handling signals."""
import contextlib
import ctypes
import threading
from typing import Any, Callable, Optional
import launchpad
_Handler = Callable[[], Any]
@contextlib.contextmanager
def runtime_terminator(callback: Optional[_Handler] = None):
"""Runtime terminator used for stopping computation upon agent termination.
  Runtime terminator optionally executes a provided `callback` and then raises
  a `SystemExit` exception in the thread performing the computation.
Args:
callback: callback to execute before raising exception.
Yields:
None.
"""
worker_id = threading.get_ident()
def signal_handler():
if callback:
callback()
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(worker_id), ctypes.py_object(SystemExit))
assert res < 2, 'Stopping worker failed'
launchpad.register_stop_handler(signal_handler)
yield
launchpad.unregister_stop_handler(signal_handler)
|
acme-master
|
acme/utils/signals.py
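A hedged sketch of how a long-running worker loop would typically use the context manager above; `do_work` and `cleanup` are placeholders, not part of the library.
from acme.utils import signals

def run_worker(do_work, cleanup):
  # Placeholder loop: SystemExit is raised here when the node is stopped.
  with signals.runtime_terminator(callback=cleanup):
    while True:
      do_work()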
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for frozen_learner."""
from unittest import mock
import acme
from acme.utils import frozen_learner
from absl.testing import absltest
class FrozenLearnerTest(absltest.TestCase):
@mock.patch.object(acme, 'Learner', autospec=True)
def test_step_fn(self, mock_learner):
num_calls = 0
def step_fn():
nonlocal num_calls
num_calls += 1
learner = frozen_learner.FrozenLearner(mock_learner, step_fn=step_fn)
# Step two times.
learner.step()
learner.step()
self.assertEqual(num_calls, 2)
# step() method of the wrapped learner should not be called.
mock_learner.step.assert_not_called()
@mock.patch.object(acme, 'Learner', autospec=True)
def test_no_step_fn(self, mock_learner):
learner = frozen_learner.FrozenLearner(mock_learner)
learner.step()
# step() method of the wrapped learner should not be called.
mock_learner.step.assert_not_called()
@mock.patch.object(acme, 'Learner', autospec=True)
def test_save_and_restore(self, mock_learner):
learner = frozen_learner.FrozenLearner(mock_learner)
mock_learner.save.return_value = 'state1'
state = learner.save()
self.assertEqual(state, 'state1')
learner.restore('state2')
# State of the wrapped learner should be restored.
mock_learner.restore.assert_called_once_with('state2')
@mock.patch.object(acme, 'Learner', autospec=True)
def test_get_variables(self, mock_learner):
learner = frozen_learner.FrozenLearner(mock_learner)
mock_learner.get_variables.return_value = [1, 2]
variables = learner.get_variables(['a', 'b'])
# Values should match with those returned by the wrapped learner.
self.assertEqual(variables, [1, 2])
mock_learner.get_variables.assert_called_once_with(['a', 'b'])
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/utils/frozen_learner_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme launchpad utilities."""
from acme.utils import lp_utils
from absl.testing import absltest
class LpUtilsTest(absltest.TestCase):
def test_partial_kwargs(self):
def foo(a, b, c=2):
return a, b, c
def bar(a, b):
return a, b
# Override the default values. The last two should be no-ops.
foo1 = lp_utils.partial_kwargs(foo, c=1)
foo2 = lp_utils.partial_kwargs(foo)
bar1 = lp_utils.partial_kwargs(bar)
# Check that we raise errors on overriding kwargs with no default values
with self.assertRaises(ValueError):
lp_utils.partial_kwargs(foo, a=2)
    # Check that we raise if we try to override a kwarg that doesn't exist.
with self.assertRaises(ValueError):
lp_utils.partial_kwargs(foo, d=2)
# Make sure we get back the correct values.
self.assertEqual(foo1(1, 2), (1, 2, 1))
self.assertEqual(foo2(1, 2), (1, 2, 2))
self.assertEqual(bar1(1, 2), (1, 2))
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/utils/lp_utils_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filesystem path helpers."""
import os
import os.path
import shutil
import time
from typing import Optional, Tuple
from absl import flags
ACME_ID = flags.DEFINE_string('acme_id', None,
'Experiment identifier to use for Acme.')
def process_path(path: str,
*subpaths: str,
ttl_seconds: Optional[int] = None,
backups: Optional[bool] = None,
add_uid: bool = True) -> str:
"""Process the path string.
This will process the path string by running `os.path.expanduser` to replace
any initial "~". It will also append a unique string on the end of the path
and create the directories leading to this path if necessary.
Args:
path: string defining the path to process and create.
*subpaths: potential subpaths to include after uniqification.
ttl_seconds: ignored.
backups: ignored.
add_uid: Whether to add a unique directory identifier between `path` and
`subpaths`. If the `--acme_id` flag is set, will use that as the
identifier.
Returns:
the processed, expanded path string.
"""
del backups, ttl_seconds
path = os.path.expanduser(path)
if add_uid:
path = os.path.join(path, *get_unique_id())
path = os.path.join(path, *subpaths)
os.makedirs(path, exist_ok=True)
return path
_DATETIME = time.strftime('%Y%m%d-%H%M%S')
def get_unique_id() -> Tuple[str, ...]:
"""Makes a unique identifier for this process; override with --acme_id."""
# By default we'll use the global id.
identifier = _DATETIME
# If the --acme_id flag is given prefer that; ignore if flag processing has
# been skipped (this happens in colab or in tests).
try:
identifier = ACME_ID.value or identifier
except flags.UnparsedFlagAccessError:
pass
# Return as a tuple (for future proofing).
return (identifier,)
def rmdir(path: str):
"""Remove directory recursively."""
shutil.rmtree(path)
|
acme-master
|
acme/utils/paths.py
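A hedged example of process_path; the directory layout shown in the comment is illustrative only.
from acme.utils import paths

# Creates e.g. '<home>/acme/<YYYYMMDD-HHMMSS>/checkpoints' and returns it.
checkpoint_dir = paths.process_path('~/acme', 'checkpoints', add_uid=True)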
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to use within loggers."""
import queue
import threading
from typing import Callable, TypeVar, Generic
from absl import logging
E = TypeVar("E")
class AsyncExecutor(Generic[E]):
"""Executes a blocking function asynchronously on a queue of items."""
def __init__(
self,
fn: Callable[[E], None],
queue_size: int = 1,
interruptible_interval_secs: float = 1.0,
):
"""Buffers elements in a queue and runs `fn` asynchronously..
NOTE: Once closed, `AsyncExecutor` will block until current `fn` finishes
but is not guaranteed to dequeue all elements currently stored in
the data queue. This is intentional so as to prevent a blocking `fn` call
from preventing `AsyncExecutor` from closing.
Args:
fn: A callable to be executed upon dequeuing an element from data
queue.
queue_size: The maximum size of the synchronized buffer queue.
      interruptible_interval_secs: Timeout in seconds for blocking queue
        operations, after which the background thread checks for errors and
        whether it should stop.
"""
self._data = queue.Queue(maxsize=queue_size)
self._should_stop = threading.Event()
self._errors = queue.Queue()
self._interruptible_interval_secs = interruptible_interval_secs
def _dequeue() -> None:
"""Dequeue data from a queue and invoke blocking call."""
while not self._should_stop.is_set():
try:
element = self._data.get(timeout=self._interruptible_interval_secs)
# Execute fn upon dequeuing an element from the data queue.
fn(element)
except queue.Empty:
# If queue is Empty for longer than the specified time interval,
# check again if should_stop has been requested and retry.
continue
except Exception as e:
logging.error("AsyncExecuter thread terminated with error.")
logging.exception(e)
self._errors.put(e)
self._should_stop.set()
raise # Never caught by anything, just terminates the thread.
self._thread = threading.Thread(target=_dequeue, daemon=True)
self._thread.start()
def _raise_on_error(self) -> None:
try:
# Raise the error on the caller thread if an error has been raised in the
# looper thread.
raise self._errors.get_nowait()
except queue.Empty:
pass
def close(self):
self._should_stop.set()
# Join all background threads.
self._thread.join()
# Raise errors produced by background threads.
self._raise_on_error()
def put(self, element: E) -> None:
"""Puts `element` asynchronuously onto the underlying data queue.
The write call blocks if the underlying data_queue contains `queue_size`
elements for over `self._interruptible_interval_secs` second, in which
case we check if stop has been requested or if there has been an error
raised on the looper thread. If neither happened, retry enqueue.
Args:
element: an element to be put into the underlying data queue and dequeued
asynchronuously for `fn(element)` call.
"""
while not self._should_stop.is_set():
try:
self._data.put(element, timeout=self._interruptible_interval_secs)
break
except queue.Full:
continue
else:
# If `should_stop` has been set, then raises if any has been raised on
# the background thread.
self._raise_on_error()
|
acme-master
|
acme/utils/async_utils.py
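A hedged usage sketch of AsyncExecutor; `slow_write` stands in for any blocking sink such as a remote logger.
from acme.utils import async_utils

def slow_write(row):
  # Placeholder for a blocking call (file, network, ...).
  print(row)

executor = async_utils.AsyncExecutor(slow_write, queue_size=4)
for step in range(10):
  executor.put({'step': step})  # returns quickly; slow_write runs in background
executor.close()                # joins the worker and re-raises any error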
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple, hierarchical distributed counter."""
import threading
import time
from typing import Dict, Mapping, Optional, Union
from acme import core
Number = Union[int, float]
class Counter(core.Saveable):
"""A simple counter object that can periodically sync with a parent."""
def __init__(self,
parent: Optional['Counter'] = None,
prefix: str = '',
time_delta: float = 1.0,
return_only_prefixed: bool = False):
"""Initialize the counter.
Args:
parent: a Counter object to cache locally (or None for no caching).
prefix: string prefix to use for all local counts.
time_delta: time difference in seconds between syncing with the parent
counter.
return_only_prefixed: if True, and if `prefix` isn't empty, return counts
restricted to the given `prefix` on each call to `increment` and
`get_counts`. The `prefix` is stripped from returned count names.
"""
self._parent = parent
self._prefix = prefix
self._time_delta = time_delta
# Hold local counts and we'll lock around that.
# These are counts to be synced to the parent and the cache.
self._counts = {}
self._lock = threading.Lock()
# We'll sync periodically (when the last sync was more than self._time_delta
# seconds ago.)
self._cache = {}
self._last_sync_time = 0.0
self._return_only_prefixed = return_only_prefixed
def increment(self, **counts: Number) -> Dict[str, Number]:
"""Increment a set of counters.
Args:
**counts: keyword arguments specifying count increments.
Returns:
The [name, value] mapping of all counters stored, i.e. this will also
include counts that were not updated by this call to increment.
"""
with self._lock:
for key, value in counts.items():
self._counts.setdefault(key, 0)
self._counts[key] += value
return self.get_counts()
def get_counts(self) -> Dict[str, Number]:
"""Return all counts tracked by this counter."""
now = time.time()
# TODO(b/144421838): use futures instead of blocking.
if self._parent and (now - self._last_sync_time) > self._time_delta:
with self._lock:
counts = _prefix_keys(self._counts, self._prefix)
# Reset the local counts, as they will be merged into the parent and the
# cache.
self._counts = {}
self._cache = self._parent.increment(**counts)
self._last_sync_time = now
# Potentially prefix the keys in the counts dictionary.
counts = _prefix_keys(self._counts, self._prefix)
# If there's no prefix make a copy of the dictionary so we don't modify the
# internal self._counts.
if not self._prefix:
counts = dict(counts)
# Combine local counts with any parent counts.
for key, value in self._cache.items():
counts[key] = counts.get(key, 0) + value
if self._prefix and self._return_only_prefixed:
counts = dict([(key[len(self._prefix) + 1:], value)
for key, value in counts.items()
if key.startswith(f'{self._prefix}_')])
return counts
def save(self) -> Mapping[str, Mapping[str, Number]]:
return {'counts': self._counts, 'cache': self._cache}
def restore(self, state: Mapping[str, Mapping[str, Number]]):
# Force a sync, if necessary, on the next get_counts call.
self._last_sync_time = 0.
self._counts = state['counts']
self._cache = state['cache']
def get_steps_key(self) -> str:
"""Returns the key to use for steps by this counter."""
if not self._prefix or self._return_only_prefixed:
return 'steps'
return f'{self._prefix}_steps'
def _prefix_keys(dictionary: Dict[str, Number], prefix: str):
"""Return a dictionary with prefixed keys.
Args:
dictionary: dictionary to return a copy of.
prefix: string to use as the prefix.
Returns:
Return a copy of the given dictionary whose keys are replaced by
"{prefix}_{key}". If the prefix is the empty string it returns the given
dictionary unchanged.
"""
if prefix:
dictionary = {f'{prefix}_{k}': v for k, v in dictionary.items()}
return dictionary
|
acme-master
|
acme/utils/counting.py
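A hedged example of the parent/child counter pattern, consistent with counting_test.py further below.
from acme.utils import counting

parent = counting.Counter()
actor_counter = counting.Counter(parent, prefix='actor', time_delta=0.)
actor_counter.increment(steps=1)
print(parent.get_counts())            # {'actor_steps': 1}
print(actor_counter.get_steps_key())  # 'actor_steps'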
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Acme utility functions."""
|
acme-master
|
acme/utils/__init__.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility function for building and launching launchpad programs."""
import atexit
import functools
import inspect
import os
import sys
import time
from typing import Any, Callable, Optional
from absl import flags
from absl import logging
from acme.utils import counting
from acme.utils import signals
FLAGS = flags.FLAGS
def partial_kwargs(function: Callable[..., Any],
**kwargs: Any) -> Callable[..., Any]:
"""Return a partial function application by overriding default keywords.
This function is equivalent to `functools.partial(function, **kwargs)` but
will raise a `ValueError` when called if either the given keyword arguments
are not defined by `function` or if they do not have defaults.
This is useful as a way to define a factory function with default parameters
and then to override them in a safe way.
Args:
function: the base function before partial application.
**kwargs: keyword argument overrides.
Returns:
A function.
"""
# Try to get the argspec of our function which we'll use to get which keywords
# have defaults.
argspec = inspect.getfullargspec(function)
# Figure out which keywords have defaults.
if argspec.defaults is None:
defaults = []
else:
defaults = argspec.args[-len(argspec.defaults):]
# Find any keys not given as defaults by the function.
unknown_kwargs = set(kwargs.keys()).difference(defaults)
  # Raise an error if any unknown keyword arguments were given.
if unknown_kwargs:
error_string = 'Cannot override unknown or non-default kwargs: {}'
raise ValueError(error_string.format(', '.join(unknown_kwargs)))
return functools.partial(function, **kwargs)
class StepsLimiter:
"""Process that terminates an experiment when `max_steps` is reached."""
def __init__(self,
counter: counting.Counter,
max_steps: int,
steps_key: str = 'actor_steps'):
self._counter = counter
self._max_steps = max_steps
self._steps_key = steps_key
def run(self):
"""Run steps limiter to terminate an experiment when max_steps is reached.
"""
logging.info('StepsLimiter: Starting with max_steps = %d (%s)',
self._max_steps, self._steps_key)
with signals.runtime_terminator():
while True:
# Update the counts.
counts = self._counter.get_counts()
num_steps = counts.get(self._steps_key, 0)
logging.info('StepsLimiter: Reached %d recorded steps', num_steps)
if num_steps > self._max_steps:
logging.info('StepsLimiter: Max steps of %d was reached, terminating',
self._max_steps)
# Avoid importing Launchpad until it is actually used.
import launchpad as lp # pylint: disable=g-import-not-at-top
lp.stop()
# Don't spam the counter.
for _ in range(10):
# Do not sleep for a long period of time to avoid LaunchPad program
# termination hangs (time.sleep is not interruptible).
time.sleep(1)
def is_local_run() -> bool:
return FLAGS.lp_launch_type.startswith('local')
# Resources for each individual instance of the program.
def make_xm_docker_resources(program,
requirements: Optional[str] = None):
"""Returns Docker XManager resources for each program's node.
  For each node of the Launchpad program, appropriate hardware requirements
  (CPU, memory, ...) are specified, while the PyPI packages listed in the
  requirements file are installed inside the Docker images.
Args:
program: program for which to construct Docker XManager resources.
requirements: file containing additional requirements to use.
If not specified, default Acme dependencies are used instead.
"""
if (FLAGS.lp_launch_type != 'vertex_ai' and
FLAGS.lp_launch_type != 'local_docker'):
# Avoid importing 'xmanager' for local runs.
return None
# Avoid importing Launchpad until it is actually used.
import launchpad as lp # pylint: disable=g-import-not-at-top
# Reference lp.DockerConfig to force lazy import of xmanager by Launchpad and
# then import it. It is done this way to avoid heavy imports by default.
lp.DockerConfig # pylint: disable=pointless-statement
from xmanager import xm # pylint: disable=g-import-not-at-top
# Get number of each type of node.
num_nodes = {k: len(v) for k, v in program.groups.items()}
xm_resources = {}
acme_location = os.path.dirname(os.path.dirname(__file__))
if not requirements:
# Acme requirements are located in the Acme directory (when installed
# with pip), or need to be extracted from setup.py when using Acme codebase
# from GitHub without PyPi installation.
requirements = os.path.join(acme_location, 'requirements.txt')
if not os.path.isfile(requirements):
# Try to generate requirements.txt from setup.py
setup = os.path.join(os.path.dirname(acme_location), 'setup.py')
if os.path.isfile(setup):
# Generate requirements.txt file using setup.py.
import importlib.util # pylint: disable=g-import-not-at-top
spec = importlib.util.spec_from_file_location('setup', setup)
setup = importlib.util.module_from_spec(spec)
try:
spec.loader.exec_module(setup) # pytype: disable=attribute-error
except SystemExit:
pass
atexit.register(os.remove, requirements)
setup.generate_requirements_file(requirements)
# Extend PYTHONPATH with paths used by the launcher.
python_path = []
for path in sys.path:
if path.startswith(acme_location) and acme_location != path:
python_path.append(path[len(acme_location):])
if 'replay' in num_nodes:
replay_cpu = 6 + num_nodes.get('actor', 0) * 0.01
replay_cpu = min(40, replay_cpu)
xm_resources['replay'] = lp.DockerConfig(
acme_location,
requirements,
hw_requirements=xm.JobRequirements(cpu=replay_cpu, ram=10 * xm.GiB),
python_path=python_path)
if 'evaluator' in num_nodes:
xm_resources['evaluator'] = lp.DockerConfig(
acme_location,
requirements,
hw_requirements=xm.JobRequirements(cpu=2, ram=4 * xm.GiB),
python_path=python_path)
if 'actor' in num_nodes:
xm_resources['actor'] = lp.DockerConfig(
acme_location,
requirements,
hw_requirements=xm.JobRequirements(cpu=2, ram=4 * xm.GiB),
python_path=python_path)
if 'learner' in num_nodes:
learner_cpu = 6 + num_nodes.get('actor', 0) * 0.01
learner_cpu = min(40, learner_cpu)
xm_resources['learner'] = lp.DockerConfig(
acme_location,
requirements,
hw_requirements=xm.JobRequirements(
cpu=learner_cpu, ram=6 * xm.GiB, P100=1),
python_path=python_path)
if 'environment_loop' in num_nodes:
xm_resources['environment_loop'] = lp.DockerConfig(
acme_location,
requirements,
hw_requirements=xm.JobRequirements(
cpu=6, ram=6 * xm.GiB, P100=1),
python_path=python_path)
if 'counter' in num_nodes:
xm_resources['counter'] = lp.DockerConfig(
acme_location,
requirements,
hw_requirements=xm.JobRequirements(cpu=3, ram=4 * xm.GiB),
python_path=python_path)
if 'cacher' in num_nodes:
xm_resources['cacher'] = lp.DockerConfig(
acme_location,
requirements,
hw_requirements=xm.JobRequirements(cpu=3, ram=6 * xm.GiB),
python_path=python_path)
return xm_resources
|
acme-master
|
acme/utils/lp_utils.py
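A hedged example of partial_kwargs used as a safe factory override; `make_agent` is a placeholder factory, not part of the library.
from acme.utils import lp_utils

def make_agent(environment, learning_rate=1e-3):
  # Placeholder factory.
  return (environment, learning_rate)

fast_factory = lp_utils.partial_kwargs(make_agent, learning_rate=1e-2)  # ok
# lp_utils.partial_kwargs(make_agent, environment=None) raises ValueError,
# because `environment` has no default value.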
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme.utils.reverb_utils."""
from acme import types
from acme.adders import reverb as reverb_adders
from acme.utils import reverb_utils
import numpy as np
import reverb
import tree
from absl.testing import absltest
class ReverbUtilsTest(absltest.TestCase):
def test_make_replay_table_preserves_table_info(self):
limiter = reverb.rate_limiters.SampleToInsertRatio(
samples_per_insert=1, min_size_to_sample=2, error_buffer=(0, 10))
table = reverb.Table(
name='test',
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=10,
rate_limiter=limiter)
new_table = reverb_utils.make_replay_table_from_info(table.info)
new_info = new_table.info
# table_worker_time is not set by the above utility since this is meant to
# be monitoring information about any given table. So instead we copy this
# so that the assertion below checks that everything else matches.
new_info.table_worker_time.sleeping_ms = (
table.info.table_worker_time.sleeping_ms)
self.assertEqual(new_info, table.info)
_EMPTY_INFO = reverb.SampleInfo(*[() for _ in reverb.SampleInfo.tf_dtypes()])
_DUMMY_OBS = np.array([[[0], [1], [2]]])
_DUMMY_ACTION = np.array([[[3], [4], [5]]])
_DUMMY_REWARD = np.array([[6, 7, 8]])
_DUMMY_DISCOUNT = np.array([[.99, .99, .99]])
_DUMMY_NEXT_OBS = np.array([[[1], [2], [0]]])
_DUMMY_RETURN = np.array([[20.77, 14.92, 8.]])
def _create_dummy_steps(self):
return reverb_adders.Step(
observation=self._DUMMY_OBS,
action=self._DUMMY_ACTION,
reward=self._DUMMY_REWARD,
discount=self._DUMMY_DISCOUNT,
start_of_episode=True,
extras={'return': self._DUMMY_RETURN})
def _create_dummy_transitions(self):
return types.Transition(
observation=self._DUMMY_OBS,
action=self._DUMMY_ACTION,
reward=self._DUMMY_REWARD,
discount=self._DUMMY_DISCOUNT,
next_observation=self._DUMMY_NEXT_OBS,
extras={'return': self._DUMMY_RETURN})
def test_replay_sample_to_sars_transition_is_sequence(self):
fake_sample = reverb.ReplaySample(
info=self._EMPTY_INFO, data=self._create_dummy_steps())
fake_transition = self._create_dummy_transitions()
transition_from_sample = reverb_utils.replay_sample_to_sars_transition(
fake_sample, is_sequence=True)
tree.map_structure(np.testing.assert_array_equal, transition_from_sample,
fake_transition)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/utils/reverb_utils_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reverb utils.
Contains functions manipulating reverb tables and samples.
"""
from acme import types
import jax
import numpy as np
import reverb
from reverb import item_selectors
from reverb import rate_limiters
from reverb import reverb_types
import tensorflow as tf
import tree
def make_replay_table_from_info(
table_info: reverb_types.TableInfo) -> reverb.Table:
"""Build a replay table out of its specs in a TableInfo.
Args:
table_info: A TableInfo containing the Table specs.
Returns:
A reverb replay table matching the info specs.
"""
sampler = _make_selector_from_key_distribution_options(
table_info.sampler_options)
remover = _make_selector_from_key_distribution_options(
table_info.remover_options)
rate_limiter = _make_rate_limiter_from_rate_limiter_info(
table_info.rate_limiter_info)
return reverb.Table(
name=table_info.name,
sampler=sampler,
remover=remover,
max_size=table_info.max_size,
rate_limiter=rate_limiter,
max_times_sampled=table_info.max_times_sampled,
signature=table_info.signature)
def _make_selector_from_key_distribution_options(
options) -> reverb_types.SelectorType:
"""Returns a Selector from its KeyDistributionOptions description."""
one_of = options.WhichOneof('distribution')
if one_of == 'fifo':
return item_selectors.Fifo()
if one_of == 'uniform':
return item_selectors.Uniform()
if one_of == 'prioritized':
return item_selectors.Prioritized(options.prioritized.priority_exponent)
if one_of == 'heap':
if options.heap.min_heap:
return item_selectors.MinHeap()
return item_selectors.MaxHeap()
if one_of == 'lifo':
return item_selectors.Lifo()
raise ValueError(f'Unknown distribution field: {one_of}')
def _make_rate_limiter_from_rate_limiter_info(
info) -> rate_limiters.RateLimiter:
return rate_limiters.SampleToInsertRatio(
samples_per_insert=info.samples_per_insert,
min_size_to_sample=info.min_size_to_sample,
error_buffer=(info.min_diff, info.max_diff))
def replay_sample_to_sars_transition(
sample: reverb.ReplaySample,
is_sequence: bool,
strip_last_transition: bool = False,
flatten_batch: bool = False) -> types.Transition:
"""Converts the replay sample to a types.Transition.
NB: If is_sequence is True then the last next_observation of each sequence is
rubbish. Don't train on it.
Args:
sample: The replay sample
is_sequence: If False we expect the sample data to match the
types.Transition already. Otherwise we expect a batch of sequences of
steps.
strip_last_transition: If True and is_sequence, the last transition will be
stripped as its next_observation field is incorrect.
flatten_batch: If True and is_sequence, the two batch dimensions will be
flatten to one.
Returns:
A types.Transition built from the sample data.
If is_sequence and strip_last_transition are both True, the output will be
  shorter than the input, as the last transition of every sequence will have
been removed.
"""
if not is_sequence:
return types.Transition(*sample.data)
# Note that the last next_observation is invalid.
steps = sample.data
def roll(observation):
return np.roll(observation, shift=-1, axis=1)
transitions = types.Transition(
observation=steps.observation,
action=steps.action,
reward=steps.reward,
discount=steps.discount,
next_observation=tree.map_structure(roll, steps.observation),
extras=steps.extras)
if strip_last_transition:
# We remove the last transition as its next_observation field is incorrect.
# It has been obtained by rolling the observation field, such that
# transitions.next_observations[:, -1] is transitions.observations[:, 0]
transitions = jax.tree_map(lambda x: x[:, :-1, ...], transitions)
if flatten_batch:
# Merge the 2 leading batch dimensions into 1.
transitions = jax.tree_map(lambda x: np.reshape(x, (-1,) + x.shape[2:]),
transitions)
return transitions
def transition_to_replaysample(
transitions: types.Transition) -> reverb.ReplaySample:
"""Converts a types.Transition to a reverb.ReplaySample."""
info = tree.map_structure(lambda dtype: tf.ones([], dtype),
reverb.SampleInfo.tf_dtypes())
return reverb.ReplaySample(info=info, data=transitions)
|
acme-master
|
acme/utils/reverb_utils.py
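A hedged sketch of converting a batched sequence sample into SARS transitions; `sample` is assumed to be a reverb.ReplaySample of sequences, such as the dummy one built in the test earlier.
from acme.utils import reverb_utils

# `sample` is an assumed placeholder holding batched sequences of steps.
transitions = reverb_utils.replay_sample_to_sars_transition(
    sample,
    is_sequence=True,
    strip_last_transition=True,  # drop the invalid final next_observation
    flatten_batch=True)          # merge [batch, time] into a single dimension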
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme.utils.counting."""
import threading
from acme.utils import counting
from absl.testing import absltest
class Barrier:
"""Defines a simple barrier class to synchronize on a particular event."""
def __init__(self, num_threads):
"""Constructor.
Args:
      num_threads: int - how many threads will be synchronizing on this barrier.
"""
self._num_threads = num_threads
self._count = 0
self._cond = threading.Condition()
def wait(self):
"""Waits on the barrier until all threads have called this method."""
with self._cond:
self._count += 1
      self._cond.notify_all()
while self._count < self._num_threads:
self._cond.wait()
class CountingTest(absltest.TestCase):
def test_counter_threading(self):
counter = counting.Counter()
num_threads = 10
barrier = Barrier(num_threads)
# Increment in every thread at the same time.
def add_to_counter():
barrier.wait()
counter.increment(foo=1)
# Run the threads.
threads = []
for _ in range(num_threads):
t = threading.Thread(target=add_to_counter)
t.start()
threads.append(t)
for t in threads:
t.join()
# Make sure the counter has been incremented once per thread.
counts = counter.get_counts()
self.assertEqual(counts['foo'], num_threads)
def test_counter_caching(self):
parent = counting.Counter()
counter = counting.Counter(parent, time_delta=0.)
counter.increment(foo=12)
self.assertEqual(parent.get_counts(), counter.get_counts())
def test_shared_counts(self):
# Two counters with shared parent should share counts (modulo namespacing).
parent = counting.Counter()
child1 = counting.Counter(parent, 'child1')
child2 = counting.Counter(parent, 'child2')
child1.increment(foo=1)
result = child2.increment(foo=2)
expected = {'child1_foo': 1, 'child2_foo': 2}
self.assertEqual(result, expected)
def test_return_only_prefixed(self):
parent = counting.Counter()
child1 = counting.Counter(
parent, 'child1', time_delta=0., return_only_prefixed=False)
child2 = counting.Counter(
parent, 'child2', time_delta=0., return_only_prefixed=True)
child1.increment(foo=1)
child2.increment(bar=1)
self.assertEqual(child1.get_counts(), {'child1_foo': 1, 'child2_bar': 1})
self.assertEqual(child2.get_counts(), {'bar': 1})
def test_get_steps_key(self):
parent = counting.Counter()
child1 = counting.Counter(
parent, 'child1', time_delta=0., return_only_prefixed=False)
child2 = counting.Counter(
parent, 'child2', time_delta=0., return_only_prefixed=True)
self.assertEqual(child1.get_steps_key(), 'child1_steps')
self.assertEqual(child2.get_steps_key(), 'steps')
child1.increment(steps=1)
child2.increment(steps=2)
self.assertEqual(child1.get_counts().get(child1.get_steps_key()), 1)
self.assertEqual(child2.get_counts().get(child2.get_steps_key()), 2)
def test_parent_prefix(self):
parent = counting.Counter(prefix='parent')
child = counting.Counter(parent, prefix='child', time_delta=0.)
self.assertEqual(child.get_steps_key(), 'child_steps')
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/utils/counting_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Frozen learner."""
from typing import Callable, List, Optional, Sequence
import acme
class FrozenLearner(acme.Learner):
"""Wraps a learner ignoring the step calls, i.e. freezing it."""
def __init__(self,
learner: acme.Learner,
step_fn: Optional[Callable[[], None]] = None):
"""Initializes the frozen learner.
Args:
learner: Learner to be wrapped.
step_fn: Function to call instead of the step() method of the learner.
This can be used, e.g. to drop samples from an iterator that would
normally be consumed by the learner.
"""
self._learner = learner
self._step_fn = step_fn
def step(self):
"""See base class."""
if self._step_fn:
self._step_fn()
def run(self, num_steps: Optional[int] = None):
"""See base class."""
self._learner.run(num_steps)
def save(self):
"""See base class."""
return self._learner.save()
def restore(self, state):
"""See base class."""
self._learner.restore(state)
def get_variables(self, names: Sequence[str]) -> List[acme.types.NestedArray]:
"""See base class."""
return self._learner.get_variables(names)
|
acme-master
|
acme/utils/frozen_learner.py
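A hedged example: freezing a learner while still draining its data iterator so that upstream data producers are not blocked; `learner` and `iterator` are assumed to exist.
from acme.utils import frozen_learner

frozen = frozen_learner.FrozenLearner(learner, step_fn=lambda: next(iterator))
frozen.step()  # consumes one batch from `iterator` but never updates weights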
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Iterator utilities."""
import itertools
import operator
from typing import Any, Iterator, List, Sequence
def unzip_iterators(zipped_iterators: Iterator[Sequence[Any]],
num_sub_iterators: int) -> List[Iterator[Any]]:
"""Returns unzipped iterators.
Note that simply returning:
[(x[i] for x in iter_tuple[i]) for i in range(num_sub_iterators)]
seems to cause all iterators to point to the final value of i, thus causing
all sub_learners to consume data from this final iterator.
Args:
zipped_iterators: zipped iterators (e.g., from zip_iterators()).
num_sub_iterators: the number of sub-iterators in the zipped iterator.
"""
iter_tuple = itertools.tee(zipped_iterators, num_sub_iterators)
return [
map(operator.itemgetter(i), iter_tuple[i])
for i in range(num_sub_iterators)
]
|
acme-master
|
acme/utils/iterator_utils.py
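A hedged example mirroring the iterator_utils test further below.
from acme.utils import iterator_utils

zipped = zip(iter(range(0, 5)), iter(range(20, 25)))
xs, ys = iterator_utils.unzip_iterators(zipped, num_sub_iterators=2)
print(list(xs))  # [0, 1, 2, 3, 4]
print(list(ys))  # [20, 21, 22, 23, 24]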
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensor framework-agnostic utilities for manipulating nested structures."""
from typing import Sequence, List, TypeVar, Any
import numpy as np
import tree
ElementType = TypeVar('ElementType')
def fast_map_structure(func, *structure):
"""Faster map_structure implementation which skips some error checking."""
flat_structure = (tree.flatten(s) for s in structure)
entries = zip(*flat_structure)
# Arbitrarily choose one of the structures of the original sequence (the last)
# to match the structure for the flattened sequence.
return tree.unflatten_as(structure[-1], [func(*x) for x in entries])
def fast_map_structure_with_path(func, *structure):
"""Faster map_structure_with_path implementation."""
head_entries_with_path = tree.flatten_with_path(structure[0])
if len(structure) > 1:
tail_entries = (tree.flatten(s) for s in structure[1:])
entries_with_path = [
e[0] + e[1:] for e in zip(head_entries_with_path, *tail_entries)
]
else:
entries_with_path = head_entries_with_path
# Arbitrarily choose one of the structures of the original sequence (the last)
# to match the structure for the flattened sequence.
return tree.unflatten_as(structure[-1], [func(*x) for x in entries_with_path])
def stack_sequence_fields(sequence: Sequence[ElementType]) -> ElementType:
"""Stacks a list of identically nested objects.
This takes a sequence of identically nested objects and returns a single
nested object whose ith leaf is a stacked numpy array of the corresponding
ith leaf from each element of the sequence.
For example, if `sequence` is:
```python
[{
'action': np.array([1.0]),
'observation': (np.array([0.0, 1.0, 2.0]),),
'reward': 1.0
}, {
'action': np.array([0.5]),
'observation': (np.array([1.0, 2.0, 3.0]),),
'reward': 0.0
}, {
      'action': np.array([0.3]),
'observation': (np.array([2.0, 3.0, 4.0]),),
'reward': 0.5
}]
```
Then this function will return:
```python
{
'action': np.array([....]) # array shape = [3 x 1]
'observation': (np.array([...]),) # array shape = [3 x 3]
'reward': np.array([...]) # array shape = [3]
}
```
Note that the 'observation' entry in the above example has two levels of
  nesting, i.e. it is a tuple of arrays.
Args:
sequence: a list of identically nested objects.
Returns:
    A single nested object whose leaves are stacked numpy arrays.
Raises:
ValueError: If `sequence` is an empty sequence.
"""
# Handle empty input sequences.
if not sequence:
raise ValueError('Input sequence must not be empty')
# Default to asarray when arrays don't have the same shape to be compatible
# with old behaviour.
try:
return fast_map_structure(lambda *values: np.stack(values), *sequence)
except ValueError:
return fast_map_structure(lambda *values: np.asarray(values, dtype=object),
*sequence)
def unstack_sequence_fields(struct: ElementType,
batch_size: int) -> List[ElementType]:
"""Converts a struct of batched arrays to a list of structs.
This is effectively the inverse of `stack_sequence_fields`.
Args:
struct: An (arbitrarily nested) structure of arrays.
batch_size: The length of the leading dimension of each array in the struct.
This is assumed to be static and known.
Returns:
A list of structs with the same structure as `struct`, where each leaf node
is an unbatched element of the original leaf node.
"""
return [
tree.map_structure(lambda s, i=i: s[i], struct) for i in range(batch_size)
]
def broadcast_structures(*args: Any) -> Any:
"""Returns versions of the arguments that give them the same nested structure.
Any nested items in *args must have the same structure.
Any non-nested item will be replaced with a nested version that shares that
structure. The leaves will all be references to the same original non-nested
item.
If all *args are nested, or all *args are non-nested, this function will
return *args unchanged.
Example:
```
a = ('a', 'b')
b = 'c'
  tree_a, tree_b = broadcast_structures(a, b)
tree_a
> ('a', 'b')
tree_b
> ('c', 'c')
```
Args:
*args: A Sequence of nested or non-nested items.
Returns:
`*args`, except with all items sharing the same nest structure.
"""
if not args:
return
reference_tree = None
for arg in args:
if tree.is_nested(arg):
reference_tree = arg
break
# If reference_tree is None then none of args are nested and we can skip over
# the rest of this function, which would be a no-op.
if reference_tree is None:
return args
def mirror_structure(value, reference_tree):
if tree.is_nested(value):
# Use check_types=True so that the types of the trees we construct aren't
# dependent on our arbitrary choice of which nested arg to use as the
# reference_tree.
tree.assert_same_structure(value, reference_tree, check_types=True)
return value
else:
return tree.map_structure(lambda _: value, reference_tree)
return tuple(mirror_structure(arg, reference_tree) for arg in args)
def tree_map(f):
"""Transforms `f` into a tree-mapped version."""
def mapped_f(*structures):
return tree.map_structure(f, *structures)
return mapped_f
|
acme-master
|
acme/utils/tree_utils.py
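A hedged example of stacking and unstacking nested structures with the helpers above.
import numpy as np
from acme.utils import tree_utils

batch = tree_utils.stack_sequence_fields(
    [{'a': np.array([1.0])}, {'a': np.array([2.0])}])
print(batch['a'].shape)  # (2, 1)
elements = tree_utils.unstack_sequence_fields(batch, batch_size=2)
print(elements[0]['a'])  # [1.]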
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tree_utils."""
import functools
from typing import Sequence
from acme.utils import tree_utils
import numpy as np
import tree
from absl.testing import absltest
TEST_SEQUENCE = [
{
'action': np.array([1.0]),
'observation': (np.array([0.0, 1.0, 2.0]),),
'reward': np.array(1.0),
},
{
'action': np.array([0.5]),
'observation': (np.array([1.0, 2.0, 3.0]),),
'reward': np.array(0.0),
},
{
'action': np.array([0.3]),
'observation': (np.array([2.0, 3.0, 4.0]),),
'reward': np.array(0.5),
},
]
class SequenceStackTest(absltest.TestCase):
"""Tests for various tree utilities."""
def test_stack_sequence_fields(self):
"""Tests that `stack_sequence_fields` behaves correctly on nested data."""
stacked = tree_utils.stack_sequence_fields(TEST_SEQUENCE)
# Check that the stacked output has the correct structure.
tree.assert_same_structure(stacked, TEST_SEQUENCE[0])
# Check that the leaves have the correct array shapes.
self.assertEqual(stacked['action'].shape, (3, 1))
self.assertEqual(stacked['observation'][0].shape, (3, 3))
self.assertEqual(stacked['reward'].shape, (3,))
# Check values.
self.assertEqual(stacked['observation'][0].tolist(), [
[0., 1., 2.],
[1., 2., 3.],
[2., 3., 4.],
])
self.assertEqual(stacked['action'].tolist(), [[1.], [0.5], [0.3]])
self.assertEqual(stacked['reward'].tolist(), [1., 0., 0.5])
def test_unstack_sequence_fields(self):
"""Tests that `unstack_sequence_fields(stack_sequence_fields(x)) == x`."""
stacked = tree_utils.stack_sequence_fields(TEST_SEQUENCE)
batch_size = len(TEST_SEQUENCE)
unstacked = tree_utils.unstack_sequence_fields(stacked, batch_size)
tree.map_structure(np.testing.assert_array_equal, unstacked, TEST_SEQUENCE)
def test_fast_map_structure_with_path(self):
structure = {
'a': {
'b': np.array([0.0])
},
'c': (np.array([1.0]), np.array([2.0])),
'd': [np.array(3.0), np.array(4.0)],
}
def map_fn(path: Sequence[str], x: np.ndarray, y: np.ndarray):
return x + y + len(path)
single_arg_map_fn = functools.partial(map_fn, y=np.array([0.0]))
expected_mapped_structure = (
tree.map_structure_with_path(single_arg_map_fn, structure))
mapped_structure = (
tree_utils.fast_map_structure_with_path(single_arg_map_fn, structure))
self.assertEqual(mapped_structure, expected_mapped_structure)
expected_double_mapped_structure = (
tree.map_structure_with_path(map_fn, structure, mapped_structure))
double_mapped_structure = (
tree_utils.fast_map_structure_with_path(map_fn, structure,
mapped_structure))
self.assertEqual(double_mapped_structure, expected_double_mapped_structure)
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/utils/tree_utils_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for iterator_utils."""
from acme.utils import iterator_utils
import numpy as np
from absl.testing import absltest
class IteratorUtilsTest(absltest.TestCase):
def test_iterator_zipping(self):
def get_iters():
x = iter(range(0, 10))
y = iter(range(20, 30))
return [x, y]
zipped = zip(*get_iters())
unzipped = iterator_utils.unzip_iterators(zipped, num_sub_iterators=2)
expected_x, expected_y = get_iters()
np.testing.assert_equal(list(unzipped[0]), list(expected_x))
np.testing.assert_equal(list(unzipped[1]), list(expected_y))
if __name__ == '__main__':
absltest.main()
|
acme-master
|
acme/utils/iterator_utils_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for paths."""
from unittest import mock
from acme.testing import test_utils
import acme.utils.paths as paths # pylint: disable=consider-using-from-import
from absl.testing import absltest
class PathTest(test_utils.TestCase):
def test_process_path(self):
root_directory = self.get_tempdir()
with mock.patch.object(paths, 'get_unique_id') as mock_unique_id:
mock_unique_id.return_value = ('test',)
path = paths.process_path(root_directory, 'foo', 'bar')
self.assertEqual(path, f'{root_directory}/test/foo/bar')
def test_unique_id_with_flag(self):
with mock.patch.object(paths, 'ACME_ID') as mock_acme_id:
mock_acme_id.value = 'test_flag'
self.assertEqual(paths.get_unique_id(), ('test_flag',))
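

# --- Illustrative sketch, not part of the original test file -----------------
# Typical (non-mocked) usage of process_path: the returned path is rooted at
# `root`, contains the per-run unique id, and ends with the extra components,
# mirroring the assertion in test_process_path. `_demo_process_path` is a
# hypothetical helper added purely for illustration.
def _demo_process_path(root: str) -> str:
  return paths.process_path(root, 'checkpoints', 'learner')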


if __name__ == '__main__':
  absltest.main()
|
acme-master
|
acme/utils/paths_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme.utils.loggers.base."""
from acme.utils.loggers import base
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
from absl.testing import absltest
class BaseTest(absltest.TestCase):
def test_tensor_serialisation(self):
data = {'x': tf.zeros(shape=(32,))}
output = base.to_numpy(data)
expected = {'x': np.zeros(shape=(32,))}
np.testing.assert_array_equal(output['x'], expected['x'])
def test_device_array_serialisation(self):
data = {'x': jnp.zeros(shape=(32,))}
output = base.to_numpy(data)
expected = {'x': np.zeros(shape=(32,))}
np.testing.assert_array_equal(output['x'], expected['x'])
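

# --- Illustrative sketch, not part of the original test file -----------------
# to_numpy is used before serialising logged values: TensorFlow tensors and
# JAX arrays inside the mapping are converted to plain numpy values, as the
# two tests above check. `_demo_to_numpy` is a hypothetical helper added
# purely for illustration.
def _demo_to_numpy():
  values = {'loss': tf.constant(0.5), 'reward': jnp.ones(shape=(2,))}
  return base.to_numpy(values)  # A dict containing only numpy values.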


if __name__ == '__main__':
  absltest.main()
|
acme-master
|
acme/utils/loggers/base_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for terminal logger."""
from acme.utils.loggers import terminal
from absl.testing import absltest
class LoggingTest(absltest.TestCase):
def test_logging_output_format(self):
inputs = {
'c': 'foo',
'a': 1337,
'b': 42.0001,
}
expected_outputs = 'A = 1337 | B = 42.000 | C = foo'
test_fn = lambda outputs: self.assertEqual(outputs, expected_outputs)
logger = terminal.TerminalLogger(print_fn=test_fn)
logger.write(inputs)
def test_label(self):
inputs = {'foo': 'bar', 'baz': 123}
expected_outputs = '[Test] Baz = 123 | Foo = bar'
test_fn = lambda outputs: self.assertEqual(outputs, expected_outputs)
logger = terminal.TerminalLogger(print_fn=test_fn, label='test')
logger.write(inputs)
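

# --- Illustrative sketch, not part of the original test file -----------------
# TerminalLogger turns each write into a single line of the form
# '[Label] Key = value | ...' with keys sorted, as the tests above assert.
# `_demo_terminal_logger` is a hypothetical helper added purely for
# illustration.
def _demo_terminal_logger():
  logger = terminal.TerminalLogger(label='eval', print_fn=print)
  logger.write({'episode_return': 10.5, 'step': 200})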


if __name__ == '__main__':
  absltest.main()
|
acme-master
|
acme/utils/loggers/terminal_test.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger which makes another logger asynchronous."""
from typing import Any, Mapping
from acme.utils import async_utils
from acme.utils.loggers import base
class AsyncLogger(base.Logger):
"""Logger which makes the logging to another logger asyncronous."""
def __init__(self, to: base.Logger):
"""Initializes the logger.
Args:
to: A `Logger` object to which the current object will forward its results
when `write` is called.
"""
self._to = to
self._async_worker = async_utils.AsyncExecutor(self._to.write, queue_size=5)
def write(self, values: Mapping[str, Any]):
self._async_worker.put(values)
def close(self):
"""Closes the logger, closing is synchronous."""
self._async_worker.close()
self._to.close()
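

# --- Illustrative usage sketch, not part of the original module --------------
# AsyncLogger decouples the caller from a potentially slow logger: `write`
# only enqueues the values and returns immediately, while a background worker
# performs the actual logging. `_demo_async_logger` is a hypothetical helper
# added purely for illustration.
def _demo_async_logger():
  # Local import so the sketch does not alter the module's top-level imports.
  from acme.utils.loggers import terminal
  logger = AsyncLogger(to=terminal.TerminalLogger(label='learner'))
  logger.write({'loss': 0.1})  # Returns without waiting for the write.
  logger.close()  # Shuts down the worker and closes the wrapped logger.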
|
acme-master
|
acme/utils/loggers/asynchronous.py
|