| python_code (string, 0-780k chars) | repo_name (string, 7-38 chars) | file_path (string, 5-103 chars) |
|---|---|---|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multihead networks apply separate networks to the input."""
from typing import Callable, Union, Sequence
from acme import types
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
TensorTransformation = Union[snt.Module, Callable[[types.NestedTensor],
tf.Tensor]]
class Multihead(snt.Module):
"""Multi-head network module.
This takes as input a list of N `network_heads` and returns a module whose
output is the stacked outputs of these heads, each applied separately to the
module input; the output has shape [..., N]. If the heads return
distributions, a list of N distributions is returned instead.
"""
def __init__(self,
network_heads: Sequence[TensorTransformation]):
if not network_heads:
raise ValueError('Must specify a non-empty, non-None sequence of network_heads.')
self._network_heads = network_heads
super().__init__(name='multihead')
def __call__(self,
inputs: tf.Tensor) -> Union[tf.Tensor, Sequence[tf.Tensor]]:
outputs = [network_head(inputs) for network_head in self._network_heads]
if isinstance(outputs[0], tfd.Distribution):
# Cannot stack distributions
return outputs
outputs = tf.stack(outputs, axis=-1)
return outputs
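# Illustrative usage sketch (not part of the original module): two small
# linear heads over a shared input; the layer sizes and batch size below are
# arbitrary example values.
def _example_multihead_usage() -> tf.Tensor:
  heads = [snt.Linear(1), snt.Linear(1)]
  multihead = Multihead(network_heads=heads)
  inputs = tf.ones([4, 3])  # Batch of 4 inputs with 3 features each.
  # Each head maps [4, 3] -> [4, 1]; stacking along the last axis gives an
  # output of shape [4, 1, 2], i.e. [..., N] with N=2 heads.
  return multihead(inputs)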
| acme-master | acme/tf/networks/multihead.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributional modules: these are modules that return a tfd.Distribution.
There are useful modules in `acme.networks.stochastic` to either sample or
take the mean of these distributions.
"""
import types
from typing import Optional, Union
from absl import logging
from acme.tf.networks import distributions as ad
import numpy as np
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
snt_init = snt.initializers
_MIN_SCALE = 1e-4
class DiscreteValuedHead(snt.Module):
"""Represents a parameterized discrete valued distribution.
The returned distribution is essentially a `tfd.Categorical`, but one which
knows its support and so can compute the mean value.
"""
def __init__(self,
vmin: Union[float, np.ndarray, tf.Tensor],
vmax: Union[float, np.ndarray, tf.Tensor],
num_atoms: int,
w_init: Optional[snt.initializers.Initializer] = None,
b_init: Optional[snt.initializers.Initializer] = None):
"""Initialization.
If vmin and vmax have shape S, this will store the category values as a
Tensor of shape (S*, num_atoms).
Args:
vmin: Minimum of the value range.
vmax: Maximum of the value range.
num_atoms: Number of atoms, i.e. the number of discrete values spanning the
range from vmin to vmax.
w_init: Initialization for linear layer weights.
b_init: Initialization for linear layer biases.
"""
super().__init__(name='DiscreteValuedHead')
vmin = tf.convert_to_tensor(vmin)
vmax = tf.convert_to_tensor(vmax)
self._values = tf.linspace(vmin, vmax, num_atoms, axis=-1)
self._distributional_layer = snt.Linear(tf.size(self._values),
w_init=w_init,
b_init=b_init)
def __call__(self, inputs: tf.Tensor) -> tfd.Distribution:
logits = self._distributional_layer(inputs)
logits = tf.reshape(logits,
tf.concat([tf.shape(logits)[:1], # batch size
tf.shape(self._values)],
axis=0))
values = tf.cast(self._values, logits.dtype)
return ad.DiscreteValuedDistribution(values=values, logits=logits)
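# Illustrative usage sketch (not part of the original module): a head whose
# output distribution has 51 atoms spanning [-10, 10], C51-style. The hidden
# size and batch size are arbitrary example values.
def _example_discrete_valued_head_usage() -> tf.Tensor:
  head = DiscreteValuedHead(vmin=-10., vmax=10., num_atoms=51)
  hiddens = tf.ones([8, 32])    # Batch of 8 hidden activations.
  distribution = head(hiddens)  # DiscreteValuedDistribution over 51 atoms.
  return distribution.mean()    # Shape [8]; mean uses the known support.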
class MultivariateNormalDiagHead(snt.Module):
"""Module that produces a multivariate normal distribution using tfd.Independent or tfd.MultivariateNormalDiag."""
def __init__(
self,
num_dimensions: int,
init_scale: float = 0.3,
min_scale: float = 1e-6,
tanh_mean: bool = False,
fixed_scale: bool = False,
use_tfd_independent: bool = False,
w_init: snt_init.Initializer = tf.initializers.VarianceScaling(1e-4),
b_init: snt_init.Initializer = tf.initializers.Zeros()):
"""Initialization.
Args:
num_dimensions: Number of dimensions of MVN distribution.
init_scale: Initial standard deviation.
min_scale: Minimum standard deviation.
tanh_mean: Whether to transform the mean (via tanh) before passing it to
the distribution.
fixed_scale: Whether to use a fixed (rather than learned) scale.
use_tfd_independent: Whether to use the tfd.Independent or the
tfd.MultivariateNormalDiag class.
w_init: Initialization for linear layer weights.
b_init: Initialization for linear layer biases.
"""
super().__init__(name='MultivariateNormalDiagHead')
self._init_scale = init_scale
self._min_scale = min_scale
self._tanh_mean = tanh_mean
self._mean_layer = snt.Linear(num_dimensions, w_init=w_init, b_init=b_init)
self._fixed_scale = fixed_scale
if not fixed_scale:
self._scale_layer = snt.Linear(
num_dimensions, w_init=w_init, b_init=b_init)
self._use_tfd_independent = use_tfd_independent
def __call__(self, inputs: tf.Tensor) -> tfd.Distribution:
zero = tf.constant(0, dtype=inputs.dtype)
mean = self._mean_layer(inputs)
if self._fixed_scale:
scale = tf.ones_like(mean) * self._init_scale
else:
scale = tf.nn.softplus(self._scale_layer(inputs))
scale *= self._init_scale / tf.nn.softplus(zero)
scale += self._min_scale
# Maybe transform the mean.
if self._tanh_mean:
mean = tf.tanh(mean)
if self._use_tfd_independent:
dist = tfd.Independent(tfd.Normal(loc=mean, scale=scale))
else:
dist = tfd.MultivariateNormalDiag(loc=mean, scale_diag=scale)
return dist
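# Illustrative usage sketch (not part of the original module): a diagonal
# Gaussian policy head over a 6-dimensional action space. The hidden size and
# batch size are arbitrary example values.
def _example_mvn_diag_head_usage() -> tf.Tensor:
  head = MultivariateNormalDiagHead(num_dimensions=6)
  hiddens = tf.ones([8, 32])
  distribution = head(hiddens)  # tfd.MultivariateNormalDiag by default.
  return distribution.sample()  # Shape [8, 6].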
class GaussianMixture(snt.Module):
"""Module that outputs a Gaussian Mixture Distribution."""
def __init__(self,
num_dimensions: int,
num_components: int,
multivariate: bool,
init_scale: Optional[float] = None,
name: str = 'GaussianMixture'):
"""Initialization.
Args:
num_dimensions: dimensionality of the output distribution
num_components: number of mixture components.
multivariate: whether the resulting distribution is multivariate or not.
init_scale: the initial scale for the Gaussian mixture components.
name: name of the module passed to snt.Module parent class.
"""
super().__init__(name=name)
self._num_dimensions = num_dimensions
self._num_components = num_components
self._multivariate = multivariate
if init_scale is not None:
self._scale_factor = init_scale / tf.nn.softplus(0.)
else:
self._scale_factor = 1.0 # Corresponds to init_scale = softplus(0).
# Define the weight initializer.
w_init = tf.initializers.VarianceScaling(1e-5)
# Create a layer that outputs the unnormalized log-weights.
if self._multivariate:
logits_size = self._num_components
else:
logits_size = self._num_dimensions * self._num_components
self._logit_layer = snt.Linear(logits_size, w_init=w_init)
# Create two layers that outputs a location and a scale, respectively, for
# each dimension and each component.
self._loc_layer = snt.Linear(
self._num_dimensions * self._num_components, w_init=w_init)
self._scale_layer = snt.Linear(
self._num_dimensions * self._num_components, w_init=w_init)
def __call__(self,
inputs: tf.Tensor,
low_noise_policy: bool = False) -> tfd.Distribution:
"""Run the networks through inputs.
Args:
inputs: hidden activations of the policy network body.
low_noise_policy: whether to set vanishingly small scales for each
component. If this flag is set to True, the policy is effectively run
without Gaussian noise.
Returns:
Mixture Gaussian distribution.
"""
# Compute logits, locs, and scales if necessary.
logits = self._logit_layer(inputs)
locs = self._loc_layer(inputs)
# When a low_noise_policy is requested, set the scales to their minimum value.
if low_noise_policy:
scales = tf.fill(locs.shape, _MIN_SCALE)
else:
scales = self._scale_layer(inputs)
scales = self._scale_factor * tf.nn.softplus(scales) + _MIN_SCALE
if self._multivariate:
shape = [-1, self._num_components, self._num_dimensions]
# Reshape the mixture's location and scale parameters appropriately.
locs = tf.reshape(locs, shape)
scales = tf.reshape(scales, shape)
# In this case, no need to reshape logits as they are in the correct shape
# already, namely [batch_size, num_components].
components_distribution = tfd.MultivariateNormalDiag(
loc=locs, scale_diag=scales)
else:
shape = [-1, self._num_dimensions, self._num_components]
# Reshape the mixture's location and scale parameters appropriately.
locs = tf.reshape(locs, shape)
scales = tf.reshape(scales, shape)
components_distribution = tfd.Normal(loc=locs, scale=scales)
logits = tf.reshape(logits, shape)
# Create the mixture distribution.
distribution = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(logits=logits),
components_distribution=components_distribution)
if not self._multivariate:
distribution = tfd.Independent(distribution)
return distribution
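# Illustrative usage sketch (not part of the original module): a univariate
# mixture head with 5 components per action dimension. Setting
# low_noise_policy=True collapses each component's scale to _MIN_SCALE, which
# makes evaluation near-deterministic. Sizes are arbitrary example values.
def _example_gaussian_mixture_usage() -> tf.Tensor:
  head = GaussianMixture(num_dimensions=3, num_components=5,
                         multivariate=False)
  hiddens = tf.ones([8, 32])
  distribution = head(hiddens, low_noise_policy=True)
  return distribution.sample()  # Shape [8, 3].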
class UnivariateGaussianMixture(GaussianMixture):
"""Head which outputs a Mixture of Gaussians Distribution."""
def __init__(self,
num_dimensions: int,
num_components: int = 5,
init_scale: Optional[float] = None,
num_mixtures: Optional[int] = None):
"""Create an mixture of Gaussian actor head.
Args:
num_dimensions: dimensionality of the output distribution. Each dimension
is going to be an independent 1d GMM model.
num_components: number of mixture components.
init_scale: the initial scale for the Gaussian mixture components.
num_mixtures: deprecated argument which overwrites num_components.
"""
if num_mixtures is not None:
logging.warning("""the num_mixtures parameter has been deprecated; use
num_components instead; the value of num_components is being
ignored""")
num_components = num_mixtures
super().__init__(num_dimensions=num_dimensions,
num_components=num_components,
multivariate=False,
init_scale=init_scale,
name='UnivariateGaussianMixture')
class MultivariateGaussianMixture(GaussianMixture):
"""Head which outputs a mixture of multivariate Gaussians distribution."""
def __init__(self,
num_dimensions: int,
num_components: int = 5,
init_scale: Optional[float] = None):
"""Initialization.
Args:
num_dimensions: dimensionality of the output distribution
(also the dimensionality of the multivariate Gaussian model).
num_components: number of mixture components.
init_scale: the initial scale for the Gaussian mixture components.
"""
super().__init__(num_dimensions=num_dimensions,
num_components=num_components,
multivariate=True,
init_scale=init_scale,
name='MultivariateGaussianMixture')
class ApproximateMode(snt.Module):
"""Override the mode function of the distribution.
For transformed distributions with a non-constant Jacobian, the mode is
non-trivial to compute, so for these distributions the mode function is not
supported in
TFP. A frequently used approximation is to forward transform the mode of the
untransformed distribution.
Otherwise (an untransformed distribution or a transformed distribution with a
constant Jacobian), this is a no-op.
"""
def __call__(self, inputs: tfd.Distribution) -> tfd.Distribution:
if isinstance(inputs, tfd.TransformedDistribution):
if not inputs.bijector.is_constant_jacobian:
def _mode(self, **kwargs):
distribution_kwargs, bijector_kwargs = self._kwargs_split_fn(kwargs)
x = self.distribution.mode(**distribution_kwargs)
y = self.bijector.forward(x, **bijector_kwargs)
return y
inputs._mode = types.MethodType(_mode, inputs)
return inputs
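# Illustrative usage sketch (not part of the original module): a tanh-squashed
# Gaussian has a non-constant Jacobian, so its mode is normally unsupported;
# ApproximateMode patches it to be the forward-transformed mode of the base
# distribution. The loc/scale values are arbitrary examples.
def _example_approximate_mode_usage() -> tf.Tensor:
  base = tfd.Normal(loc=[0.5, -0.5], scale=[1.0, 1.0])
  squashed = tfd.TransformedDistribution(base, tfp.bijectors.Tanh())
  squashed = ApproximateMode()(squashed)
  return squashed.mode()  # Approximately tanh([0.5, -0.5]).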
| acme-master | acme/tf/networks/distributional.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributions, for use in acme/networks/distributional.py."""
from typing import Optional
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
@tfp.experimental.auto_composite_tensor
class DiscreteValuedDistribution(tfd.Categorical):
"""This is a generalization of a categorical distribution.
The support of a DiscreteValuedDistribution can be any real-valued range,
whereas a categorical distribution has integer support [0, n_categories - 1]
(or [1, n_categories]). This generalization allows us to take the mean of the
distribution over its support.
"""
def __init__(self,
values: tf.Tensor,
logits: Optional[tf.Tensor] = None,
probs: Optional[tf.Tensor] = None,
name: str = 'DiscreteValuedDistribution'):
"""Initialization.
Args:
values: Values making up support of the distribution. Should have a shape
compatible with logits.
logits: An N-D Tensor, N >= 1, representing the log probabilities of a set
of Categorical distributions. The first N - 1 dimensions index into a
batch of independent distributions and the last dimension indexes into
the classes.
probs: An N-D Tensor, N >= 1, representing the probabilities of a set of
Categorical distributions. The first N - 1 dimensions index into a batch
of independent distributions and the last dimension represents a vector
of probabilities for each class. Only one of logits or probs should be
passed in.
name: Name of the distribution object.
"""
self._values = tf.convert_to_tensor(values)
shape_strings = [f'D{i}' for i, _ in enumerate(values.shape)]
if logits is not None:
logits = tf.convert_to_tensor(logits)
tf.debugging.assert_shapes([(values, shape_strings),
(logits, [..., *shape_strings])])
if probs is not None:
probs = tf.convert_to_tensor(probs)
tf.debugging.assert_shapes([(values, shape_strings),
(probs, [..., *shape_strings])])
super().__init__(logits=logits, probs=probs, name=name)
self._parameters = dict(values=values,
logits=logits,
probs=probs,
name=name)
@property
def values(self) -> tf.Tensor:
return self._values
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
values=tfp.util.ParameterProperties(event_ndims=None),
logits=tfp.util.ParameterProperties(
event_ndims=lambda self: self.values.shape.rank),
probs=tfp.util.ParameterProperties(
event_ndims=lambda self: self.values.shape.rank,
is_preferred=False))
def _sample_n(self, n, seed=None) -> tf.Tensor:
indices = super()._sample_n(n, seed=seed)
return tf.gather(self.values, indices, axis=-1)
def _mean(self) -> tf.Tensor:
"""Overrides the Categorical mean by incorporating category values."""
return tf.reduce_sum(self.probs_parameter() * self.values, axis=-1)
def _variance(self) -> tf.Tensor:
"""Overrides the Categorical variance by incorporating category values."""
dist_squared = tf.square(tf.expand_dims(self.mean(), -1) - self.values)
return tf.reduce_sum(self.probs_parameter() * dist_squared, axis=-1)
def _event_shape(self):
# Omit the atoms axis, to return just the shape of a single (i.e. unbatched)
# sample value.
return self._values.shape[:-1]
def _event_shape_tensor(self):
return tf.shape(self._values)[:-1]
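# Illustrative usage sketch (not part of the original module): a distribution
# over three real-valued atoms; unlike a plain Categorical, its mean and
# variance are computed over the atom values. Numbers are arbitrary examples.
def _example_discrete_valued_distribution_usage() -> tf.Tensor:
  values = tf.constant([-1.0, 0.0, 1.0])
  logits = tf.math.log([0.25, 0.5, 0.25])
  distribution = DiscreteValuedDistribution(values=values, logits=logits)
  return distribution.mean()  # 0.25 * (-1) + 0.5 * 0 + 0.25 * 1 = 0.0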
| acme-master | acme/tf/networks/distributions.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test networks for building recurrent agents."""
import os
from acme import specs
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.tf.networks import recurrence
import numpy as np
import sonnet as snt
import tensorflow as tf
import tree
from absl.testing import absltest
# Simple critic-like modules for testing.
class Critic(snt.Module):
def __call__(self, o, a):
return o * a
class RNNCritic(snt.RNNCore):
def __call__(self, o, a, prev_state):
return o * a, prev_state
def initial_state(self, batch_size):
return ()
class NetsTest(tf.test.TestCase):
def test_criticdeeprnn_snapshot(self):
"""Test that CriticDeepRNN works correctly with snapshotting."""
# Create a test network.
critic = Critic()
rnn_critic = RNNCritic()
for base_net in [critic, rnn_critic]:
net = recurrence.CriticDeepRNN([base_net, snt.LSTM(10)])
obs = specs.Array([10], dtype=np.float32)
actions = specs.Array([10], dtype=np.float32)
spec = [obs, actions]
tf2_utils.create_variables(net, spec)
# Test that if you add some postprocessing without rerunning
# create_variables, it still works.
wrapped_net = recurrence.CriticDeepRNN([net, lambda x: x])
for curr_net in [net, wrapped_net]:
# Save the test network.
directory = absltest.get_default_test_tmpdir()
objects_to_save = {'net': curr_net}
snapshotter = tf2_savers.Snapshotter(
objects_to_save, directory=directory)
snapshotter.save()
# Reload the test network.
net2 = tf.saved_model.load(os.path.join(snapshotter.directory, 'net'))
obs = tf.ones((2, 10))
actions = tf.ones((2, 10))
state = curr_net.initial_state(2)
outputs1, next_state1 = curr_net(obs, actions, state)
outputs2, next_state2 = net2(obs, actions, state)
assert np.allclose(outputs1, outputs2)
assert np.allclose(tree.flatten(next_state1), tree.flatten(next_state2))
if __name__ == '__main__':
absltest.main()
| acme-master | acme/tf/networks/recurrence_test.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiplexers are networks that take multiple inputs."""
from typing import Callable, Optional, Union
from acme import types
from acme.tf import utils as tf2_utils
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
TensorTransformation = Union[snt.Module, Callable[[types.NestedTensor],
tf.Tensor]]
class CriticMultiplexer(snt.Module):
"""Module connecting a critic torso to (transformed) observations/actions.
This takes as input a `critic_network`, an `observation_network`, and an
`action_network` and returns another network whose outputs are given by
`critic_network(observation_network(o), action_network(a))`.
The observations and actions passed to this module are assumed to have
matching batch dimensions.
Notes:
- Either the `observation_` or `action_network` can be `None`, in which case
the observation or action, resp., are passed to the critic network as is.
- If all `critic_`, `observation_` and `action_network` are `None`, this
module reduces to a simple `tf2_utils.batch_concat()`.
"""
def __init__(self,
critic_network: Optional[TensorTransformation] = None,
observation_network: Optional[TensorTransformation] = None,
action_network: Optional[TensorTransformation] = None):
self._critic_network = critic_network
self._observation_network = observation_network
self._action_network = action_network
super().__init__(name='critic_multiplexer')
def __call__(self,
observation: types.NestedTensor,
action: types.NestedTensor) -> tf.Tensor:
# Maybe transform observations and actions before feeding them on.
if self._observation_network:
observation = self._observation_network(observation)
if self._action_network:
action = self._action_network(action)
if hasattr(observation, 'dtype') and hasattr(action, 'dtype'):
if observation.dtype != action.dtype:
# Observation and action must be the same type for concat to work
action = tf.cast(action, observation.dtype)
# Concat observations and actions, with one batch dimension.
outputs = tf2_utils.batch_concat([observation, action])
# Maybe transform output before returning.
if self._critic_network:
outputs = self._critic_network(outputs)
return outputs
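# Illustrative usage sketch (not part of the original module): a critic that
# concatenates observations and actions and feeds them through a small MLP.
# Layer and batch sizes are arbitrary example values.
def _example_critic_multiplexer_usage() -> tf.Tensor:
  critic = CriticMultiplexer(critic_network=snt.nets.MLP([64, 1]))
  observation = tf.ones([8, 10])
  action = tf.ones([8, 2])
  # Observations and actions are concatenated to [8, 12] before the MLP.
  return critic(observation, action)  # Shape [8, 1].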
| acme-master | acme/tf/networks/multiplexers.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme.tf.networks.distributional."""
from acme.tf.networks import distributional
import numpy as np
from numpy import testing as npt
from absl.testing import absltest
from absl.testing import parameterized
class DistributionalTest(parameterized.TestCase):
@parameterized.parameters(
((2, 3), (), (), 5, (2, 5)),
((2, 3), (4, 1), (1, 5), 6, (2, 4, 5, 6)),
)
def test_discrete_valued_head(
self,
input_shape,
vmin_shape,
vmax_shape,
num_atoms,
expected_logits_shape):
vmin = np.zeros(vmin_shape, float)
vmax = np.ones(vmax_shape, float)
head = distributional.DiscreteValuedHead(
vmin=vmin,
vmax=vmax,
num_atoms=num_atoms)
input_array = np.zeros(input_shape, dtype=float)
output_distribution = head(input_array)
self.assertEqual(output_distribution.logits_parameter().shape,
expected_logits_shape)
values = output_distribution._values
# Can't do assert_allclose(values[..., 0], vmin), because the args may
# have broadcast-compatible but unequal shapes. Do the following instead:
npt.assert_allclose(values[..., 0] - vmin, np.zeros_like(values[..., 0]))
npt.assert_allclose(values[..., -1] - vmax, np.zeros_like(values[..., -1]))
# Check that values are monotonically increasing.
intervals = values[..., 1:] - values[..., :-1]
npt.assert_array_less(np.zeros_like(intervals), intervals)
# Check that the values are equally spaced.
npt.assert_allclose(intervals[..., 1:] - intervals[..., :1],
np.zeros_like(intervals[..., 1:]),
atol=1e-7)
if __name__ == '__main__':
absltest.main()
| acme-master | acme/tf/networks/distributional_test.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy-value network head for actor-critic algorithms."""
from typing import Tuple
import sonnet as snt
import tensorflow as tf
class PolicyValueHead(snt.Module):
"""A network with two linear layers, for policy and value respectively."""
def __init__(self, num_actions: int):
super().__init__(name='policy_value_network')
self._policy_layer = snt.Linear(num_actions)
self._value_layer = snt.Linear(1)
def __call__(self, inputs: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Returns a (Logits, Value) tuple."""
logits = self._policy_layer(inputs) # [B, A]
value = tf.squeeze(self._value_layer(inputs), axis=-1) # [B]
return logits, value
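# Illustrative usage sketch (not part of the original module): the head sits
# on top of a shared torso and returns per-action logits plus a scalar value.
# Sizes below are arbitrary example values.
def _example_policy_value_head_usage() -> Tuple[tf.Tensor, tf.Tensor]:
  head = PolicyValueHead(num_actions=4)
  hiddens = tf.ones([8, 32])
  logits, value = head(hiddens)  # Shapes [8, 4] and [8].
  return logits, value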
| acme-master | acme/tf/networks/policy_value.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Noise layers (for exploration)."""
from acme import types
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
import tree
tfd = tfp.distributions
class ClippedGaussian(snt.Module):
"""Sonnet module for adding clipped Gaussian noise to each output."""
def __init__(self, stddev: float, name: str = 'clipped_gaussian'):
super().__init__(name=name)
self._noise = tfd.Normal(loc=0., scale=stddev)
def __call__(self, inputs: types.NestedTensor) -> types.NestedTensor:
def add_noise(tensor: tf.Tensor):
output = tensor + tf.cast(self._noise.sample(tensor.shape),
dtype=tensor.dtype)
output = tf.clip_by_value(output, -1.0, 1.0)
return output
return tree.map_structure(add_noise, inputs)
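# Illustrative usage sketch (not part of the original module): adds clipped
# Gaussian exploration noise to a batch of continuous actions assumed to lie
# in [-1, 1]. The stddev and shapes are arbitrary example values.
def _example_clipped_gaussian_usage() -> tf.Tensor:
  noise_layer = ClippedGaussian(stddev=0.1)
  actions = tf.zeros([8, 3])
  return noise_layer(actions)  # Same shape, values clipped to [-1, 1].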
| acme-master | acme/tf/networks/noise.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks used in continuous control."""
from typing import Callable, Optional, Sequence
from acme import types
from acme.tf import utils as tf2_utils
from acme.tf.networks import base
import sonnet as snt
import tensorflow as tf
def _uniform_initializer():
return tf.initializers.VarianceScaling(
distribution='uniform', mode='fan_out', scale=0.333)
class NearZeroInitializedLinear(snt.Linear):
"""Simple linear layer, initialized at near zero weights and zero biases."""
def __init__(self, output_size: int, scale: float = 1e-4):
super().__init__(output_size, w_init=tf.initializers.VarianceScaling(scale))
class LayerNormMLP(snt.Module):
"""Simple feedforward MLP torso with initial layer-norm.
This module is an MLP which applies LayerNorm (followed by a tanh) to the
output of the first linear layer, and the chosen non-linearity (ELU by
default) after every remaining layer except, unless activate_final is set,
the last.
"""
def __init__(self,
layer_sizes: Sequence[int],
w_init: Optional[snt.initializers.Initializer] = None,
activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.elu,
activate_final: bool = False):
"""Construct the MLP.
Args:
layer_sizes: a sequence of ints specifying the size of each layer.
w_init: initializer for Linear weights.
activation: activation function to apply between linear layers. Defaults
to ELU. Note! This is different from snt.nets.MLP's default.
activate_final: whether or not to use the activation function on the final
layer of the neural network.
"""
super().__init__(name='feedforward_mlp_torso')
self._network = snt.Sequential([
snt.Linear(layer_sizes[0], w_init=w_init or _uniform_initializer()),
snt.LayerNorm(
axis=slice(1, None), create_scale=True, create_offset=True),
tf.nn.tanh,
snt.nets.MLP(
layer_sizes[1:],
w_init=w_init or _uniform_initializer(),
activation=activation,
activate_final=activate_final),
])
def __call__(self, observations: types.Nest) -> tf.Tensor:
"""Forwards the policy network."""
return self._network(tf2_utils.batch_concat(observations))
class ResidualLayernormWrapper(snt.Module):
"""Wrapper that applies residual connections and layer norm."""
def __init__(self, layer: base.Module):
"""Creates the Wrapper Class.
Args:
layer: module to wrap.
"""
super().__init__(name='ResidualLayernormWrapper')
self._layer = layer
self._layer_norm = snt.LayerNorm(
axis=-1, create_scale=True, create_offset=True)
def __call__(self, inputs: tf.Tensor):
"""Returns the result of the residual and layernorm computation.
Args:
inputs: inputs to the main module.
"""
# Apply main module.
outputs = self._layer(inputs)
outputs = self._layer_norm(outputs + inputs)
return outputs
class LayerNormAndResidualMLP(snt.Module):
"""MLP with residual connections and layer norm.
An MLP which applies a residual connection and layer normalisation after
every two linear layers. Similar to a ResNet, but with fully connected layers
instead of convolutions.
"""
def __init__(self, hidden_size: int, num_blocks: int):
"""Create the model.
Args:
hidden_size: width of each hidden layer.
num_blocks: number of blocks, each block being MLP([hidden_size,
hidden_size]) + layer norm + residual connection.
"""
super().__init__(name='LayerNormAndResidualMLP')
# Create initial MLP layer.
layers = [snt.nets.MLP([hidden_size], w_init=_uniform_initializer())]
# Follow it up with num_blocks MLPs with layernorm and residual connections.
for _ in range(num_blocks):
mlp = snt.nets.MLP([hidden_size, hidden_size],
w_init=_uniform_initializer())
layers.append(ResidualLayernormWrapper(mlp))
self._module = snt.Sequential(layers)
def __call__(self, inputs: tf.Tensor):
return self._module(inputs)
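# Illustrative usage sketch (not part of the original module): a LayerNormMLP
# torso followed by a residual layer-norm MLP block, as might be used in a
# continuous-control network. Sizes are arbitrary example values.
def _example_continuous_torso_usage() -> tf.Tensor:
  torso = snt.Sequential([
      LayerNormMLP(layer_sizes=[256, 256]),
      LayerNormAndResidualMLP(hidden_size=256, num_blocks=2),
  ])
  observations = tf.ones([8, 24])
  return torso(observations)  # Shape [8, 256].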
| acme-master | acme/tf/networks/continuous.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapping trfl epsilon_greedy with legal action masking."""
from typing import Optional, Mapping, Union
import sonnet as snt
import tensorflow as tf
import trfl
class NetworkWithMaskedEpsilonGreedy(snt.Module):
"""Epsilon greedy sampling with action masking on network outputs."""
def __init__(self,
network: snt.Module,
epsilon: Optional[tf.Tensor] = None):
"""Initialize the network and epsilon.
Usage:
Wrap an observation in a dictionary in your environment as follows:
observation <-- {"your_key_for_observation": observation,
"legal_actions_mask": your_action_mask_tensor}
and update your network to use 'observation["your_key_for_observation"]'
rather than 'observation'.
Args:
network: the online Q network (the one being optimized)
epsilon: probability of taking a random action.
"""
super().__init__()
self._network = network
self._epsilon = epsilon
def __call__(
self, observation: Union[Mapping[str, tf.Tensor],
tf.Tensor]) -> tf.Tensor:
q = self._network(observation)
return trfl.epsilon_greedy(
q, epsilon=self._epsilon,
legal_actions_mask=observation['legal_actions_mask']).sample()
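# Illustrative usage sketch (not part of the original module): the observation
# is a dict carrying both the raw features (under a hypothetical key) and a
# legal-actions mask; the wrapped Q-network reads only the features. Shapes,
# key names and epsilon are arbitrary example values.
def _example_masked_epsilon_greedy_usage() -> tf.Tensor:
  q_network = snt.Sequential([
      lambda obs: obs['features'],  # Hypothetical observation key.
      snt.nets.MLP([50, 4]),
  ])
  policy = NetworkWithMaskedEpsilonGreedy(q_network, epsilon=tf.constant(0.1))
  observation = {
      'features': tf.ones([8, 10]),
      'legal_actions_mask': tf.ones([8, 4]),
  }
  return policy(observation)  # Sampled integer actions, shape [8].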
| acme-master | acme/tf/networks/masked_epsilon_greedy.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convenient base classes for custom networks."""
import abc
from typing import Tuple, TypeVar
from acme import types
import sonnet as snt
import tensorflow_probability as tfp
State = TypeVar('State')
class Module(snt.Module, abc.ABC):
"""A base class for module with abstract __call__ method."""
@abc.abstractmethod
def __call__(self, *args, **kwargs) -> types.NestedTensor:
"""Forward pass of the module."""
class DistributionalModule(snt.Module, abc.ABC):
"""A base class for modules that output distributions."""
@abc.abstractmethod
def __call__(self, *args, **kwargs) -> tfp.distributions.Distribution:
"""Forward pass of the module."""
class RNNCore(snt.RNNCore, abc.ABC):
"""An RNN core with a custom `unroll` function."""
@abc.abstractmethod
def unroll(self,
inputs: types.NestedTensor,
state: State,
sequence_length: int,
) -> Tuple[types.NestedTensor, State]:
"""A custom function for doing static unrolls over sequences.
This has the same API as `snt.static_unroll`, but allows the user to specify
their own implementation to take advantage of the structure of the network
for better performance, e.g. by batching the feed-forward pass over the
whole sequence.
Args:
inputs: A nest of `tf.Tensor` in time-major format.
state: The RNN core state.
sequence_length: How long the static_unroll should go for.
Returns:
Nested sequence output of RNN, and final state.
"""
| acme-master | acme/tf/networks/base.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks used for handling illegal actions."""
from typing import Any, Callable, Iterable, Optional, Union
# pytype: disable=import-error
from acme.wrappers import open_spiel_wrapper
# pytype: enable=import-error
import numpy as np
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
class MaskedSequential(snt.Module):
"""Applies a legal actions mask to a linear chain of modules / callables.
It is assumed the trailing dimension of the final layer (representing
action values) is the same as the trailing dimension of legal_actions.
"""
def __init__(self,
layers: Optional[Iterable[Callable[..., Any]]] = None,
name: str = 'MaskedSequential'):
super().__init__(name=name)
self._layers = list(layers) if layers is not None else []
self._illegal_action_penalty = -1e9
# Note: illegal_action_penalty cannot be -np.inf because trfl's qlearning
# ops utilize a batched_index function that returns NaN whenever -np.inf
# is present among action values.
def __call__(self, inputs: open_spiel_wrapper.OLT) -> tf.Tensor:
# Extract observation, legal actions, and terminal
outputs = inputs.observation
legal_actions = inputs.legal_actions
terminal = inputs.terminal
for mod in self._layers:
outputs = mod(outputs)
# Apply legal actions mask
outputs = tf.where(tf.equal(legal_actions, 1), outputs,
tf.fill(tf.shape(outputs), self._illegal_action_penalty))
# When computing the Q-learning target (r_t + d_t * max q_t) we need to
# ensure max q_t = 0 in terminal states.
outputs = tf.where(tf.equal(terminal, 1), tf.zeros_like(outputs), outputs)
return outputs
# FIXME: Add functionality to support decaying epsilon parameter.
# FIXME: This is a modified version of trfl's epsilon_greedy() which
# incorporates code from the bug fix described here
# https://github.com/deepmind/trfl/pull/28
class EpsilonGreedy(snt.Module):
"""Computes an epsilon-greedy distribution over actions.
This policy does the following:
- With probability 1 - epsilon, take the action corresponding to the highest
action value, breaking ties uniformly at random.
- With probability epsilon, take an action uniformly at random.
"""
def __init__(self,
epsilon: Union[tf.Tensor, float],
threshold: float,
name: str = 'EpsilonGreedy'):
"""Initialize the policy.
Args:
epsilon: Exploratory param with value between 0 and 1.
threshold: Action values must exceed this value to qualify as a legal
action and possibly be selected by the policy.
name: Name of the network.
Returns:
policy: tfp.distributions.Categorical distribution representing the
policy.
"""
super().__init__(name=name)
self._epsilon = tf.Variable(epsilon, trainable=False)
self._threshold = threshold
def __call__(self, action_values: tf.Tensor) -> tfd.Categorical:
legal_actions_mask = tf.where(
tf.math.less_equal(action_values, self._threshold),
tf.fill(tf.shape(action_values), 0.),
tf.fill(tf.shape(action_values), 1.))
# Dithering action distribution.
dither_probs = 1 / tf.reduce_sum(legal_actions_mask, axis=-1,
keepdims=True) * legal_actions_mask
masked_action_values = tf.where(tf.equal(legal_actions_mask, 1),
action_values,
tf.fill(tf.shape(action_values), -np.inf))
# Greedy action distribution, breaking ties uniformly at random.
max_value = tf.reduce_max(masked_action_values, axis=-1, keepdims=True)
greedy_probs = tf.cast(
tf.equal(action_values * legal_actions_mask, max_value),
action_values.dtype)
greedy_probs /= tf.reduce_sum(greedy_probs, axis=-1, keepdims=True)
# Epsilon-greedy action distribution.
probs = self._epsilon * dither_probs + (1 - self._epsilon) * greedy_probs
# Make the policy object.
policy = tfd.Categorical(probs=probs)
return policy
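# Illustrative usage sketch (not part of the original module): action values
# for illegal actions are assumed to sit below the threshold (e.g. because
# MaskedSequential above applied a large penalty), so EpsilonGreedy only ever
# samples legal actions. The values, epsilon and threshold are arbitrary
# examples.
def _example_epsilon_greedy_usage() -> tf.Tensor:
  action_values = tf.constant([[0.3, -1e9, 0.7]])  # Middle action is illegal.
  policy = EpsilonGreedy(epsilon=0.1, threshold=-1e8)
  return policy(action_values).sample()  # Returns 0 or 2, never 1.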
| acme-master | acme/tf/networks/legal_actions.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sphinx configuration.
"""
project = 'Acme'
author = 'DeepMind Technologies Limited'
copyright = '2018, DeepMind Technologies Limited' # pylint: disable=redefined-builtin
version = ''
release = ''
master_doc = 'index'
extensions = [
'myst_parser'
]
html_theme = 'sphinx_rtd_theme'
html_logo = 'imgs/acme.png'
html_theme_options = {
'logo_only': True,
}
html_css_files = [
'custom.css',
]
templates_path = []
html_static_path = ['_static']
exclude_patterns = ['_build', 'requirements.txt']
| acme-master | docs/conf.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for multigrid environment."""
import functools
from typing import Any, Dict, NamedTuple, Sequence
from acme import specs
from acme.agents.jax import ppo
from acme.agents.jax.multiagent.decentralized import factories
from acme.jax import networks as networks_lib
from acme.jax import utils as acme_jax_utils
from acme.multiagent import types as ma_types
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow_probability
tfp = tensorflow_probability.substrates.jax
tfd = tfp.distributions
class CategoricalParams(NamedTuple):
"""Parameters for a categorical distribution."""
logits: jnp.ndarray
def multigrid_obs_preproc(obs: Dict[str, Any],
conv_filters: int = 8,
conv_kernel: int = 3,
scalar_fc: int = 5,
scalar_name: str = 'direction',
scalar_dim: int = 4) -> jnp.ndarray:
"""Conducts preprocessing on 'multigrid' environment dict observations.
The preprocessing applied here is similar to those in:
https://github.com/google-research/google-research/blob/master/social_rl/multiagent_tfagents/multigrid_networks.py
Args:
obs: multigrid observation dict, which can include observation inputs such
as 'image', 'position', and a custom additional observation (defined by
scalar_name).
conv_filters: Number of convolution filters.
conv_kernel: Size of the convolution kernel.
scalar_fc: Number of neurons in the fully connected layer processing the
scalar input.
scalar_name: Key of an additional scalar observation; this is 'direction' in
most multigrid environments and can be overridden here if needed.
scalar_dim: Highest possible value for the scalar input. Used to convert to
one-hot representation.
Returns:
out: output observation.
"""
def _cast_and_scale(x, scale_by=10.0):
if isinstance(x, jnp.ndarray):
x = x.astype(jnp.float32)
return x / scale_by
outputs = []
if 'image' in obs.keys():
image_preproc = hk.Sequential([
_cast_and_scale,
hk.Conv2D(output_channels=conv_filters, kernel_shape=conv_kernel),
jax.nn.relu,
hk.Flatten()
])
outputs.append(image_preproc(obs['image']))
if 'position' in obs.keys():
position_preproc = hk.Sequential([_cast_and_scale, hk.Linear(scalar_fc)])
outputs.append(position_preproc(obs['position']))
if scalar_name in obs.keys():
direction_preproc = hk.Sequential([
functools.partial(jax.nn.one_hot, num_classes=scalar_dim),
hk.Flatten(),
hk.Linear(scalar_fc)
])
outputs.append(direction_preproc(obs[scalar_name]))
out = jnp.concatenate(outputs, axis=-1)
return out
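# Illustrative usage sketch (not part of the original module): the
# preprocessor builds Haiku modules, so it must be called inside an
# hk.transform. The observation shapes and keys below are arbitrary example
# values loosely following the multigrid conventions.
def _example_obs_preproc_usage() -> jnp.ndarray:
  preproc = hk.without_apply_rng(hk.transform(multigrid_obs_preproc))
  dummy_obs = {
      'image': jnp.zeros([2, 5, 5, 3]),
      'direction': jnp.zeros([2], dtype=jnp.int32),
  }
  params = preproc.init(jax.random.PRNGKey(0), dummy_obs)
  return preproc.apply(params, dummy_obs)  # Concatenated features, [2, N].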
def make_multigrid_dqn_networks(
environment_spec: specs.EnvironmentSpec) -> networks_lib.FeedForwardNetwork:
"""Returns DQN networks used by the agent in the multigrid environment."""
# Check that multigrid environment is defined with discrete actions, 0-indexed
assert np.issubdtype(environment_spec.actions.dtype, np.integer), (
'Expected multigrid environment to have discrete actions with int dtype'
f' but environment_spec.actions.dtype == {environment_spec.actions.dtype}'
)
assert environment_spec.actions.minimum == 0, (
'Expected multigrid environment to have 0-indexed action indices, but'
f' environment_spec.actions.minimum == {environment_spec.actions.minimum}'
)
num_actions = environment_spec.actions.maximum + 1
def network(inputs):
model = hk.Sequential([
hk.Flatten(),
hk.nets.MLP([50, 50, num_actions]),
])
processed_inputs = multigrid_obs_preproc(inputs)
return model(processed_inputs)
network_hk = hk.without_apply_rng(hk.transform(network))
dummy_obs = acme_jax_utils.add_batch_dim(
acme_jax_utils.zeros_like(environment_spec.observations))
return networks_lib.FeedForwardNetwork(
init=lambda rng: network_hk.init(rng, dummy_obs), apply=network_hk.apply)
def make_multigrid_ppo_networks(
environment_spec: specs.EnvironmentSpec,
hidden_layer_sizes: Sequence[int] = (64, 64),
) -> ppo.PPONetworks:
"""Returns PPO networks used by the agent in the multigrid environments."""
# Check that multigrid environment is defined with discrete actions, 0-indexed
assert np.issubdtype(environment_spec.actions.dtype, np.integer), (
'Expected multigrid environment to have discrete actions with int dtype'
f' but environment_spec.actions.dtype == {environment_spec.actions.dtype}'
)
assert environment_spec.actions.minimum == 0, (
'Expected multigrid environment to have 0-indexed action indices, but'
f' environment_spec.actions.minimum == {environment_spec.actions.minimum}'
)
num_actions = environment_spec.actions.maximum + 1
def forward_fn(inputs):
processed_inputs = multigrid_obs_preproc(inputs)
trunk = hk.nets.MLP(hidden_layer_sizes, activation=jnp.tanh)
h = trunk(processed_inputs)
logits = hk.Linear(num_actions)(h)
values = hk.Linear(1)(h)
values = jnp.squeeze(values, axis=-1)
return (CategoricalParams(logits=logits), values)
# Transform into pure functions.
forward_fn = hk.without_apply_rng(hk.transform(forward_fn))
dummy_obs = acme_jax_utils.zeros_like(environment_spec.observations)
dummy_obs = acme_jax_utils.add_batch_dim(dummy_obs) # Dummy 'sequence' dim.
network = networks_lib.FeedForwardNetwork(
lambda rng: forward_fn.init(rng, dummy_obs), forward_fn.apply)
return make_categorical_ppo_networks(network) # pylint:disable=undefined-variable
def make_categorical_ppo_networks(
network: networks_lib.FeedForwardNetwork) -> ppo.PPONetworks:
"""Constructs a PPONetworks for Categorical Policy from FeedForwardNetwork.
Args:
network: a transformed Haiku network (or equivalent in other libraries) that
takes in observations and returns the action distribution and value.
Returns:
A PPONetworks instance with pure functions wrapping the input network.
"""
def log_prob(params: CategoricalParams, action):
return tfd.Categorical(logits=params.logits).log_prob(action)
def entropy(params: CategoricalParams, key: networks_lib.PRNGKey):
del key
return tfd.Categorical(logits=params.logits).entropy()
def sample(params: CategoricalParams, key: networks_lib.PRNGKey):
return tfd.Categorical(logits=params.logits).sample(seed=key)
def sample_eval(params: CategoricalParams, key: networks_lib.PRNGKey):
del key
return tfd.Categorical(logits=params.logits).mode()
return ppo.PPONetworks(
network=network,
log_prob=log_prob,
entropy=entropy,
sample=sample,
sample_eval=sample_eval)
def init_default_multigrid_network(
agent_type: str,
agent_spec: specs.EnvironmentSpec) -> ma_types.Networks:
"""Returns default networks for multigrid environment."""
if agent_type == factories.DefaultSupportedAgent.PPO:
return make_multigrid_ppo_networks(agent_spec)
else:
raise ValueError(f'Unsupported agent type: {agent_type}.')
| acme-master | examples/multiagent/multigrid/helpers.py |
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiagent multigrid training run example."""
from typing import Callable, Dict
from absl import flags
from acme import specs
from acme.agents.jax.multiagent import decentralized
from absl import app
import helpers
from acme.jax import experiments
from acme.jax import types as jax_types
from acme.multiagent import types as ma_types
from acme.utils import lp_utils
from acme.wrappers import multigrid_wrapper
import dm_env
import launchpad as lp
FLAGS = flags.FLAGS
_RUN_DISTRIBUTED = flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
_NUM_STEPS = flags.DEFINE_integer('num_steps', 10000,
'Number of env steps to run training for.')
_EVAL_EVERY = flags.DEFINE_integer('eval_every', 1000,
'How often to run evaluation.')
_ENV_NAME = flags.DEFINE_string('env_name', 'MultiGrid-Empty-5x5-v0',
'What environment to run.')
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 64, 'Batch size.')
_SEED = flags.DEFINE_integer('seed', 0, 'Random seed.')
def _make_environment_factory(env_name: str) -> jax_types.EnvironmentFactory:
def environment_factory(seed: int) -> dm_env.Environment:
del seed
return multigrid_wrapper.make_multigrid_environment(env_name)
return environment_factory
def _make_network_factory(
agent_types: Dict[ma_types.AgentID, ma_types.GenericAgent]
) -> Callable[[specs.EnvironmentSpec], ma_types.MultiAgentNetworks]:
def environment_factory(
environment_spec: specs.EnvironmentSpec) -> ma_types.MultiAgentNetworks:
return decentralized.network_factory(environment_spec, agent_types,
helpers.init_default_multigrid_network)
return environment_factory
def build_experiment_config() -> experiments.ExperimentConfig[
ma_types.MultiAgentNetworks, ma_types.MultiAgentPolicyNetworks,
ma_types.MultiAgentSample]:
"""Returns a config for multigrid experiments."""
environment_factory = _make_environment_factory(_ENV_NAME.value)
environment = environment_factory(_SEED.value)
agent_types = {
str(i): decentralized.DefaultSupportedAgent.PPO
for i in range(environment.num_agents) # pytype: disable=attribute-error
}
# Example of how to set custom sub-agent configurations.
ppo_configs = {'unroll_length': 16, 'num_minibatches': 32, 'num_epochs': 10}
config_overrides = {
k: ppo_configs for k, v in agent_types.items() if v == 'ppo'
}
configs = decentralized.default_config_factory(agent_types, _BATCH_SIZE.value,
config_overrides)
builder = decentralized.DecentralizedMultiAgentBuilder(
agent_types=agent_types, agent_configs=configs)
return experiments.ExperimentConfig(
builder=builder,
environment_factory=environment_factory,
network_factory=_make_network_factory(agent_types=agent_types),
seed=_SEED.value,
max_num_actor_steps=_NUM_STEPS.value)
def main(_):
config = build_experiment_config()
if _RUN_DISTRIBUTED.value:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=4)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(
experiment=config, eval_every=_EVAL_EVERY.value, num_eval_episodes=5)
if __name__ == '__main__':
app.run(main)
| acme-master | examples/multiagent/multigrid/run_multigrid.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running DQN on OpenSpiel game in a single process."""
from absl import app
from absl import flags
import acme
from acme import wrappers
from acme.agents.tf import dqn
from acme.environment_loops import open_spiel_environment_loop
from acme.tf.networks import legal_actions
from acme.wrappers import open_spiel_wrapper
import sonnet as snt
from open_spiel.python import rl_environment
flags.DEFINE_string('game', 'tic_tac_toe', 'Name of the game')
flags.DEFINE_integer('num_players', None, 'Number of players')
FLAGS = flags.FLAGS
def main(_):
# Create an environment and grab the spec.
env_configs = {'players': FLAGS.num_players} if FLAGS.num_players else {}
raw_environment = rl_environment.Environment(FLAGS.game, **env_configs)
environment = open_spiel_wrapper.OpenSpielWrapper(raw_environment)
environment = wrappers.SinglePrecisionWrapper(environment) # type: open_spiel_wrapper.OpenSpielWrapper # pytype: disable=annotation-type-mismatch
environment_spec = acme.make_environment_spec(environment)
# Build the networks.
networks = []
policy_networks = []
for _ in range(environment.num_players):
network = legal_actions.MaskedSequential([
snt.Flatten(),
snt.nets.MLP([50, 50, environment_spec.actions.num_values])
])
policy_network = snt.Sequential(
[network,
legal_actions.EpsilonGreedy(epsilon=0.1, threshold=-1e8)])
networks.append(network)
policy_networks.append(policy_network)
# Construct the agents.
agents = []
for network, policy_network in zip(networks, policy_networks):
agents.append(
dqn.DQN(environment_spec=environment_spec,
network=network,
policy_network=policy_network))
# Run the environment loop.
loop = open_spiel_environment_loop.OpenSpielEnvironmentLoop(
environment, agents)
loop.run(num_episodes=100000)
if __name__ == '__main__':
app.run(main)
| acme-master | examples/open_spiel/run_dqn.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running MCTS on BSuite in a single process."""
from typing import Tuple
from absl import app
from absl import flags
import acme
from acme import specs
from acme import wrappers
from acme.agents.tf import mcts
from acme.agents.tf.mcts import models
from acme.agents.tf.mcts.models import mlp
from acme.agents.tf.mcts.models import simulator
from acme.tf import networks
import bsuite
from bsuite.logging import csv_logging
import dm_env
import sonnet as snt
# Bsuite flags
flags.DEFINE_string('bsuite_id', 'deep_sea/0', 'Bsuite id.')
flags.DEFINE_string('results_dir', '/tmp/bsuite', 'CSV results directory.')
flags.DEFINE_boolean('overwrite', False, 'Whether to overwrite csv results.')
# Agent flags
flags.DEFINE_boolean('simulator', True, 'Simulator or learned model?')
FLAGS = flags.FLAGS
def make_env_and_model(
bsuite_id: str,
results_dir: str,
overwrite: bool) -> Tuple[dm_env.Environment, models.Model]:
"""Create environment and corresponding model (learned or simulator)."""
raw_env = bsuite.load_from_id(bsuite_id)
if FLAGS.simulator:
model = simulator.Simulator(raw_env) # pytype: disable=attribute-error
else:
model = mlp.MLPModel(
specs.make_environment_spec(raw_env),
replay_capacity=1000,
batch_size=16,
hidden_sizes=(50,),
)
environment = csv_logging.wrap_environment(
raw_env, bsuite_id, results_dir, overwrite)
environment = wrappers.SinglePrecisionWrapper(environment)
return environment, model
def make_network(action_spec: specs.DiscreteArray) -> snt.Module:
return snt.Sequential([
snt.Flatten(),
snt.nets.MLP([50, 50]),
networks.PolicyValueHead(action_spec.num_values),
])
def main(_):
# Create an environment and environment model.
environment, model = make_env_and_model(
bsuite_id=FLAGS.bsuite_id,
results_dir=FLAGS.results_dir,
overwrite=FLAGS.overwrite,
)
environment_spec = specs.make_environment_spec(environment)
# Create the network and optimizer.
network = make_network(environment_spec.actions)
optimizer = snt.optimizers.Adam(learning_rate=1e-3)
# Construct the agent.
agent = mcts.MCTS(
environment_spec=environment_spec,
model=model,
network=network,
optimizer=optimizer,
discount=0.99,
replay_capacity=10000,
n_step=1,
batch_size=16,
num_simulations=50,
)
# Run the environment loop.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=environment.bsuite_num_episodes) # pytype: disable=attribute-error
if __name__ == '__main__':
app.run(main)
| acme-master | examples/bsuite/run_mcts.py |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs IMPALA on bsuite locally."""
from absl import app
from absl import flags
import acme
from acme import specs
from acme import wrappers
from acme.agents.tf import impala
from acme.tf import networks
import bsuite
import sonnet as snt
# Bsuite flags
flags.DEFINE_string('bsuite_id', 'deep_sea/0', 'Bsuite id.')
flags.DEFINE_string('results_dir', '/tmp/bsuite', 'CSV results directory.')
flags.DEFINE_boolean('overwrite', False, 'Whether to overwrite csv results.')
FLAGS = flags.FLAGS
def make_network(action_spec: specs.DiscreteArray) -> snt.RNNCore:
return snt.DeepRNN([
snt.Flatten(),
snt.nets.MLP([50, 50]),
snt.LSTM(20),
networks.PolicyValueHead(action_spec.num_values),
])
def main(_):
# Create an environment and grab the spec.
raw_environment = bsuite.load_and_record_to_csv(
bsuite_id=FLAGS.bsuite_id,
results_dir=FLAGS.results_dir,
overwrite=FLAGS.overwrite,
)
environment = wrappers.SinglePrecisionWrapper(raw_environment)
environment_spec = specs.make_environment_spec(environment)
# Create the networks to optimize.
network = make_network(environment_spec.actions)
agent = impala.IMPALA(
environment_spec=environment_spec,
network=network,
sequence_length=3,
sequence_period=3,
)
# Run the environment loop.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=environment.bsuite_num_episodes) # pytype: disable=attribute-error
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/bsuite/run_impala.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running DQN on BSuite in a single process."""
from absl import app
from absl import flags
import acme
from acme import specs
from acme import wrappers
from acme.agents.tf import dqn
import bsuite
import sonnet as snt
# Bsuite flags
flags.DEFINE_string('bsuite_id', 'deep_sea/0', 'Bsuite id.')
flags.DEFINE_string('results_dir', '/tmp/bsuite', 'CSV results directory.')
flags.DEFINE_boolean('overwrite', False, 'Whether to overwrite csv results.')
FLAGS = flags.FLAGS
def main(_):
# Create an environment and grab the spec.
raw_environment = bsuite.load_and_record_to_csv(
bsuite_id=FLAGS.bsuite_id,
results_dir=FLAGS.results_dir,
overwrite=FLAGS.overwrite,
)
environment = wrappers.SinglePrecisionWrapper(raw_environment)
environment_spec = specs.make_environment_spec(environment)
network = snt.Sequential([
snt.Flatten(),
snt.nets.MLP([50, 50, environment_spec.actions.num_values])
])
# Construct the agent.
agent = dqn.DQN(
environment_spec=environment_spec, network=network)
# Run the environment loop.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=environment.bsuite_num_episodes) # pytype: disable=attribute-error
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/bsuite/run_dqn.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running R2D2 on discrete control tasks."""
from absl import flags
from acme.agents.jax import r2d2
import helpers
from absl import app
from acme.jax import experiments
from acme.utils import lp_utils
import dm_env
import launchpad as lp
# Flags which modify the behavior of the launcher.
flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
flags.DEFINE_string('env_name', 'Pong', 'What environment to run.')
flags.DEFINE_integer('seed', 0, 'Random seed (experiment).')
flags.DEFINE_integer('num_steps', 1_000_000,
'Number of environment steps to run for.')
FLAGS = flags.FLAGS
def build_experiment_config():
"""Builds R2D2 experiment config which can be executed in different ways."""
batch_size = 32
# The env_name must be dereferenced outside the environment factory as FLAGS
# cannot be pickled and pickling is necessary when launching distributed
# experiments via Launchpad.
env_name = FLAGS.env_name
# Create an environment factory.
def environment_factory(seed: int) -> dm_env.Environment:
del seed
return helpers.make_atari_environment(
level=env_name,
sticky_actions=True,
zero_discount_on_life_loss=False,
oar_wrapper=True,
num_stacked_frames=1,
flatten_frame_stack=True,
grayscaling=False)
# Configure the agent.
config = r2d2.R2D2Config(
burn_in_length=8,
trace_length=40,
sequence_period=20,
min_replay_size=10_000,
batch_size=batch_size,
prefetch_size=1,
samples_per_insert=1.0,
evaluation_epsilon=1e-3,
learning_rate=1e-4,
target_update_period=1200,
variable_update_period=100,
)
return experiments.ExperimentConfig(
builder=r2d2.R2D2Builder(config),
network_factory=r2d2.make_atari_networks,
environment_factory=environment_factory,
seed=FLAGS.seed,
max_num_actor_steps=FLAGS.num_steps)
def main(_):
config = build_experiment_config()
if FLAGS.run_distributed:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=4 if lp_utils.is_local_run() else 80)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(experiment=config)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/rl_discrete/run_r2d2.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running MuZero on discrete control tasks."""
import datetime
import math
from absl import flags
from acme import specs
from acme.agents.jax import muzero
import helpers
from absl import app
from acme.jax import experiments
from acme.jax import inference_server as inference_server_lib
from acme.utils import lp_utils
import dm_env
import launchpad as lp
ENV_NAME = flags.DEFINE_string('env_name', 'Pong', 'What environment to run')
SEED = flags.DEFINE_integer('seed', 0, 'Random seed.')
NUM_STEPS = flags.DEFINE_integer(
'num_steps', 2_000_000, 'Number of env steps to run.'
)
NUM_LEARNERS = flags.DEFINE_integer('num_learners', 1, 'Number of learners.')
NUM_ACTORS = flags.DEFINE_integer('num_actors', 4, 'Number of actors.')
NUM_ACTORS_PER_NODE = flags.DEFINE_integer(
'num_actors_per_node',
2,
'Number of colocated actors',
)
RUN_DISTRIBUTED = flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.',)
def build_experiment_config() -> experiments.ExperimentConfig:
"""Builds DQN experiment config which can be executed in different ways."""
env_name = ENV_NAME.value
muzero_config = muzero.MZConfig()
def env_factory(seed: int) -> dm_env.Environment:
del seed
return helpers.make_atari_environment(
level=env_name,
sticky_actions=True,
zero_discount_on_life_loss=True,
num_stacked_frames=1,
grayscaling=False,
to_float=False,
)
def network_factory(
spec: specs.EnvironmentSpec,
) -> muzero.MzNetworks:
return muzero.make_network(
spec,
stack_size=muzero_config.stack_size,
)
# Construct the builder.
env_spec = specs.make_environment_spec(env_factory(SEED.value))
extra_spec = {
muzero.POLICY_PROBS_KEY: specs.Array(
shape=(env_spec.actions.num_values,), dtype='float32'
),
muzero.RAW_VALUES_KEY: specs.Array(shape=(), dtype='float32'),
}
muzero_builder = muzero.MzBuilder( # pytype: disable=wrong-arg-types # jax-ndarray
muzero_config,
extra_spec,
)
checkpointing_config = experiments.CheckpointingConfig(
replay_checkpointing_time_delta_minutes=20,
time_delta_minutes=1,
)
return experiments.ExperimentConfig(
builder=muzero_builder,
environment_factory=env_factory,
network_factory=network_factory,
seed=SEED.value,
max_num_actor_steps=NUM_STEPS.value,
checkpointing=checkpointing_config,
)
def main(_):
experiment_config = build_experiment_config()
if not RUN_DISTRIBUTED.value:
raise NotImplementedError('Single threaded experiment not supported.')
inference_server_config = inference_server_lib.InferenceServerConfig(
batch_size=64,
update_period=400,
timeout=datetime.timedelta(
seconds=1,
),
)
num_inference_servers = math.ceil(
NUM_ACTORS.value / (128 * NUM_ACTORS_PER_NODE.value),
)
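  # With the default flags (num_actors=4, num_actors_per_node=2) this
  # evaluates to ceil(4 / 256) = 1 inference server.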
program = experiments.make_distributed_experiment(
experiment=experiment_config,
num_actors=NUM_ACTORS.value,
num_learner_nodes=NUM_LEARNERS.value,
num_actors_per_node=NUM_ACTORS_PER_NODE.value,
num_inference_servers=num_inference_servers,
inference_server_config=inference_server_config,
)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program,),)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/rl_discrete/run_muzero.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running Munchausen-DQN on discrete control tasks."""
from absl import flags
from acme.agents.jax import dqn
from acme.agents.jax.dqn import losses
import helpers
from absl import app
from acme.jax import experiments
from acme.utils import lp_utils
import launchpad as lp
RUN_DISTRIBUTED = flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
ENV_NAME = flags.DEFINE_string('env_name', 'Pong', 'What environment to run')
SEED = flags.DEFINE_integer('seed', 0, 'Random seed.')
NUM_STEPS = flags.DEFINE_integer('num_steps', 1_000_000,
'Number of env steps to run.')
def build_experiment_config():
"""Builds MDQN experiment config which can be executed in different ways."""
# Create an environment, grab the spec, and use it to create networks.
env_name = ENV_NAME.value
def env_factory(seed):
del seed
return helpers.make_atari_environment(
level=env_name, sticky_actions=True, zero_discount_on_life_loss=False)
# Construct the agent.
config = dqn.DQNConfig(
discount=0.99,
eval_epsilon=0.,
learning_rate=5e-5,
n_step=1,
epsilon=0.01,
target_update_period=2000,
min_replay_size=20_000,
max_replay_size=1_000_000,
samples_per_insert=8,
batch_size=32)
loss_fn = losses.MunchausenQLearning(
discount=config.discount, max_abs_reward=1., huber_loss_parameter=1.,
entropy_temperature=0.03, munchausen_coefficient=0.9)
dqn_builder = dqn.DQNBuilder(config, loss_fn=loss_fn)
return experiments.ExperimentConfig(
builder=dqn_builder,
environment_factory=env_factory,
network_factory=helpers.make_dqn_atari_network,
seed=SEED.value,
max_num_actor_steps=NUM_STEPS.value)
def main(_):
experiment_config = build_experiment_config()
if RUN_DISTRIBUTED.value:
program = experiments.make_distributed_experiment(
experiment=experiment_config,
num_actors=4 if lp_utils.is_local_run() else 128)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(experiment_config)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/rl_discrete/run_mdqn.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running IMPALA on discrete control tasks."""
from absl import flags
from acme.agents.jax import impala
from acme.agents.jax.impala import builder as impala_builder
import helpers
from absl import app
from acme.jax import experiments
from acme.utils import lp_utils
import launchpad as lp
import optax
# Flags which modify the behavior of the launcher.
RUN_DISTRIBUTED = flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
ENV_NAME = flags.DEFINE_string('env_name', 'Pong', 'What environment to run.')
SEED = flags.DEFINE_integer('seed', 0, 'Random seed (experiment).')
NUM_ACTOR_STEPS = flags.DEFINE_integer(
'num_steps', 1_000_000,
'Number of environment steps to run the agent for.')
_BATCH_SIZE = 32
_SEQUENCE_LENGTH = _SEQUENCE_PERIOD = 20 # Avoids overlapping sequences.
def build_experiment_config():
"""Builds IMPALA experiment config which can be executed in different ways."""
# Create an environment, grab the spec, and use it to create networks.
env_name = ENV_NAME.value
def env_factory(seed):
del seed
return helpers.make_atari_environment(
level=env_name,
sticky_actions=True,
zero_discount_on_life_loss=False,
oar_wrapper=True)
# Construct the agent.
num_learner_steps = NUM_ACTOR_STEPS.value // (_SEQUENCE_PERIOD * _BATCH_SIZE)
lr_schedule = optax.linear_schedule(2e-4, 0., num_learner_steps)
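  # With the default flags this is 1_000_000 // (20 * 32) = 1562 learner
  # steps, so the learning rate anneals linearly from 2e-4 to 0 over training.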
config = impala.IMPALAConfig(
batch_size=_BATCH_SIZE,
sequence_length=_SEQUENCE_LENGTH,
sequence_period=_SEQUENCE_PERIOD,
learning_rate=lr_schedule,
entropy_cost=5e-3,
max_abs_reward=1.,
)
return experiments.ExperimentConfig(
builder=impala_builder.IMPALABuilder(config),
environment_factory=env_factory,
network_factory=impala.make_atari_networks,
seed=SEED.value,
max_num_actor_steps=NUM_ACTOR_STEPS.value)
def main(_):
experiment_config = build_experiment_config()
if RUN_DISTRIBUTED.value:
program = experiments.make_distributed_experiment(
experiment=experiment_config,
num_actors=4 if lp_utils.is_local_run() else 256)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(experiment_config)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/rl_discrete/run_impala.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running DQN on discrete control tasks."""
from absl import flags
from acme.agents.jax import dqn
from acme.agents.jax.dqn import losses
import helpers
from absl import app
from acme.jax import experiments
from acme.utils import lp_utils
import launchpad as lp
RUN_DISTRIBUTED = flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
ENV_NAME = flags.DEFINE_string('env_name', 'Pong', 'What environment to run')
SEED = flags.DEFINE_integer('seed', 0, 'Random seed.')
NUM_STEPS = flags.DEFINE_integer('num_steps', 1_000_000,
'Number of env steps to run.')
def build_experiment_config():
"""Builds DQN experiment config which can be executed in different ways."""
# Create an environment, grab the spec, and use it to create networks.
env_name = ENV_NAME.value
def env_factory(seed):
del seed
return helpers.make_atari_environment(
level=env_name, sticky_actions=True, zero_discount_on_life_loss=False)
# Construct the agent.
config = dqn.DQNConfig(
discount=0.99,
eval_epsilon=0.,
learning_rate=5e-5,
n_step=1,
epsilon=0.01,
target_update_period=2000,
min_replay_size=20_000,
max_replay_size=1_000_000,
samples_per_insert=8,
batch_size=32)
loss_fn = losses.QLearning(
discount=config.discount, max_abs_reward=1.)
dqn_builder = dqn.DQNBuilder(config, loss_fn=loss_fn)
return experiments.ExperimentConfig(
builder=dqn_builder,
environment_factory=env_factory,
network_factory=helpers.make_dqn_atari_network,
seed=SEED.value,
max_num_actor_steps=NUM_STEPS.value)
def main(_):
experiment_config = build_experiment_config()
if RUN_DISTRIBUTED.value:
program = experiments.make_distributed_experiment(
experiment=experiment_config,
num_actors=4 if lp_utils.is_local_run() else 128)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(experiment_config)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/rl_discrete/run_dqn.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared helpers for different discrete RL experiment flavours."""
import functools
import os
from typing import Tuple
from absl import flags
from acme import specs
from acme import wrappers
from acme.agents.jax import dqn
from acme.jax import networks as networks_lib
from acme.jax import utils
import atari_py # pylint:disable=unused-import
import dm_env
import gym
import haiku as hk
import jax.numpy as jnp
FLAGS = flags.FLAGS
def make_atari_environment(
level: str = 'Pong',
sticky_actions: bool = True,
zero_discount_on_life_loss: bool = False,
oar_wrapper: bool = False,
num_stacked_frames: int = 4,
flatten_frame_stack: bool = False,
grayscaling: bool = True,
to_float: bool = True,
scale_dims: Tuple[int, int] = (84, 84),
) -> dm_env.Environment:
"""Loads the Atari environment."""
# Internal logic.
version = 'v0' if sticky_actions else 'v4'
level_name = f'{level}NoFrameskip-{version}'
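  # E.g. level='Pong' with sticky_actions=True resolves to
  # 'PongNoFrameskip-v0'.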
env = gym.make(level_name, full_action_space=True)
wrapper_list = [
wrappers.GymAtariAdapter,
functools.partial(
wrappers.AtariWrapper,
scale_dims=scale_dims,
to_float=to_float,
max_episode_len=108_000,
num_stacked_frames=num_stacked_frames,
flatten_frame_stack=flatten_frame_stack,
grayscaling=grayscaling,
zero_discount_on_life_loss=zero_discount_on_life_loss,
),
wrappers.SinglePrecisionWrapper,
]
if oar_wrapper:
# E.g. IMPALA and R2D2 use this particular variant.
wrapper_list.append(wrappers.ObservationActionRewardWrapper)
return wrappers.wrap_all(env, wrapper_list)
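def _example_atari_environment_usage() -> None:
  """Minimal usage sketch for `make_atari_environment` (not called anywhere)."""
  # Assumes the Atari ROMs for the requested level are installed locally.
  env = make_atari_environment(level='Pong')
  spec = specs.make_environment_spec(env)
  # With the defaults: four stacked, grayscaled 84x84 frames scaled to [0, 1].
  print(spec.observations)
  print(spec.actions)
  timestep = env.reset()
  print(type(timestep.observation))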
def make_dqn_atari_network(
environment_spec: specs.EnvironmentSpec) -> dqn.DQNNetworks:
"""Creates networks for training DQN on Atari."""
def network(inputs):
model = hk.Sequential([
networks_lib.AtariTorso(),
hk.nets.MLP([512, environment_spec.actions.num_values]),
])
return model(inputs)
network_hk = hk.without_apply_rng(hk.transform(network))
obs = utils.add_batch_dim(utils.zeros_like(environment_spec.observations))
network = networks_lib.FeedForwardNetwork(
init=lambda rng: network_hk.init(rng, obs), apply=network_hk.apply)
typed_network = networks_lib.non_stochastic_network_to_typed(network)
return dqn.DQNNetworks(policy_network=typed_network)
def make_distributional_dqn_atari_network(
environment_spec: specs.EnvironmentSpec,
num_quantiles: int) -> dqn.DQNNetworks:
"""Creates networks for training Distributional DQN on Atari."""
def network(inputs):
model = hk.Sequential([
networks_lib.AtariTorso(),
hk.nets.MLP([512, environment_spec.actions.num_values * num_quantiles]),
])
q_dist = model(inputs).reshape(-1, environment_spec.actions.num_values,
num_quantiles)
q_values = jnp.mean(q_dist, axis=-1)
return q_values, q_dist
network_hk = hk.without_apply_rng(hk.transform(network))
obs = utils.add_batch_dim(utils.zeros_like(environment_spec.observations))
network = networks_lib.FeedForwardNetwork(
init=lambda rng: network_hk.init(rng, obs), apply=network_hk.apply)
typed_network = networks_lib.non_stochastic_network_to_typed(network)
return dqn.DQNNetworks(policy_network=typed_network)
|
acme-master
|
examples/baselines/rl_discrete/helpers.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running QR-DQN on discrete control tasks."""
from absl import flags
from acme import specs
from acme.agents.jax import dqn
from acme.agents.jax.dqn import losses
import helpers
from absl import app
from acme.jax import experiments
from acme.utils import lp_utils
import launchpad as lp
RUN_DISTRIBUTED = flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
ENV_NAME = flags.DEFINE_string('env_name', 'Pong', 'What environment to run')
SEED = flags.DEFINE_integer('seed', 0, 'Random seed.')
NUM_STEPS = flags.DEFINE_integer('num_steps', 1_000_000,
'Number of env steps to run.')
NUM_QUANTILES = flags.DEFINE_integer('num_quantiles', 200,
                                     'Number of quantiles to use.')
def build_experiment_config():
"""Builds QR-DQN experiment config which can be executed in different ways."""
# Create an environment, grab the spec, and use it to create networks.
env_name = ENV_NAME.value
def env_factory(seed):
del seed
return helpers.make_atari_environment(
level=env_name, sticky_actions=True, zero_discount_on_life_loss=False)
num_quantiles = NUM_QUANTILES.value
def network_factory(environment_spec: specs.EnvironmentSpec):
return helpers.make_distributional_dqn_atari_network(
environment_spec=environment_spec, num_quantiles=num_quantiles)
# Construct the agent.
config = dqn.DQNConfig(
discount=0.99,
eval_epsilon=0.,
learning_rate=5e-5,
n_step=3,
epsilon=0.01 / 32,
target_update_period=2000,
min_replay_size=20_000,
max_replay_size=1_000_000,
samples_per_insert=8,
batch_size=32)
loss_fn = losses.QrDqn(num_atoms=NUM_QUANTILES.value, huber_param=1.)
dqn_builder = dqn.DistributionalDQNBuilder(config, loss_fn=loss_fn)
return experiments.ExperimentConfig(
builder=dqn_builder,
environment_factory=env_factory,
network_factory=network_factory,
seed=SEED.value,
max_num_actor_steps=NUM_STEPS.value)
def main(_):
experiment_config = build_experiment_config()
if RUN_DISTRIBUTED.value:
program = experiments.make_distributed_experiment(
experiment=experiment_config,
num_actors=4 if lp_utils.is_local_run() else 16)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(experiment_config)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/rl_discrete/run_qr_dqn.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running D4PG on continuous control tasks."""
from absl import flags
from acme.agents.jax import d4pg
import helpers
from absl import app
from acme.jax import experiments
from acme.utils import lp_utils
import launchpad as lp
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
flags.DEFINE_string('env_name', 'gym:HalfCheetah-v2', 'What environment to run')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_integer('num_steps', 1_000_000, 'Number of env steps to run.')
flags.DEFINE_integer('eval_every', 50_000, 'How often to run evaluation.')
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
def build_experiment_config():
"""Builds D4PG experiment config which can be executed in different ways."""
# Create an environment, grab the spec, and use it to create networks.
suite, task = FLAGS.env_name.split(':', 1)
  # Bounds of the distributional critic. Rewards in DM control environments
  # are normalized, whereas gym locomotion rewards are not, hence the
  # different scales.
vmax_values = {
'gym': 1000.,
'control': 150.,
}
vmax = vmax_values[suite]
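  # E.g. for the default 'gym:HalfCheetah-v2' the critic support below is
  # [-1000, 1000].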
def network_factory(spec) -> d4pg.D4PGNetworks:
return d4pg.make_networks(
spec,
policy_layer_sizes=(256, 256, 256),
critic_layer_sizes=(256, 256, 256),
vmin=-vmax,
vmax=vmax,
)
# Configure the agent.
d4pg_config = d4pg.D4PGConfig(learning_rate=3e-4, sigma=0.2)
return experiments.ExperimentConfig(
builder=d4pg.D4PGBuilder(d4pg_config),
environment_factory=lambda seed: helpers.make_environment(suite, task),
network_factory=network_factory,
seed=FLAGS.seed,
max_num_actor_steps=FLAGS.num_steps)
def main(_):
config = build_experiment_config()
if FLAGS.run_distributed:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=4)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(
experiment=config,
eval_every=FLAGS.eval_every,
num_eval_episodes=FLAGS.evaluation_episodes)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/rl_continuous/run_d4pg.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running SAC on continuous control tasks."""
from absl import flags
from acme import specs
from acme.agents.jax import normalization
from acme.agents.jax import sac
from acme.agents.jax.sac import builder
import helpers
from absl import app
from acme.jax import experiments
from acme.utils import lp_utils
import launchpad as lp
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
flags.DEFINE_string('env_name', 'gym:HalfCheetah-v2', 'What environment to run')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_integer('num_steps', 1_000_000, 'Number of env steps to run.')
flags.DEFINE_integer('eval_every', 50_000, 'How often to run evaluation.')
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
def build_experiment_config():
"""Builds SAC experiment config which can be executed in different ways."""
# Create an environment, grab the spec, and use it to create networks.
suite, task = FLAGS.env_name.split(':', 1)
environment = helpers.make_environment(suite, task)
environment_spec = specs.make_environment_spec(environment)
network_factory = (
lambda spec: sac.make_networks(spec, hidden_layer_sizes=(256, 256, 256)))
# Construct the agent.
config = sac.SACConfig(
learning_rate=3e-4,
n_step=2,
target_entropy=sac.target_entropy_from_env_spec(environment_spec),
input_normalization=normalization.NormalizationConfig())
sac_builder = builder.SACBuilder(config)
return experiments.ExperimentConfig(
builder=sac_builder,
environment_factory=lambda seed: helpers.make_environment(suite, task),
network_factory=network_factory,
seed=FLAGS.seed,
max_num_actor_steps=FLAGS.num_steps)
def main(_):
config = build_experiment_config()
if FLAGS.run_distributed:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=4)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(
experiment=config,
eval_every=FLAGS.eval_every,
num_eval_episodes=FLAGS.evaluation_episodes)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/rl_continuous/run_sac.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running Distributional MPO on continuous control tasks."""
from absl import flags
from acme import specs
from acme.agents.jax import mpo
from acme.agents.jax.mpo import types as mpo_types
import helpers
from absl import app
from acme.jax import experiments
from acme.utils import lp_utils
import launchpad as lp
RUN_DISTRIBUTED = flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
ENV_NAME = flags.DEFINE_string(
'env_name', 'gym:HalfCheetah-v2',
'What environment to run on, in the format {gym|control}:{task}, '
'where "control" refers to the DM control suite. DM Control tasks are '
'further split into {domain_name}:{task_name}.')
SEED = flags.DEFINE_integer('seed', 0, 'Random seed.')
NUM_STEPS = flags.DEFINE_integer(
'num_steps', 1_000_000,
'Number of environment steps to run the experiment for.')
EVAL_EVERY = flags.DEFINE_integer(
'eval_every', 50_000,
'How often (in actor environment steps) to run evaluation episodes.')
EVAL_EPISODES = flags.DEFINE_integer(
'evaluation_episodes', 10,
'Number of evaluation episodes to run periodically.')
def build_experiment_config():
"""Builds MPO experiment config which can be executed in different ways."""
suite, task = ENV_NAME.value.split(':', 1)
critic_type = mpo.CriticType.CATEGORICAL
vmax_values = {
'gym': 1600.,
'control': 150.,
}
vmax = vmax_values[suite]
def network_factory(spec: specs.EnvironmentSpec) -> mpo.MPONetworks:
return mpo.make_control_networks(
spec,
policy_layer_sizes=(256, 256, 256),
critic_layer_sizes=(256, 256, 256),
policy_init_scale=0.5,
vmin=-vmax,
vmax=vmax,
critic_type=critic_type)
# Configure and construct the agent builder.
config = mpo.MPOConfig(
critic_type=critic_type,
policy_loss_config=mpo_types.GaussianPolicyLossConfig(epsilon_mean=0.01),
samples_per_insert=64,
learning_rate=3e-4,
experience_type=mpo_types.FromTransitions(n_step=4))
agent_builder = mpo.MPOBuilder(config, sgd_steps_per_learner_step=1)
return experiments.ExperimentConfig(
builder=agent_builder,
environment_factory=lambda _: helpers.make_environment(suite, task),
network_factory=network_factory,
seed=SEED.value,
max_num_actor_steps=NUM_STEPS.value)
def main(_):
config = build_experiment_config()
if RUN_DISTRIBUTED.value:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=4)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(
experiment=config,
eval_every=EVAL_EVERY.value,
num_eval_episodes=EVAL_EPISODES.value)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/rl_continuous/run_dmpo.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running PPO on continuous control tasks."""
from absl import flags
from acme.agents.jax import ppo
import helpers
from absl import app
from acme.jax import experiments
from acme.utils import lp_utils
import launchpad as lp
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
flags.DEFINE_string('env_name', 'gym:HalfCheetah-v2', 'What environment to run')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_integer('num_steps', 1_000_000, 'Number of env steps to run.')
flags.DEFINE_integer('eval_every', 50_000, 'How often to run evaluation.')
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
flags.DEFINE_integer('num_distributed_actors', 64,
'Number of actors to use in the distributed setting.')
def build_experiment_config():
"""Builds PPO experiment config which can be executed in different ways."""
# Create an environment, grab the spec, and use it to create networks.
suite, task = FLAGS.env_name.split(':', 1)
config = ppo.PPOConfig(
normalize_advantage=True,
normalize_value=True,
obs_normalization_fns_factory=ppo.build_mean_std_normalizer)
ppo_builder = ppo.PPOBuilder(config)
layer_sizes = (256, 256, 256)
return experiments.ExperimentConfig(
builder=ppo_builder,
environment_factory=lambda seed: helpers.make_environment(suite, task),
network_factory=lambda spec: ppo.make_networks(spec, layer_sizes),
seed=FLAGS.seed,
max_num_actor_steps=FLAGS.num_steps)
def main(_):
config = build_experiment_config()
if FLAGS.run_distributed:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=FLAGS.num_distributed_actors)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(
experiment=config,
eval_every=FLAGS.eval_every,
num_eval_episodes=FLAGS.evaluation_episodes)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/rl_continuous/run_ppo.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running SAC on continuous control tasks."""
from absl import flags
from acme.agents.jax import td3
import helpers
from absl import app
from acme.jax import experiments
from acme.utils import lp_utils
import launchpad as lp
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
flags.DEFINE_string('env_name', 'gym:HalfCheetah-v2', 'What environment to run')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_integer('num_steps', 1_000_000, 'Number of env steps to run.')
flags.DEFINE_integer('eval_every', 50_000, 'How often to run evaluation.')
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
def build_experiment_config():
"""Builds TD3 experiment config which can be executed in different ways."""
# Create an environment, grab the spec, and use it to create networks.
suite, task = FLAGS.env_name.split(':', 1)
network_factory = (
lambda spec: td3.make_networks(spec, hidden_layer_sizes=(256, 256, 256)))
# Construct the agent.
config = td3.TD3Config(
policy_learning_rate=3e-4,
critic_learning_rate=3e-4,
)
td3_builder = td3.TD3Builder(config)
# pylint:disable=g-long-lambda
return experiments.ExperimentConfig(
builder=td3_builder,
environment_factory=lambda seed: helpers.make_environment(suite, task),
network_factory=network_factory,
seed=FLAGS.seed,
max_num_actor_steps=FLAGS.num_steps)
# pylint:enable=g-long-lambda
def main(_):
config = build_experiment_config()
if FLAGS.run_distributed:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=4)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(
experiment=config,
eval_every=FLAGS.eval_every,
num_eval_episodes=FLAGS.evaluation_episodes)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/rl_continuous/run_td3.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running Mixture of Gaussian MPO on continuous control tasks."""
from absl import flags
from acme import specs
from acme.agents.jax import mpo
from acme.agents.jax.mpo import types as mpo_types
import helpers
from absl import app
from acme.jax import experiments
from acme.utils import lp_utils
import launchpad as lp
RUN_DISTRIBUTED = flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
ENV_NAME = flags.DEFINE_string(
'env_name', 'gym:HalfCheetah-v2',
'What environment to run on, in the format {gym|control}:{task}, '
'where "control" refers to the DM control suite. DM Control tasks are '
'further split into {domain_name}:{task_name}.')
SEED = flags.DEFINE_integer('seed', 0, 'Random seed.')
NUM_STEPS = flags.DEFINE_integer(
'num_steps', 1_000_000,
'Number of environment steps to run the experiment for.')
EVAL_EVERY = flags.DEFINE_integer(
'eval_every', 50_000,
'How often (in actor environment steps) to run evaluation episodes.')
EVAL_EPISODES = flags.DEFINE_integer(
'evaluation_episodes', 10,
'Number of evaluation episodes to run periodically.')
def build_experiment_config():
"""Builds MPO experiment config which can be executed in different ways."""
suite, task = ENV_NAME.value.split(':', 1)
critic_type = mpo.CriticType.MIXTURE_OF_GAUSSIANS
def network_factory(spec: specs.EnvironmentSpec) -> mpo.MPONetworks:
return mpo.make_control_networks(
spec,
policy_layer_sizes=(256, 256, 256),
critic_layer_sizes=(256, 256, 256),
policy_init_scale=0.5,
critic_type=critic_type)
# Configure and construct the agent builder.
config = mpo.MPOConfig(
critic_type=critic_type,
policy_loss_config=mpo_types.GaussianPolicyLossConfig(epsilon_mean=0.01),
samples_per_insert=64,
learning_rate=3e-4,
experience_type=mpo_types.FromTransitions(n_step=4))
agent_builder = mpo.MPOBuilder(config, sgd_steps_per_learner_step=1)
return experiments.ExperimentConfig(
builder=agent_builder,
environment_factory=lambda _: helpers.make_environment(suite, task),
network_factory=network_factory,
seed=SEED.value,
max_num_actor_steps=NUM_STEPS.value)
def main(_):
config = build_experiment_config()
if RUN_DISTRIBUTED.value:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=4)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(
experiment=config,
eval_every=EVAL_EVERY.value,
num_eval_episodes=EVAL_EPISODES.value)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/rl_continuous/run_mogmpo.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared helpers for rl_continuous experiments."""
from acme import wrappers
import dm_env
import gym
_VALID_TASK_SUITES = ('gym', 'control')
def make_environment(suite: str, task: str) -> dm_env.Environment:
"""Makes the requested continuous control environment.
Args:
suite: One of 'gym' or 'control'.
task: Task to load. If `suite` is 'control', the task must be formatted as
f'{domain_name}:{task_name}'
Returns:
An environment satisfying the dm_env interface expected by Acme agents.
"""
if suite not in _VALID_TASK_SUITES:
raise ValueError(
f'Unsupported suite: {suite}. Expected one of {_VALID_TASK_SUITES}')
if suite == 'gym':
env = gym.make(task)
# Make sure the environment obeys the dm_env.Environment interface.
env = wrappers.GymWrapper(env)
elif suite == 'control':
    # Load dm_control lazily to avoid requiring a MuJoCo license when unused.
from dm_control import suite as dm_suite # pylint: disable=g-import-not-at-top
domain_name, task_name = task.split(':')
env = dm_suite.load(domain_name, task_name)
env = wrappers.ConcatObservationWrapper(env)
# Wrap the environment so the expected continuous action spec is [-1, 1].
# Note: this is a no-op on 'control' tasks.
env = wrappers.CanonicalSpecWrapper(env, clip=True)
env = wrappers.SinglePrecisionWrapper(env)
return env
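def _example_usage() -> None:
  """Minimal usage sketch for `make_environment` (not called anywhere)."""
  # Assumes MuJoCo and the gym task 'HalfCheetah-v2' are available locally.
  import numpy as np  # pylint: disable=g-import-not-at-top
  env = make_environment('gym', 'HalfCheetah-v2')
  action_spec = env.action_spec()
  timestep = env.reset()
  # After CanonicalSpecWrapper the action spec is bounded in [-1, 1].
  action = np.zeros(action_spec.shape, dtype=action_spec.dtype)
  timestep = env.step(action)
  del timestep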
|
acme-master
|
examples/baselines/rl_continuous/helpers.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running MPO on continuous control tasks."""
from absl import flags
from acme import specs
from acme.agents.jax import mpo
from acme.agents.jax.mpo import types as mpo_types
import helpers
from absl import app
from acme.jax import experiments
from acme.utils import lp_utils
import launchpad as lp
RUN_DISTRIBUTED = flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
ENV_NAME = flags.DEFINE_string(
'env_name', 'gym:HalfCheetah-v2',
'What environment to run on, in the format {gym|control}:{task}, '
'where "control" refers to the DM control suite. DM Control tasks are '
'further split into {domain_name}:{task_name}.')
SEED = flags.DEFINE_integer('seed', 0, 'Random seed.')
NUM_STEPS = flags.DEFINE_integer(
'num_steps', 1_000_000,
'Number of environment steps to run the experiment for.')
EVAL_EVERY = flags.DEFINE_integer(
'eval_every', 50_000,
'How often (in actor environment steps) to run evaluation episodes.')
EVAL_EPISODES = flags.DEFINE_integer(
'evaluation_episodes', 10,
'Number of evaluation episodes to run periodically.')
def build_experiment_config():
"""Builds MPO experiment config which can be executed in different ways."""
suite, task = ENV_NAME.value.split(':', 1)
critic_type = mpo.CriticType.NONDISTRIBUTIONAL
def network_factory(spec: specs.EnvironmentSpec) -> mpo.MPONetworks:
return mpo.make_control_networks(
spec,
policy_layer_sizes=(256, 256, 256),
critic_layer_sizes=(256, 256, 256),
policy_init_scale=0.5,
critic_type=critic_type)
# Configure and construct the agent builder.
config = mpo.MPOConfig(
critic_type=critic_type,
policy_loss_config=mpo_types.GaussianPolicyLossConfig(epsilon_mean=0.01),
samples_per_insert=64,
learning_rate=3e-4,
experience_type=mpo_types.FromTransitions(n_step=4))
agent_builder = mpo.MPOBuilder(config, sgd_steps_per_learner_step=1)
return experiments.ExperimentConfig(
builder=agent_builder,
environment_factory=lambda _: helpers.make_environment(suite, task),
network_factory=network_factory,
seed=SEED.value,
max_num_actor_steps=NUM_STEPS.value)
def main(_):
config = build_experiment_config()
if RUN_DISTRIBUTED.value:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=4)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(
experiment=config,
eval_every=EVAL_EVERY.value,
num_eval_episodes=EVAL_EPISODES.value)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/rl_continuous/run_mpo.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running SQIL on continuous control tasks.
SQIL: Imitation Learning via Reinforcement Learning with Sparse Rewards
Reddy et al., 2019 https://arxiv.org/abs/1905.11108
"""
from absl import flags
from acme import specs
from acme.agents.jax import sac
from acme.agents.jax import sqil
from acme.datasets import tfds
import helpers
from absl import app
from acme.jax import experiments
from acme.utils import lp_utils
import dm_env
import jax
import launchpad as lp
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
flags.DEFINE_string('env_name', 'HalfCheetah-v2', 'What environment to run')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_integer('num_steps', 1_000_000, 'Number of env steps to run.')
flags.DEFINE_integer('eval_every', 50_000, 'How often to run evaluation.')
flags.DEFINE_integer('num_demonstrations', 11,
'Number of demonstration trajectories.')
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
def build_experiment_config() -> experiments.ExperimentConfig:
"""Returns a configuration for SQIL experiments."""
# Create an environment, grab the spec, and use it to create networks.
env_name = FLAGS.env_name
def environment_factory(seed: int) -> dm_env.Environment:
del seed
return helpers.make_environment(task=env_name)
dummy_seed = 1
environment = environment_factory(dummy_seed)
environment_spec = specs.make_environment_spec(environment)
# Construct the agent.
sac_config = sac.SACConfig(
target_entropy=sac.target_entropy_from_env_spec(environment_spec),
min_replay_size=1,
samples_per_insert_tolerance_rate=2.0)
sac_builder = sac.SACBuilder(sac_config)
# Create demonstrations function.
dataset_name = helpers.get_dataset_name(FLAGS.env_name)
num_demonstrations = FLAGS.num_demonstrations
def make_demonstrations(batch_size: int, seed: int = 0):
transitions_iterator = tfds.get_tfds_dataset(
dataset_name, num_demonstrations, env_spec=environment_spec)
return tfds.JaxInMemoryRandomSampleIterator(
transitions_iterator, jax.random.PRNGKey(seed), batch_size)
sqil_builder = sqil.SQILBuilder(sac_builder, sac_config.batch_size,
make_demonstrations)
return experiments.ExperimentConfig(
builder=sqil_builder,
environment_factory=environment_factory,
network_factory=sac.make_networks,
seed=FLAGS.seed,
max_num_actor_steps=FLAGS.num_steps)
def main(_):
config = build_experiment_config()
if FLAGS.run_distributed:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=4)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(
experiment=config,
eval_every=FLAGS.eval_every,
num_eval_episodes=FLAGS.evaluation_episodes)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/imitation/run_sqil.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running GAIL/DAC on continuous control tasks.
GAIL: Generative Adversarial Imitation Learning
Ho & Ermon, 2016 https://arxiv.org/abs/1606.03476
DAC: Discriminator Actor-Critic: Addressing Sample Inefficiency And Reward
Bias In Adversarial Imitation Learning
Kostrikov et al., 2018 https://arxiv.org/pdf/1809.02925.pdf
Following DAC we use TD3 as the direct RL agent, but we do not use the extra
absorbing state described in DAC, and we use a different reward that
corresponds to GAIL.
The network structure and hyperparameters of the discriminator are the ones
defined in the following paper:
What Matters for Adversarial Imitation Learning?, Orsini et al., 2021
https://arxiv.org/pdf/2106.00672.pdf.
The changes lead to an improved agent able to learn from a single demonstration
(even for Humanoid).
"""
from absl import flags
from acme import specs
from acme.agents.jax import ail
from acme.agents.jax import td3
from acme.datasets import tfds
import helpers
from absl import app
from acme.jax import experiments
from acme.jax import networks as networks_lib
from acme.utils import lp_utils
import dm_env
import haiku as hk
import jax
import launchpad as lp
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
flags.DEFINE_string('env_name', 'HalfCheetah-v2', 'What environment to run')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_integer('num_steps', 1_000_000, 'Number of env steps to run.')
flags.DEFINE_integer('eval_every', 50_000, 'How often to run evaluation.')
flags.DEFINE_integer('num_demonstrations', 11,
'Number of demonstration trajectories.')
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
def build_experiment_config() -> experiments.ExperimentConfig:
"""Returns a configuration for GAIL/DAC experiments."""
# Create an environment, grab the spec, and use it to create networks.
environment = helpers.make_environment(task=FLAGS.env_name)
environment_spec = specs.make_environment_spec(environment)
# Create the direct RL agent.
td3_config = td3.TD3Config(
min_replay_size=1,
samples_per_insert_tolerance_rate=2.0)
td3_networks = td3.make_networks(environment_spec)
# Create the discriminator.
def discriminator(*args, **kwargs) -> networks_lib.Logits:
return ail.DiscriminatorModule(
environment_spec=environment_spec,
use_action=True,
use_next_obs=False,
network_core=ail.DiscriminatorMLP(
hidden_layer_sizes=[64,],
spectral_normalization_lipschitz_coeff=1.)
)(*args, **kwargs)
discriminator_transformed = hk.without_apply_rng(
hk.transform_with_state(discriminator))
def network_factory(
environment_spec: specs.EnvironmentSpec) -> ail.AILNetworks:
return ail.AILNetworks(
ail.make_discriminator(environment_spec, discriminator_transformed),
# reward balance = 0 corresponds to the GAIL reward: -ln(1-D)
imitation_reward_fn=ail.rewards.gail_reward(reward_balance=0.),
direct_rl_networks=td3_networks)
# Create demonstrations function.
dataset_name = helpers.get_dataset_name(FLAGS.env_name)
num_demonstrations = FLAGS.num_demonstrations
def make_demonstrations(batch_size, seed: int = 0):
transitions_iterator = tfds.get_tfds_dataset(
dataset_name, num_demonstrations, env_spec=environment_spec)
return tfds.JaxInMemoryRandomSampleIterator(
transitions_iterator, jax.random.PRNGKey(seed), batch_size)
# Create DAC agent.
ail_config = ail.AILConfig(direct_rl_batch_size=td3_config.batch_size *
td3_config.num_sgd_steps_per_step)
env_name = FLAGS.env_name
def environment_factory(seed: int) -> dm_env.Environment:
del seed
return helpers.make_environment(task=env_name)
td3_builder = td3.TD3Builder(td3_config)
dac_loss = ail.losses.add_gradient_penalty(
ail.losses.gail_loss(entropy_coefficient=1e-3),
gradient_penalty_coefficient=10.,
gradient_penalty_target=1.)
ail_builder = ail.AILBuilder(
rl_agent=td3_builder,
config=ail_config,
discriminator_loss=dac_loss,
make_demonstrations=make_demonstrations)
return experiments.ExperimentConfig(
builder=ail_builder,
environment_factory=environment_factory,
network_factory=network_factory,
seed=FLAGS.seed,
max_num_actor_steps=FLAGS.num_steps)
def main(_):
config = build_experiment_config()
if FLAGS.run_distributed:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=4)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(
experiment=config,
eval_every=FLAGS.eval_every,
num_eval_episodes=FLAGS.evaluation_episodes)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/imitation/run_gail.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running PWIL on continuous control tasks.
The network structure and hyperparameters are the same as the ones used in the
PWIL paper: https://arxiv.org/pdf/2006.04678.pdf.
"""
from typing import Sequence
from absl import flags
from acme import specs
from acme.agents.jax import d4pg
from acme.agents.jax import pwil
from acme.datasets import tfds
import helpers
from absl import app
from acme.jax import experiments
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.utils import lp_utils
import dm_env
import haiku as hk
import jax.numpy as jnp
import launchpad as lp
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
flags.DEFINE_string('env_name', 'HalfCheetah-v2', 'What environment to run')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_integer('num_steps', 1_000_000, 'Number of env steps to run.')
flags.DEFINE_integer('eval_every', 50_000, 'Evaluation period in env steps.')
flags.DEFINE_integer('num_demonstrations', 11,
'Number of demonstration trajectories.')
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
def make_networks(
spec: specs.EnvironmentSpec,
policy_layer_sizes: Sequence[int] = (256, 256, 256),
critic_layer_sizes: Sequence[int] = (512, 512, 256),
vmin: float = -150.,
vmax: float = 150.,
num_atoms: int = 201,
) -> d4pg.D4PGNetworks:
"""Creates networks used by the agent."""
action_spec = spec.actions
num_dimensions = np.prod(action_spec.shape, dtype=int)
critic_atoms = jnp.linspace(vmin, vmax, num_atoms)
def _actor_fn(obs):
network = hk.Sequential([
utils.batch_concat,
networks_lib.LayerNormMLP(list(policy_layer_sizes) + [num_dimensions]),
networks_lib.TanhToSpec(action_spec),
])
return network(obs)
def _critic_fn(obs, action):
network = hk.Sequential([
utils.batch_concat,
networks_lib.LayerNormMLP(layer_sizes=[*critic_layer_sizes, num_atoms]),
])
value = network([obs, action])
return value, critic_atoms
policy = hk.without_apply_rng(hk.transform(_actor_fn))
critic = hk.without_apply_rng(hk.transform(_critic_fn))
# Create dummy observations and actions to create network parameters.
dummy_action = utils.zeros_like(spec.actions)
dummy_obs = utils.zeros_like(spec.observations)
dummy_action = utils.add_batch_dim(dummy_action)
dummy_obs = utils.add_batch_dim(dummy_obs)
return d4pg.D4PGNetworks(
policy_network=networks_lib.FeedForwardNetwork(
lambda rng: policy.init(rng, dummy_obs), policy.apply),
critic_network=networks_lib.FeedForwardNetwork(
lambda rng: critic.init(rng, dummy_obs, dummy_action), critic.apply))
def build_experiment_config() -> experiments.ExperimentConfig:
"""Returns a configuration for PWIL experiments."""
# Create an environment, grab the spec, and use it to create networks.
env_name = FLAGS.env_name
def environment_factory(seed: int) -> dm_env.Environment:
del seed
return helpers.make_environment(task=env_name)
dummy_seed = 1
environment = environment_factory(dummy_seed)
environment_spec = specs.make_environment_spec(environment)
# Create d4pg agent
d4pg_config = d4pg.D4PGConfig(
learning_rate=5e-5, sigma=0.2, samples_per_insert=256)
d4pg_builder = d4pg.D4PGBuilder(config=d4pg_config)
# Create demonstrations function.
dataset_name = helpers.get_dataset_name(FLAGS.env_name)
num_demonstrations = FLAGS.num_demonstrations
def make_demonstrations():
transitions_iterator = tfds.get_tfds_dataset(
dataset_name, num_demonstrations, env_spec=environment_spec)
return pwil.PWILDemonstrations(
demonstrations=transitions_iterator, episode_length=1000)
# Construct PWIL agent
pwil_config = pwil.PWILConfig(num_transitions_rb=0)
pwil_builder = pwil.PWILBuilder(
rl_agent=d4pg_builder,
config=pwil_config,
demonstrations_fn=make_demonstrations)
return experiments.ExperimentConfig(
builder=pwil_builder,
environment_factory=environment_factory,
network_factory=make_networks,
seed=FLAGS.seed,
max_num_actor_steps=FLAGS.num_steps)
def main(_):
config = build_experiment_config()
if FLAGS.run_distributed:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=4)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(
experiment=config,
eval_every=FLAGS.eval_every,
num_eval_episodes=FLAGS.evaluation_episodes)
if __name__ == '__main__':
app.run(main)
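# --- Illustrative sketch (not part of the original file) ---
# The critic above returns logits over the fixed support
# `critic_atoms = jnp.linspace(vmin, vmax, num_atoms)`. A minimal sketch,
# assuming logits of shape [batch, num_atoms], of how a scalar Q-value is
# recovered from that categorical value distribution:
import jax
def _expected_q_value(logits: jnp.ndarray, atoms: jnp.ndarray) -> jnp.ndarray:
  """Returns the per-example expectation sum_i p_i * z_i over the atoms."""
  probabilities = jax.nn.softmax(logits, axis=-1)
  return jnp.sum(probabilities * atoms, axis=-1)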
|
acme-master
|
examples/baselines/imitation/run_pwil.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running IQLearn on continuous control tasks.
This handles the online imitation setting.
"""
from typing import Callable, Iterator
from absl import flags
from acme import specs
from acme import types
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import iq_learn
from acme.datasets import tfds
import helpers
from absl import app
from acme.jax import experiments
from acme.jax import types as jax_types
from acme.utils import lp_utils
import dm_env
import jax
import launchpad as lp
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'run_distributed',
True,
(
'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.'
),
)
flags.DEFINE_string('env_name', 'HalfCheetah-v2', 'What environment to run')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_integer('num_steps', 1_000_000, 'Number of env steps to run.')
flags.DEFINE_integer('eval_every', 50_000, 'Evaluation period in env steps.')
flags.DEFINE_integer(
'num_demonstrations', 11, 'Number of demonstration trajectories.'
)
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
def _make_environment_factory(env_name: str) -> jax_types.EnvironmentFactory:
"""Returns the environment factory for the given environment."""
def environment_factory(seed: int) -> dm_env.Environment:
del seed
return helpers.make_environment(task=env_name)
return environment_factory
def _make_demonstration_dataset_factory(
dataset_name: str,
environment_spec: specs.EnvironmentSpec,
num_demonstrations: int,
random_key: jax_types.PRNGKey,
) -> Callable[[jax_types.PRNGKey], Iterator[types.Transition]]:
"""Returns the demonstration dataset factory for the given dataset."""
def demonstration_dataset_factory(
batch_size: int,
) -> Iterator[types.Transition]:
"""Returns an iterator of demonstration samples."""
transitions_iterator = tfds.get_tfds_dataset(
dataset_name, num_episodes=num_demonstrations, env_spec=environment_spec
)
return tfds.JaxInMemoryRandomSampleIterator(
transitions_iterator, key=random_key, batch_size=batch_size
)
return demonstration_dataset_factory
def build_experiment_config() -> (
experiments.ExperimentConfig[
iq_learn.IQLearnNetworks,
actor_core_lib.ActorCore,
iq_learn.IQLearnSample,
]
):
"""Returns a configuration for IQLearn experiments."""
# Create an environment, grab the spec, and use it to create networks.
env_name = FLAGS.env_name
environment_factory = _make_environment_factory(env_name)
dummy_seed = 1
environment = environment_factory(dummy_seed)
environment_spec = specs.make_environment_spec(environment)
# Create demonstrations function.
dataset_name = helpers.get_dataset_name(env_name)
make_demonstrations = _make_demonstration_dataset_factory(
dataset_name,
environment_spec,
FLAGS.num_demonstrations,
jax.random.PRNGKey(FLAGS.seed),
)
# Construct the agent
iq_learn_config = iq_learn.IQLearnConfig(alpha=1.0)
iq_learn_builder = iq_learn.IQLearnBuilder(
config=iq_learn_config, make_demonstrations=make_demonstrations
)
return experiments.ExperimentConfig(
builder=iq_learn_builder,
environment_factory=environment_factory,
network_factory=iq_learn.make_networks,
seed=FLAGS.seed,
max_num_actor_steps=FLAGS.num_steps,
)
def main(_):
config = build_experiment_config()
if FLAGS.run_distributed:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=4
)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(
experiment=config,
eval_every=FLAGS.eval_every,
num_eval_episodes=FLAGS.evaluation_episodes,
)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/baselines/imitation/run_iqlearn.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers functions for imitation tasks."""
from typing import Tuple
from acme import wrappers
import dm_env
import gym
import numpy as np
import tensorflow as tf
DATASET_NAMES = {
'HalfCheetah-v2': 'locomotion/halfcheetah_sac_1M_single_policy_stochastic',
'Ant-v2': 'locomotion/ant_sac_1M_single_policy_stochastic',
'Walker2d-v2': 'locomotion/walker2d_sac_1M_single_policy_stochastic',
'Hopper-v2': 'locomotion/hopper_sac_1M_single_policy_stochastic',
'Humanoid-v2': 'locomotion/humanoid_sac_15M_single_policy_stochastic'
}
def get_dataset_name(env_name: str) -> str:
return DATASET_NAMES[env_name]
def get_observation_stats(transitions_iterator: tf.data.Dataset
) -> Tuple[np.float64, np.float64]:
"""Returns scale and shift of the observations in demonstrations."""
observations = [step.observation for step in transitions_iterator]
mean = np.mean(observations, axis=0, dtype='float64')
std = np.std(observations, axis=0, dtype='float64')
  shift = -mean
# The std is set to 1 if the observation values are below a threshold.
# This prevents normalizing observation values that are constant (which can
# be problematic with e.g. demonstrations coming from a different version
# of the environment and where the constant values are slightly different).
scale = 1 / ((std < 1e-6) + std)
return shift, scale
def make_environment(
task: str = 'MountainCarContinuous-v0') -> dm_env.Environment:
"""Creates an OpenAI Gym environment."""
# Load the gym environment.
environment = gym.make(task)
# Make sure the environment obeys the dm_env.Environment interface.
environment = wrappers.GymWrapper(environment)
# Clip the action returned by the agent to the environment spec.
environment = wrappers.CanonicalSpecWrapper(environment, clip=True)
environment = wrappers.SinglePrecisionWrapper(environment)
return environment
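# --- Illustrative sketch (not part of the original file) ---
# `get_observation_stats` returns a (shift, scale) pair that is meant to be
# applied as `(observation + shift) * scale`, standardizing each dimension
# while leaving near-constant dimensions unscaled. A minimal usage sketch:
def _normalize_observation(observation: np.ndarray, shift: np.ndarray,
                           scale: np.ndarray) -> np.ndarray:
  """Applies the shift-then-scale normalization used by the BC example."""
  return (observation + shift) * scale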
|
acme-master
|
examples/baselines/imitation/helpers.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running BC on continuous control tasks.
The network structure consists of a 3-layer MLP with ReLU activation
and dropout.
"""
from typing import Callable, Iterator, Tuple
from absl import flags
from acme import specs
from acme import types
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import bc
from acme.datasets import tfds
import helpers
from absl import app
from acme.jax import experiments
from acme.jax import types as jax_types
from acme.jax import utils
from acme.utils import lp_utils
import dm_env
import haiku as hk
import launchpad as lp
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
# Agent flags
flags.DEFINE_string('env_name', 'HalfCheetah-v2', 'What environment to run')
flags.DEFINE_integer('num_demonstrations', 11,
'Number of demonstration trajectories.')
flags.DEFINE_integer('num_bc_steps', 100_000, 'Number of bc learning steps.')
flags.DEFINE_integer('num_steps', 0, 'Number of environment steps.')
flags.DEFINE_integer('batch_size', 64, 'Batch size.')
flags.DEFINE_float('learning_rate', 1e-4, 'Optimizer learning rate.')
flags.DEFINE_float('dropout_rate', 0.1, 'Dropout rate of bc network.')
flags.DEFINE_integer('num_layers', 3, 'Num layers of bc network.')
flags.DEFINE_integer('num_units', 256, 'Num units of bc network layers.')
flags.DEFINE_integer('eval_every', 5000, 'Evaluation period.')
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
flags.DEFINE_integer('seed', 0, 'Random seed for learner and evaluator.')
def _make_demonstration_dataset_factory(
dataset_name: str, num_demonstrations: int,
environment_spec: specs.EnvironmentSpec, batch_size: int
) -> Callable[[jax_types.PRNGKey], Iterator[types.Transition]]:
"""Returns the demonstration dataset factory for the given dataset."""
def demonstration_dataset_factory(
random_key: jax_types.PRNGKey) -> Iterator[types.Transition]:
"""Returns an iterator of demonstration samples."""
transitions_iterator = tfds.get_tfds_dataset(
dataset_name, num_demonstrations, env_spec=environment_spec)
return tfds.JaxInMemoryRandomSampleIterator(
transitions_iterator, key=random_key, batch_size=batch_size)
return demonstration_dataset_factory
def _make_environment_factory(env_name: str) -> jax_types.EnvironmentFactory:
"""Returns the environment factory for the given environment."""
def environment_factory(seed: int) -> dm_env.Environment:
del seed
return helpers.make_environment(task=env_name)
return environment_factory
def _make_network_factory(
shift: Tuple[np.float64], scale: Tuple[np.float64], num_layers: int,
num_units: int,
dropout_rate: float) -> Callable[[specs.EnvironmentSpec], bc.BCNetworks]:
"""Returns the factory of networks to be used by the agent.
Args:
shift: Shift of the observations in demonstrations.
scale: Scale of the observations in demonstrations.
num_layers: Number of layers of the BC network.
num_units: Number of units of the BC network.
dropout_rate: Dropout rate of the BC network.
Returns:
Network factory.
"""
def network_factory(spec: specs.EnvironmentSpec) -> bc.BCNetworks:
"""Creates the network used by the agent."""
action_spec = spec.actions
num_dimensions = np.prod(action_spec.shape, dtype=int)
def actor_fn(obs, is_training=False, key=None):
obs += shift
obs *= scale
hidden_layers = [num_units] * num_layers
mlp = hk.Sequential([
hk.nets.MLP(hidden_layers + [num_dimensions]),
])
if is_training:
return mlp(obs, dropout_rate=dropout_rate, rng=key)
else:
return mlp(obs)
policy = hk.without_apply_rng(hk.transform(actor_fn))
# Create dummy observations to create network parameters.
dummy_obs = utils.zeros_like(spec.observations)
dummy_obs = utils.add_batch_dim(dummy_obs)
policy_network = bc.BCPolicyNetwork(lambda key: policy.init(key, dummy_obs),
policy.apply)
return bc.BCNetworks(policy_network=policy_network)
return network_factory
def build_experiment_config() -> experiments.OfflineExperimentConfig[
bc.BCNetworks, actor_core_lib.FeedForwardPolicy, types.Transition]:
"""Returns a config for BC experiments."""
# Create an environment, grab the spec, and use it to create networks.
environment = helpers.make_environment(task=FLAGS.env_name)
environment_spec = specs.make_environment_spec(environment)
# Define the demonstrations factory.
dataset_name = helpers.get_dataset_name(FLAGS.env_name)
demonstration_dataset_factory = _make_demonstration_dataset_factory(
dataset_name, FLAGS.num_demonstrations, environment_spec,
FLAGS.batch_size)
# Load the demonstrations to compute the stats.
dataset = tfds.get_tfds_dataset(
dataset_name, FLAGS.num_demonstrations, env_spec=environment_spec)
shift, scale = helpers.get_observation_stats(dataset)
# Define the network factory.
network_factory = _make_network_factory( # pytype: disable=wrong-arg-types # numpy-scalars
shift=shift,
scale=scale,
num_layers=FLAGS.num_layers,
num_units=FLAGS.num_units,
dropout_rate=FLAGS.dropout_rate)
# Create the BC builder.
bc_config = bc.BCConfig(learning_rate=FLAGS.learning_rate)
bc_builder = bc.BCBuilder(bc_config, loss_fn=bc.mse())
environment_factory = _make_environment_factory(FLAGS.env_name)
return experiments.OfflineExperimentConfig(
builder=bc_builder,
network_factory=network_factory,
demonstration_dataset_factory=demonstration_dataset_factory,
environment_factory=environment_factory,
max_num_learner_steps=FLAGS.num_bc_steps,
seed=FLAGS.seed,
environment_spec=environment_spec,
)
def main(_):
config = build_experiment_config()
if FLAGS.run_distributed:
program = experiments.make_distributed_offline_experiment(experiment=config)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_offline_experiment(
experiment=config,
eval_every=FLAGS.eval_every,
num_eval_episodes=FLAGS.evaluation_episodes)
if __name__ == '__main__':
app.run(main)
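# --- Illustrative sketch (not part of the original file) ---
# The BC policy above enables dropout only when `is_training=True`, in which
# case `hk.nets.MLP` needs an explicit `rng`. A minimal, self-contained sketch
# of that train/eval asymmetry (the layer sizes below are arbitrary
# assumptions, not the flag values used above):
import jax
import jax.numpy as jnp
def _dropout_mlp_demo():
  """Shows the train-time (dropout) vs eval-time (deterministic) apply calls."""
  def forward(x, is_training: bool, key=None):
    mlp = hk.nets.MLP([16, 16, 4])
    if is_training:
      return mlp(x, dropout_rate=0.1, rng=key)
    return mlp(x)
  network = hk.without_apply_rng(hk.transform(forward))
  dummy = jnp.zeros((1, 8))
  params = network.init(jax.random.PRNGKey(0), dummy, is_training=False)
  train_out = network.apply(params, dummy, is_training=True,
                            key=jax.random.PRNGKey(1))
  eval_out = network.apply(params, dummy, is_training=False)
  return train_out, eval_out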
|
acme-master
|
examples/baselines/imitation/run_bc.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running a hyper sweep of DRQV2 + DMPO on the control suite."""
import functools
from typing import Dict, Sequence
from absl import app
from absl import flags
from acme import specs
from acme.agents.tf import dmpo
from acme.datasets import image_augmentation
import helpers
from acme.tf import networks
import launchpad as lp
import numpy as np
import sonnet as snt
import tensorflow as tf
# Flags which modify the behavior of the launcher.
FLAGS = flags.FLAGS
_MAX_ACTOR_STEPS = flags.DEFINE_integer(
'max_actor_steps', None,
'Number of actor steps to run; defaults to None for an endless loop.')
_DOMAIN = flags.DEFINE_string('domain', 'cartpole',
'Control suite domain name.')
_TASK = flags.DEFINE_string('task', 'balance', 'Control suite task name.')
def make_networks(
action_spec: specs.BoundedArray,
policy_layer_sizes: Sequence[int] = (50, 1024, 1024),
critic_layer_sizes: Sequence[int] = (50, 1024, 1024),
vmin: float = -150.,
vmax: float = 150.,
num_atoms: int = 51,
) -> Dict[str, snt.Module]:
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_network = snt.Sequential([
networks.LayerNormMLP(
policy_layer_sizes,
w_init=snt.initializers.Orthogonal(),
activation=tf.nn.relu,
activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions,
tanh_mean=False,
init_scale=1.0,
fixed_scale=False,
use_tfd_independent=True,
w_init=snt.initializers.Orthogonal())
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
critic_network = networks.CriticMultiplexer(
observation_network=snt.Sequential([
snt.Linear(critic_layer_sizes[0],
w_init=snt.initializers.Orthogonal()),
snt.LayerNorm(
axis=slice(1, None), create_scale=True, create_offset=True),
tf.nn.tanh]),
critic_network=snt.nets.MLP(
critic_layer_sizes[1:],
w_init=snt.initializers.Orthogonal(),
activation=tf.nn.relu,
activate_final=True),
action_network=networks.ClipToSpec(action_spec))
critic_network = snt.Sequential(
[critic_network,
networks.DiscreteValuedHead(vmin, vmax, num_atoms,
w_init=snt.initializers.Orthogonal())
])
observation_network = networks.DrQTorso()
return {
'policy': policy_network,
'critic': critic_network,
'observation': observation_network,
}
def main(_):
# Configure the environment factory with requested task.
make_environment = functools.partial(
helpers.make_environment,
domain_name=_DOMAIN.value,
task_name=_TASK.value,
from_pixels=True,
frames_to_stack=3,
flatten_stack=True,
num_action_repeats=2)
# Construct the program.
program_builder = dmpo.DistributedDistributionalMPO(
make_environment,
make_networks,
target_policy_update_period=100,
max_actor_steps=_MAX_ACTOR_STEPS.value,
num_actors=4,
samples_per_insert=256,
n_step=3, # Reduce the n-step to account for action-repeat.
observation_augmentation=image_augmentation.pad_and_crop,
)
# Launch experiment.
lp.launch(programs=program_builder.build())
if __name__ == '__main__':
app.run(main)
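# --- Illustrative sketch (not part of the original file) ---
# `image_augmentation.pad_and_crop` implements the DrQ-style random shift: pad
# the observation by a few pixels and take a random crop back to the original
# size. A minimal TensorFlow sketch of that idea, assuming a statically shaped
# [H, W, C] image and a hypothetical pad of 4 pixels:
def _pad_and_crop_sketch(image: tf.Tensor, pad: int = 4) -> tf.Tensor:
  """Randomly shifts an image by padding and re-cropping to its own shape."""
  height, width, channels = image.shape
  padded = tf.pad(image, [[pad, pad], [pad, pad], [0, 0]], mode='SYMMETRIC')
  return tf.image.random_crop(padded, size=[height, width, channels])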
|
acme-master
|
examples/tf/control_suite/lp_dmpo_pixels_drqv2.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch DMPO agent on the control suite from pixels via Launchpad."""
import functools
from typing import Dict, Sequence
from absl import app
from absl import flags
from acme import specs
from acme import types
from acme.agents.tf import dmpo
import helpers
from acme.tf import networks
import launchpad as lp
import numpy as np
import sonnet as snt
# Flags which modify the behavior of the launcher.
FLAGS = flags.FLAGS
_MAX_ACTOR_STEPS = flags.DEFINE_integer(
'max_actor_steps', None,
'Number of actor steps to run; defaults to None for an endless loop.')
_DOMAIN = flags.DEFINE_string('domain', 'cartpole',
'Control suite domain name.')
_TASK = flags.DEFINE_string('task', 'balance', 'Control suite task name.')
def make_networks(
action_spec: specs.BoundedArray,
policy_layer_sizes: Sequence[int] = (256, 256, 256),
critic_layer_sizes: Sequence[int] = (512, 512, 256),
vmin: float = -150.,
vmax: float = 150.,
num_atoms: int = 51,
) -> Dict[str, types.TensorTransformation]:
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions,
tanh_mean=False,
init_scale=1.0,
fixed_scale=False,
use_tfd_independent=True)
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
critic_network = networks.CriticMultiplexer(
critic_network=networks.LayerNormMLP(
critic_layer_sizes, activate_final=True),
action_network=networks.ClipToSpec(action_spec))
critic_network = snt.Sequential(
[critic_network,
networks.DiscreteValuedHead(vmin, vmax, num_atoms)])
observation_network = networks.ResNetTorso()
return {
'policy': policy_network,
'critic': critic_network,
'observation': observation_network,
}
def main(_):
# Configure the environment factory with requested task.
make_environment = functools.partial(
helpers.make_environment,
domain_name=_DOMAIN.value,
task_name=_TASK.value,
from_pixels=True,
frames_to_stack=3,
num_action_repeats=2)
# Construct the program.
program_builder = dmpo.DistributedDistributionalMPO(
make_environment,
make_networks,
n_step=3, # Reduce the n-step to account for action-repeat.
max_actor_steps=_MAX_ACTOR_STEPS.value,
num_actors=4)
# Launch experiment.
lp.launch(
programs=program_builder.build()
)
if __name__ == '__main__':
app.run(main)
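# --- Illustrative sketch (not part of the original file) ---
# `frames_to_stack=3` relies on `wrappers.FrameStackingWrapper`, which keeps a
# rolling buffer of recent observations so that velocities can be inferred
# from pixels. A minimal numpy sketch of the underlying idea:
import collections
class _FrameStacker:
  """Stacks the most recent `num_frames` observations along a new last axis."""
  def __init__(self, num_frames: int = 3):
    self._frames = collections.deque(maxlen=num_frames)
  def step(self, frame: np.ndarray) -> np.ndarray:
    self._frames.append(frame)
    # Pad with the oldest frame until the buffer is full (episode start).
    while len(self._frames) < self._frames.maxlen:
      self._frames.appendleft(self._frames[0])
    return np.stack(self._frames, axis=-1)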
|
acme-master
|
examples/tf/control_suite/lp_dmpo_pixels.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch DDPG agent on the control suite via Launchpad."""
import functools
from typing import Dict, Sequence
from absl import app
from absl import flags
from acme import specs
from acme import types
from acme.agents.tf import ddpg
import helpers
from acme.tf import networks
from acme.tf import utils as tf2_utils
import launchpad as lp
import numpy as np
import sonnet as snt
# Flags which modify the behavior of the launcher.
FLAGS = flags.FLAGS
_MAX_ACTOR_STEPS = flags.DEFINE_integer(
'max_actor_steps', None,
'Number of actor steps to run; defaults to None for an endless loop.')
_DOMAIN = flags.DEFINE_string('domain', 'cartpole',
'Control suite domain name.')
_TASK = flags.DEFINE_string('task', 'balance', 'Control suite task name.')
def make_networks(
action_spec: specs.BoundedArray,
policy_layer_sizes: Sequence[int] = (256, 256, 256),
critic_layer_sizes: Sequence[int] = (512, 512, 256),
) -> Dict[str, types.TensorTransformation]:
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.NearZeroInitializedLinear(num_dimensions),
networks.TanhToSpec(action_spec)
])
critic_network = snt.Sequential([
# The multiplexer concatenates the observations/actions.
networks.CriticMultiplexer(),
networks.LayerNormMLP(critic_layer_sizes, activate_final=True),
networks.NearZeroInitializedLinear(1),
])
return {
'policy': policy_network,
'critic': critic_network,
'observation': tf2_utils.batch_concat,
}
def main(_):
# Configure the environment factory with requested task.
make_environment = functools.partial(
helpers.make_environment,
domain_name=_DOMAIN.value,
task_name=_TASK.value)
# Construct the program.
program_builder = ddpg.DistributedDDPG(
make_environment,
make_networks,
max_actor_steps=_MAX_ACTOR_STEPS.value,
num_actors=4)
# Launch experiment.
lp.launch(programs=program_builder.build())
if __name__ == '__main__':
app.run(main)
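# --- Illustrative sketch (not part of the original file) ---
# `networks.TanhToSpec` squashes the unbounded policy output into the bounded
# action spec. A minimal numpy sketch of that mapping, assuming per-dimension
# `minimum`/`maximum` arrays taken from a `specs.BoundedArray`:
def _tanh_to_bounds(raw_action: np.ndarray, minimum: np.ndarray,
                    maximum: np.ndarray) -> np.ndarray:
  """Maps unbounded network outputs into [minimum, maximum] via tanh."""
  squashed = np.tanh(raw_action)  # Now in [-1, 1].
  scale = (maximum - minimum) / 2.0
  offset = (maximum + minimum) / 2.0
  return squashed * scale + offset  # Now in [minimum, maximum].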
|
acme-master
|
examples/tf/control_suite/lp_ddpg.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch MPO agent on the control suite via Launchpad."""
import functools
from typing import Dict, Sequence
from absl import app
from absl import flags
from acme import specs
from acme import types
from acme.agents.tf import mpo
import helpers
from acme.tf import networks
from acme.tf import utils as tf2_utils
import launchpad as lp
import numpy as np
import sonnet as snt
# Flags which modify the behavior of the launcher.
FLAGS = flags.FLAGS
_MAX_ACTOR_STEPS = flags.DEFINE_integer(
'max_actor_steps', None,
'Number of actor steps to run; defaults to None for an endless loop.')
_DOMAIN = flags.DEFINE_string('domain', 'cartpole',
'Control suite domain name.')
_TASK = flags.DEFINE_string('task', 'balance', 'Control suite task name.')
def make_networks(
action_spec: specs.BoundedArray,
policy_layer_sizes: Sequence[int] = (256, 256, 256),
critic_layer_sizes: Sequence[int] = (512, 512, 256),
) -> Dict[str, types.TensorTransformation]:
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions, init_scale=0.7, use_tfd_independent=True)
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
multiplexer = networks.CriticMultiplexer(
action_network=networks.ClipToSpec(action_spec))
critic_network = snt.Sequential([
multiplexer,
networks.LayerNormMLP(critic_layer_sizes, activate_final=True),
networks.NearZeroInitializedLinear(1),
])
return {
'policy': policy_network,
'critic': critic_network,
'observation': tf2_utils.batch_concat,
}
def main(_):
# Configure the environment factory with requested task.
make_environment = functools.partial(
helpers.make_environment,
domain_name=_DOMAIN.value,
task_name=_TASK.value)
# Construct the program.
program_builder = mpo.DistributedMPO(
make_environment,
make_networks,
target_policy_update_period=25,
max_actor_steps=_MAX_ACTOR_STEPS.value,
num_actors=4)
lp.launch(programs=program_builder.build())
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/tf/control_suite/lp_mpo.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch DMPO agent on the control suite via Launchpad."""
import functools
from typing import Dict, Sequence
from absl import app
from absl import flags
from acme import specs
from acme import types
from acme.agents.tf import dmpo
import helpers
from acme.tf import networks
from acme.tf import utils as tf2_utils
import launchpad as lp
import numpy as np
import sonnet as snt
# Flags which modify the behavior of the launcher.
FLAGS = flags.FLAGS
_MAX_ACTOR_STEPS = flags.DEFINE_integer(
'max_actor_steps', None,
'Number of actor steps to run; defaults to None for an endless loop.')
_DOMAIN = flags.DEFINE_string('domain', 'cartpole',
'Control suite domain name.')
_TASK = flags.DEFINE_string('task', 'balance', 'Control suite task name.')
def make_networks(
action_spec: specs.BoundedArray,
policy_layer_sizes: Sequence[int] = (256, 256, 256),
critic_layer_sizes: Sequence[int] = (512, 512, 256),
vmin: float = -150.,
vmax: float = 150.,
num_atoms: int = 51,
) -> Dict[str, types.TensorTransformation]:
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions, init_scale=0.7, use_tfd_independent=True)
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
multiplexer = networks.CriticMultiplexer(
action_network=networks.ClipToSpec(action_spec))
critic_network = snt.Sequential([
multiplexer,
networks.LayerNormMLP(critic_layer_sizes, activate_final=True),
networks.DiscreteValuedHead(vmin, vmax, num_atoms)
])
return {
'policy': policy_network,
'critic': critic_network,
'observation': tf2_utils.batch_concat,
}
def main(_):
# Configure the environment factory with requested task.
make_environment = functools.partial(
helpers.make_environment,
domain_name=_DOMAIN.value,
task_name=_TASK.value)
# Construct the program.
program_builder = dmpo.DistributedDistributionalMPO(
make_environment,
make_networks,
target_policy_update_period=25,
max_actor_steps=_MAX_ACTOR_STEPS.value,
num_actors=4)
# Launch experiment.
lp.launch(programs=program_builder.build())
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/tf/control_suite/lp_dmpo.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Control suite environment factory."""
from typing import Optional
from acme import wrappers
import dm_env
def make_environment(
evaluation: bool = False,
domain_name: str = 'cartpole',
task_name: str = 'balance',
from_pixels: bool = False,
frames_to_stack: int = 3,
flatten_stack: bool = False,
num_action_repeats: Optional[int] = None,
) -> dm_env.Environment:
"""Implements a control suite environment factory."""
  # Load dm_control lazily so a MuJoCo license is only required when used.
from dm_control import suite # pylint: disable=g-import-not-at-top
from acme.wrappers import mujoco as mujoco_wrappers # pylint: disable=g-import-not-at-top
# Load raw control suite environment.
environment = suite.load(domain_name, task_name)
# Maybe wrap to get pixel observations from environment state.
if from_pixels:
environment = mujoco_wrappers.MujocoPixelWrapper(environment)
environment = wrappers.FrameStackingWrapper(
environment, num_frames=frames_to_stack, flatten=flatten_stack)
environment = wrappers.CanonicalSpecWrapper(environment, clip=True)
if num_action_repeats:
environment = wrappers.ActionRepeatWrapper(
environment, num_repeats=num_action_repeats)
environment = wrappers.SinglePrecisionWrapper(environment)
if evaluation:
# The evaluator in the distributed agent will set this to True so you can
# use this clause to, e.g., set up video recording by the evaluator.
pass
return environment
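# --- Illustrative sketch (not part of the original file) ---
# `wrappers.ActionRepeatWrapper` applies each agent action several times and
# accumulates the rewards, which is why the pixel launchers reduce `n_step`.
# A minimal sketch of the repeat-and-accumulate logic, assuming a dm_env
# environment that has already been reset:
def _repeat_action(environment: dm_env.Environment, action,
                   num_repeats: int) -> dm_env.TimeStep:
  """Steps the environment `num_repeats` times, summing the rewards."""
  timestep = environment.step(action)
  total_reward = timestep.reward or 0.0
  for _ in range(num_repeats - 1):
    if timestep.last():
      break
    timestep = environment.step(action)
    total_reward += timestep.reward or 0.0
  return timestep._replace(reward=total_reward)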
|
acme-master
|
examples/tf/control_suite/helpers.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch D4PG agent on the control suite via Launchpad."""
import functools
from typing import Callable, Dict, Sequence, Union
from absl import app
from absl import flags
from acme import specs
from acme.agents.tf import d4pg
import helpers
from acme.tf import networks
from acme.tf import utils as tf2_utils
import launchpad as lp
import numpy as np
import sonnet as snt
import tensorflow as tf
# Flags which modify the behavior of the launcher.
FLAGS = flags.FLAGS
_MAX_ACTOR_STEPS = flags.DEFINE_integer(
'max_actor_steps', None,
'Number of actor steps to run; defaults to None for an endless loop.')
_DOMAIN = flags.DEFINE_string('domain', 'cartpole',
'Control suite domain name.')
_TASK = flags.DEFINE_string('task', 'balance', 'Control suite task name.')
def make_networks(
action_spec: specs.BoundedArray,
policy_layer_sizes: Sequence[int] = (256, 256, 256),
critic_layer_sizes: Sequence[int] = (512, 512, 256),
vmin: float = -150.,
vmax: float = 150.,
num_atoms: int = 51,
) -> Dict[str, Union[snt.Module, Callable[[tf.Tensor], tf.Tensor]]]:
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.NearZeroInitializedLinear(num_dimensions),
networks.TanhToSpec(action_spec)
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
critic_network = snt.Sequential([
networks.CriticMultiplexer(),
networks.LayerNormMLP(critic_layer_sizes, activate_final=True),
networks.DiscreteValuedHead(vmin, vmax, num_atoms),
])
return {
'policy': policy_network,
'critic': critic_network,
'observation': tf2_utils.batch_concat,
}
def main(_):
# Configure the environment factory with requested task.
make_environment = functools.partial(
helpers.make_environment,
domain_name=_DOMAIN.value,
task_name=_TASK.value)
# Construct the program.
program_builder = d4pg.DistributedD4PG(
make_environment,
make_networks,
max_actor_steps=_MAX_ACTOR_STEPS.value,
num_actors=4)
# Launch experiment.
lp.launch(programs=program_builder.build())
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/tf/control_suite/lp_d4pg.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example BC running on BSuite."""
from absl import app
from absl import flags
import acme
from acme import specs
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import bc
from acme.examples.offline import bc_utils
from acme.jax import utils
from acme.jax import variable_utils
from acme.utils import loggers
import haiku as hk
import jax
import optax
import rlax
# Agent flags
flags.DEFINE_float('learning_rate', 1e-3, 'Learning rate.')
flags.DEFINE_integer('batch_size', 64, 'Batch size.')
flags.DEFINE_float('evaluation_epsilon', 0.,
'Epsilon for the epsilon greedy in the evaluation agent.')
flags.DEFINE_integer('evaluate_every', 20, 'Evaluation period.')
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
flags.DEFINE_integer('seed', 0, 'Random seed for learner and evaluator.')
FLAGS = flags.FLAGS
def main(_):
# Create an environment and grab the spec.
environment = bc_utils.make_environment()
environment_spec = specs.make_environment_spec(environment)
# Unwrap the environment to get the demonstrations.
dataset = bc_utils.make_demonstrations(environment.environment,
FLAGS.batch_size)
dataset = dataset.as_numpy_iterator()
# Create the networks to optimize.
bc_networks = bc_utils.make_network(environment_spec)
key = jax.random.PRNGKey(FLAGS.seed)
key, key1 = jax.random.split(key, 2)
loss_fn = bc.logp()
learner = bc.BCLearner(
networks=bc_networks,
random_key=key1,
loss_fn=loss_fn,
optimizer=optax.adam(FLAGS.learning_rate),
prefetching_iterator=utils.sharded_prefetch(dataset),
num_sgd_steps_per_step=1)
def evaluator_network(
params: hk.Params, key: jax.Array, observation: jax.Array
) -> jax.Array:
dist_params = bc_networks.policy_network.apply(params, observation)
return rlax.epsilon_greedy(FLAGS.evaluation_epsilon).sample(
key, dist_params)
actor_core = actor_core_lib.batched_feed_forward_to_actor_core(
evaluator_network)
variable_client = variable_utils.VariableClient(
learner, 'policy', device='cpu')
evaluator = actors.GenericActor(
actor_core, key, variable_client, backend='cpu')
eval_loop = acme.EnvironmentLoop(
environment=environment,
actor=evaluator,
logger=loggers.TerminalLogger('evaluation', time_delta=0.))
# Run the environment loop.
while True:
for _ in range(FLAGS.evaluate_every):
learner.step()
eval_loop.run(FLAGS.evaluation_episodes)
if __name__ == '__main__':
app.run(main)
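# --- Illustrative sketch (not part of the original file) ---
# The evaluator above samples actions epsilon-greedily from the BC policy's
# action preferences via `rlax.epsilon_greedy`. A minimal plain-JAX sketch of
# what that sampling rule does, assuming an unbatched preference vector:
import jax.numpy as jnp
def _epsilon_greedy_sample(key: jax.Array, preferences: jax.Array,
                           epsilon: float) -> jax.Array:
  """With probability epsilon picks a uniform action, otherwise the argmax."""
  explore_key, uniform_key = jax.random.split(key)
  num_actions = preferences.shape[-1]
  random_action = jax.random.randint(uniform_key, (), 0, num_actions)
  greedy_action = jnp.argmax(preferences, axis=-1)
  explore = jax.random.uniform(explore_key) < epsilon
  return jnp.where(explore, random_action, greedy_action)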
|
acme-master
|
examples/offline/run_bc_jax.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example offline TD3 running on locomotion datasets (mujoco) from D4rl."""
from absl import app
from absl import flags
import acme
from acme import specs
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import td3
from acme.datasets import tfds
from acme.examples.offline import helpers as gym_helpers
from acme.jax import variable_utils
from acme.types import Transition
from acme.utils import loggers
import haiku as hk
import jax
import optax
import reverb
import rlds
import tensorflow as tf
import tree
# Agent flags
flags.DEFINE_integer('batch_size', 64, 'Batch size.')
flags.DEFINE_integer('evaluate_every', 20, 'Evaluation period.')
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
flags.DEFINE_integer(
'num_demonstrations', 10,
'Number of demonstration episodes to load from the dataset. If None, loads the full dataset.'
)
flags.DEFINE_integer('seed', 0, 'Random seed for learner and evaluator.')
# TD3 specific flags.
flags.DEFINE_float('discount', 0.99, 'Discount.')
flags.DEFINE_float('policy_learning_rate', 3e-4, 'Policy learning rate.')
flags.DEFINE_float('critic_learning_rate', 3e-4, 'Critic learning rate.')
flags.DEFINE_float('bc_alpha', 2.5,
                   'Add a bc regularization term to the policy loss. '
                   'If set to None, TD3 is run without bc regularization.')
flags.DEFINE_bool(
'use_sarsa_target', True,
'Compute on-policy target using iterator actions rather than sampled '
'actions.'
)
# Environment flags.
flags.DEFINE_string('env_name', 'HalfCheetah-v2',
'Gym mujoco environment name.')
flags.DEFINE_string(
'dataset_name', 'd4rl_mujoco_halfcheetah/v2-medium',
'D4rl dataset name. Can be any locomotion dataset from '
'https://www.tensorflow.org/datasets/catalog/overview#d4rl.')
FLAGS = flags.FLAGS
def _add_next_action_extras(double_transitions: Transition
) -> reverb.ReplaySample:
# As TD3 is online by default, it expects an iterator over replay samples.
info = tree.map_structure(lambda dtype: tf.ones([], dtype),
reverb.SampleInfo.tf_dtypes())
return reverb.ReplaySample(
info=info,
data=Transition(
observation=double_transitions.observation[0],
action=double_transitions.action[0],
reward=double_transitions.reward[0],
discount=double_transitions.discount[0],
next_observation=double_transitions.next_observation[0],
extras={'next_action': double_transitions.action[1]}))
def main(_):
key = jax.random.PRNGKey(FLAGS.seed)
key_demonstrations, key_learner = jax.random.split(key, 2)
# Create an environment and grab the spec.
environment = gym_helpers.make_environment(task=FLAGS.env_name)
environment_spec = specs.make_environment_spec(environment)
# Get a demonstrations dataset with next_actions extra.
transitions = tfds.get_tfds_dataset(
FLAGS.dataset_name, FLAGS.num_demonstrations)
double_transitions = rlds.transformations.batch(
transitions, size=2, shift=1, drop_remainder=True)
transitions = double_transitions.map(_add_next_action_extras)
demonstrations = tfds.JaxInMemoryRandomSampleIterator(
transitions, key=key_demonstrations, batch_size=FLAGS.batch_size)
# Create the networks to optimize.
networks = td3.make_networks(environment_spec)
# Create the learner.
learner = td3.TD3Learner(
networks=networks,
random_key=key_learner,
discount=FLAGS.discount,
iterator=demonstrations,
policy_optimizer=optax.adam(FLAGS.policy_learning_rate),
critic_optimizer=optax.adam(FLAGS.critic_learning_rate),
twin_critic_optimizer=optax.adam(FLAGS.critic_learning_rate),
use_sarsa_target=FLAGS.use_sarsa_target,
bc_alpha=FLAGS.bc_alpha,
num_sgd_steps_per_step=1)
def evaluator_network(
params: hk.Params, key: jax.Array, observation: jax.Array
) -> jax.Array:
del key
return networks.policy_network.apply(params, observation)
actor_core = actor_core_lib.batched_feed_forward_to_actor_core(
evaluator_network)
variable_client = variable_utils.VariableClient(
learner, 'policy', device='cpu')
evaluator = actors.GenericActor(
actor_core, key, variable_client, backend='cpu')
eval_loop = acme.EnvironmentLoop(
environment=environment,
actor=evaluator,
logger=loggers.TerminalLogger('evaluation', time_delta=0.))
# Run the environment loop.
while True:
for _ in range(FLAGS.evaluate_every):
learner.step()
eval_loop.run(FLAGS.evaluation_episodes)
if __name__ == '__main__':
app.run(main)
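# --- Illustrative sketch (not part of the original file) ---
# `rlds.transformations.batch(..., size=2, shift=1)` pairs each transition with
# its successor so the SARSA-style target can use the action actually taken at
# the next step. A minimal numpy sketch of the same pairing over flat arrays
# (`observations` and `actions` are hypothetical stand-ins for dataset fields):
import numpy as np
def _pair_next_actions(observations: np.ndarray, actions: np.ndarray):
  """Pairs each transition with its successor's action."""
  return {
      'observation': observations[:-1],
      'action': actions[:-1],
      # The last transition has no successor, mirroring drop_remainder=True.
      'next_action': actions[1:],
  }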
|
acme-master
|
examples/offline/run_offline_td3_jax.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example running MBOP on D4RL dataset."""
import functools
from absl import app
from absl import flags
import acme
from acme import specs
from acme.agents.jax import mbop
from acme.datasets import tfds
from acme.examples.offline import helpers as gym_helpers
from acme.jax import running_statistics
from acme.utils import loggers
import jax
import optax
import tensorflow_datasets
# Training flags.
_NUM_NETWORKS = flags.DEFINE_integer('num_networks', 10,
'Number of ensemble networks.')
_LEARNING_RATE = flags.DEFINE_float('learning_rate', 1e-3, 'Learning rate.')
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 64, 'Batch size.')
_HIDDEN_LAYER_SIZES = flags.DEFINE_multi_integer('hidden_layer_sizes', [64, 64],
'Sizes of the hidden layers.')
_NUM_SGD_STEPS_PER_STEP = flags.DEFINE_integer(
'num_sgd_steps_per_step', 1,
'Denotes how many gradient updates perform per one learner step.')
_NUM_NORMALIZATION_BATCHES = flags.DEFINE_integer(
'num_normalization_batches', 50,
'Number of batches used for calculating the normalization statistics.')
_EVALUATE_EVERY = flags.DEFINE_integer('evaluate_every', 20,
'Evaluation period.')
_EVALUATION_EPISODES = flags.DEFINE_integer('evaluation_episodes', 10,
'Evaluation episodes.')
_SEED = flags.DEFINE_integer('seed', 0,
'Random seed for learner and evaluator.')
# Environment flags.
_ENV_NAME = flags.DEFINE_string('env_name', 'HalfCheetah-v2',
'Gym mujoco environment name.')
_DATASET_NAME = flags.DEFINE_string(
'dataset_name', 'd4rl_mujoco_halfcheetah/v2-medium',
'D4rl dataset name. Can be any locomotion dataset from '
'https://www.tensorflow.org/datasets/catalog/overview#d4rl.')
def main(_):
# Create an environment and grab the spec.
environment = gym_helpers.make_environment(task=_ENV_NAME.value)
spec = specs.make_environment_spec(environment)
key = jax.random.PRNGKey(_SEED.value)
key, dataset_key, evaluator_key = jax.random.split(key, 3)
# Load the dataset.
dataset = tensorflow_datasets.load(_DATASET_NAME.value)['train']
  # Convert the episode dataset into timestep-batched transitions for MBOP.
dataset = mbop.episodes_to_timestep_batched_transitions(
dataset, return_horizon=10)
dataset = tfds.JaxInMemoryRandomSampleIterator(
dataset, key=dataset_key, batch_size=_BATCH_SIZE.value)
# Apply normalization to the dataset.
mean_std = mbop.get_normalization_stats(dataset,
_NUM_NORMALIZATION_BATCHES.value)
apply_normalization = jax.jit(
functools.partial(running_statistics.normalize, mean_std=mean_std))
dataset = (apply_normalization(sample) for sample in dataset)
# Create the networks.
networks = mbop.make_networks(
spec, hidden_layer_sizes=tuple(_HIDDEN_LAYER_SIZES.value))
# Use the default losses.
losses = mbop.MBOPLosses()
def logger_fn(label: str, steps_key: str):
return loggers.make_default_logger(label, steps_key=steps_key)
def make_learner(name, logger_fn, counter, rng_key, dataset, network, loss):
return mbop.make_ensemble_regressor_learner(
name,
_NUM_NETWORKS.value,
logger_fn,
counter,
rng_key,
dataset,
network,
loss,
optax.adam(_LEARNING_RATE.value),
_NUM_SGD_STEPS_PER_STEP.value,
)
learner = mbop.MBOPLearner(networks, losses, dataset, key, logger_fn,
functools.partial(make_learner, 'world_model'),
functools.partial(make_learner, 'policy_prior'),
functools.partial(make_learner, 'n_step_return'))
planning_config = mbop.MPPIConfig()
assert planning_config.n_trajectories % _NUM_NETWORKS.value == 0, (
'Number of trajectories must be a multiple of the number of networks.')
actor_core = mbop.make_ensemble_actor_core(
networks, planning_config, spec, mean_std, use_round_robin=False)
evaluator = mbop.make_actor(actor_core, evaluator_key, learner)
eval_loop = acme.EnvironmentLoop(
environment=environment,
actor=evaluator,
logger=loggers.TerminalLogger('evaluation', time_delta=0.))
# Train the agent.
while True:
for _ in range(_EVALUATE_EVERY.value):
learner.step()
eval_loop.run(_EVALUATION_EPISODES.value)
if __name__ == '__main__':
app.run(main)
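# --- Illustrative sketch (not part of the original file) ---
# The normalization above estimates statistics from a fixed number of batches
# and then applies them to every sample. A minimal plain-JAX sketch of that
# two-stage pattern (the helpers below are illustrative and do not reproduce
# the actual `running_statistics` API):
import jax.numpy as jnp
def _estimate_mean_std(batches):
  """Computes per-dimension mean/std over a list of [batch, dim] arrays."""
  stacked = jnp.concatenate(list(batches), axis=0)
  return jnp.mean(stacked, axis=0), jnp.std(stacked, axis=0) + 1e-6
def _normalize(batch: jnp.ndarray, mean: jnp.ndarray,
               std: jnp.ndarray) -> jnp.ndarray:
  """Standardizes a batch with the precomputed statistics."""
  return (batch - mean) / std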
|
acme-master
|
examples/offline/run_mbop_jax.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running DQfD on BSuite in a single process.
"""
from absl import app
from absl import flags
import acme
from acme import specs
from acme import wrappers
from acme.agents.tf import dqfd
from acme.agents.tf.dqfd import bsuite_demonstrations
import bsuite
import sonnet as snt
# Bsuite flags
flags.DEFINE_string('bsuite_id', 'deep_sea/0', 'Bsuite id.')
flags.DEFINE_string('results_dir', '/tmp/bsuite', 'CSV results directory.')
flags.DEFINE_boolean('overwrite', False, 'Whether to overwrite csv results.')
# Agent flags
flags.DEFINE_float('demonstration_ratio', 0.5,
('Proportion of demonstration transitions in the replay '
'buffer.'))
flags.DEFINE_integer('n_step', 5,
('Number of steps to squash into a single transition.'))
flags.DEFINE_float('samples_per_insert', 8,
('Number of samples to take from replay for every insert '
'that is made.'))
flags.DEFINE_float('learning_rate', 1e-3, 'Learning rate.')
FLAGS = flags.FLAGS
def make_network(action_spec: specs.DiscreteArray) -> snt.Module:
return snt.Sequential([
snt.Flatten(),
snt.nets.MLP([50, 50, action_spec.num_values]),
])
def main(_):
# Create an environment and grab the spec.
raw_environment = bsuite.load_and_record_to_csv(
bsuite_id=FLAGS.bsuite_id,
results_dir=FLAGS.results_dir,
overwrite=FLAGS.overwrite,
)
environment = wrappers.SinglePrecisionWrapper(raw_environment)
environment_spec = specs.make_environment_spec(environment)
# Construct the agent.
agent = dqfd.DQfD(
environment_spec=environment_spec,
network=make_network(environment_spec.actions),
demonstration_dataset=bsuite_demonstrations.make_dataset(
raw_environment, stochastic=False),
demonstration_ratio=FLAGS.demonstration_ratio,
samples_per_insert=FLAGS.samples_per_insert,
learning_rate=FLAGS.learning_rate)
# Run the environment loop.
loop = acme.EnvironmentLoop(environment, agent)
loop.run(num_episodes=environment.bsuite_num_episodes) # pytype: disable=attribute-error
if __name__ == '__main__':
app.run(main)
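# --- Illustrative sketch (not part of the original file) ---
# `demonstration_ratio` controls the fraction of replayed transitions that come
# from demonstrations rather than from the agent's own experience. A minimal
# sketch of that mixing idea (the two iterators are hypothetical stand-ins for
# the underlying demonstration and replay datasets):
import random
def _mixed_samples(demo_iterator, replay_iterator, demonstration_ratio: float):
  """Yields transitions, drawing from demonstrations at the given ratio."""
  while True:
    if random.random() < demonstration_ratio:
      yield next(demo_iterator)
    else:
      yield next(replay_iterator)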
|
acme-master
|
examples/offline/run_dqfd.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example CQL running on locomotion datasets (mujoco) from D4rl."""
from absl import app
from absl import flags
import acme
from acme import specs
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import cql
from acme.datasets import tfds
from acme.examples.offline import helpers as gym_helpers
from acme.jax import variable_utils
from acme.utils import loggers
import haiku as hk
import jax
import optax
# Agent flags
flags.DEFINE_integer('batch_size', 64, 'Batch size.')
flags.DEFINE_integer('evaluate_every', 20, 'Evaluation period.')
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
flags.DEFINE_integer(
'num_demonstrations', 10,
'Number of demonstration episodes to load from the dataset. If None, loads the full dataset.'
)
flags.DEFINE_integer('seed', 0, 'Random seed for learner and evaluator.')
# CQL specific flags.
flags.DEFINE_float('policy_learning_rate', 3e-5, 'Policy learning rate.')
flags.DEFINE_float('critic_learning_rate', 3e-4, 'Critic learning rate.')
flags.DEFINE_float('fixed_cql_coefficient', None,
'Fixed CQL coefficient. If None, an adaptive one is used.')
flags.DEFINE_float('cql_lagrange_threshold', 10.,
'Lagrange threshold for the adaptive CQL coefficient.')
# Environment flags.
flags.DEFINE_string('env_name', 'HalfCheetah-v2',
'Gym mujoco environment name.')
flags.DEFINE_string(
'dataset_name', 'd4rl_mujoco_halfcheetah/v2-medium',
'D4rl dataset name. Can be any locomotion dataset from '
'https://www.tensorflow.org/datasets/catalog/overview#d4rl.')
FLAGS = flags.FLAGS
def main(_):
key = jax.random.PRNGKey(FLAGS.seed)
key_demonstrations, key_learner = jax.random.split(key, 2)
# Create an environment and grab the spec.
environment = gym_helpers.make_environment(task=FLAGS.env_name)
environment_spec = specs.make_environment_spec(environment)
# Get a demonstrations dataset.
transitions_iterator = tfds.get_tfds_dataset(FLAGS.dataset_name,
FLAGS.num_demonstrations)
demonstrations = tfds.JaxInMemoryRandomSampleIterator(
transitions_iterator, key=key_demonstrations, batch_size=FLAGS.batch_size)
# Create the networks to optimize.
networks = cql.make_networks(environment_spec)
# Create the learner.
learner = cql.CQLLearner(
batch_size=FLAGS.batch_size,
networks=networks,
random_key=key_learner,
policy_optimizer=optax.adam(FLAGS.policy_learning_rate),
critic_optimizer=optax.adam(FLAGS.critic_learning_rate),
fixed_cql_coefficient=FLAGS.fixed_cql_coefficient,
cql_lagrange_threshold=FLAGS.cql_lagrange_threshold,
demonstrations=demonstrations,
num_sgd_steps_per_step=1)
def evaluator_network(
params: hk.Params, key: jax.Array, observation: jax.Array
) -> jax.Array:
dist_params = networks.policy_network.apply(params, observation)
return networks.sample_eval(dist_params, key)
actor_core = actor_core_lib.batched_feed_forward_to_actor_core(
evaluator_network)
variable_client = variable_utils.VariableClient(
learner, 'policy', device='cpu')
evaluator = actors.GenericActor(
actor_core, key, variable_client, backend='cpu')
eval_loop = acme.EnvironmentLoop(
environment=environment,
actor=evaluator,
logger=loggers.TerminalLogger('evaluation', time_delta=0.))
# Run the environment loop.
while True:
for _ in range(FLAGS.evaluate_every):
learner.step()
eval_loop.run(FLAGS.evaluation_episodes)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/offline/run_cql_jax.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example CRR running on locomotion datasets (mujoco) from D4rl."""
from absl import app
from absl import flags
import acme
from acme import specs
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import crr
from acme.datasets import tfds
from acme.examples.offline import helpers as gym_helpers
from acme.jax import variable_utils
from acme.types import Transition
from acme.utils import loggers
import haiku as hk
import jax
import optax
import rlds
# Agent flags
flags.DEFINE_integer('batch_size', 64, 'Batch size.')
flags.DEFINE_integer('evaluate_every', 20, 'Evaluation period.')
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
flags.DEFINE_integer(
'num_demonstrations', 10,
'Number of demonstration episodes to load from the dataset. If None, loads the full dataset.'
)
flags.DEFINE_integer('seed', 0, 'Random seed for learner and evaluator.')
# CRR specific flags.
flags.DEFINE_float('policy_learning_rate', 3e-5, 'Policy learning rate.')
flags.DEFINE_float('critic_learning_rate', 3e-4, 'Critic learning rate.')
flags.DEFINE_float('discount', 0.99, 'Discount.')
flags.DEFINE_integer('target_update_period', 100, 'Target update period.')
flags.DEFINE_integer('grad_updates_per_batch', 1, 'Grad updates per batch.')
flags.DEFINE_bool(
'use_sarsa_target', True,
'Compute on-policy target using iterator actions rather than sampled '
'actions.'
)
# Environment flags.
flags.DEFINE_string('env_name', 'HalfCheetah-v2',
'Gym mujoco environment name.')
flags.DEFINE_string(
'dataset_name', 'd4rl_mujoco_halfcheetah/v2-medium',
'D4rl dataset name. Can be any locomotion dataset from '
'https://www.tensorflow.org/datasets/catalog/overview#d4rl.')
FLAGS = flags.FLAGS
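# NOTE: Schematic illustration only, not the CRRLearner internals. With
# `use_sarsa_target=True` the critic target is built from the next action that
# is stored in the dataset (see `_add_next_action_extras` below); with False it
# would instead use an action sampled from the current policy. A hypothetical
# one-step TD target under these assumptions:
def _td_target_sketch(reward, discount, next_q_data_action,
                      next_q_sampled_action, use_sarsa_target=True):
  """Returns r + discount * Q(s', a'), with a' from the data or the policy."""
  next_q = next_q_data_action if use_sarsa_target else next_q_sampled_action
  return reward + discount * next_q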
def _add_next_action_extras(double_transitions: Transition) -> Transition:
return Transition(
observation=double_transitions.observation[0],
action=double_transitions.action[0],
reward=double_transitions.reward[0],
discount=double_transitions.discount[0],
next_observation=double_transitions.next_observation[0],
extras={'next_action': double_transitions.action[1]})
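# NOTE: Purely illustrative sketch of what the rlds batching in `main` (size=2,
# shift=1) produces: consecutive transitions are paired so that the action of
# the second element becomes the `next_action` extra of the first. A minimal
# pure-Python analogue over a hypothetical list of per-step actions:
def _pair_next_actions_sketch(actions):
  """[a0, a1, a2] -> [(a0, a1), (a1, a2)], mirroring batch(size=2, shift=1)."""
  return [(actions[i], actions[i + 1]) for i in range(len(actions) - 1)]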
def main(_):
key = jax.random.PRNGKey(FLAGS.seed)
key_demonstrations, key_learner = jax.random.split(key, 2)
# Create an environment and grab the spec.
environment = gym_helpers.make_environment(task=FLAGS.env_name)
environment_spec = specs.make_environment_spec(environment)
# Get a demonstrations dataset with next_actions extra.
transitions = tfds.get_tfds_dataset(
FLAGS.dataset_name, FLAGS.num_demonstrations)
double_transitions = rlds.transformations.batch(
transitions, size=2, shift=1, drop_remainder=True)
transitions = double_transitions.map(_add_next_action_extras)
demonstrations = tfds.JaxInMemoryRandomSampleIterator(
transitions, key=key_demonstrations, batch_size=FLAGS.batch_size)
# Create the networks to optimize.
networks = crr.make_networks(environment_spec)
# CRR policy loss function.
policy_loss_coeff_fn = crr.policy_loss_coeff_advantage_exp
# Create the learner.
learner = crr.CRRLearner(
networks=networks,
random_key=key_learner,
discount=FLAGS.discount,
target_update_period=FLAGS.target_update_period,
policy_loss_coeff_fn=policy_loss_coeff_fn,
iterator=demonstrations,
policy_optimizer=optax.adam(FLAGS.policy_learning_rate),
critic_optimizer=optax.adam(FLAGS.critic_learning_rate),
grad_updates_per_batch=FLAGS.grad_updates_per_batch,
use_sarsa_target=FLAGS.use_sarsa_target)
def evaluator_network(
params: hk.Params, key: jax.Array, observation: jax.Array
) -> jax.Array:
dist_params = networks.policy_network.apply(params, observation)
return networks.sample_eval(dist_params, key)
actor_core = actor_core_lib.batched_feed_forward_to_actor_core(
evaluator_network)
variable_client = variable_utils.VariableClient(
learner, 'policy', device='cpu')
evaluator = actors.GenericActor(
actor_core, key, variable_client, backend='cpu')
eval_loop = acme.EnvironmentLoop(
environment=environment,
actor=evaluator,
logger=loggers.TerminalLogger('evaluation', time_delta=0.))
# Run the environment loop.
while True:
for _ in range(FLAGS.evaluate_every):
learner.step()
eval_loop.run(FLAGS.evaluation_episodes)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/offline/run_crr_jax.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example BC running on BSuite."""
import functools
import operator
from absl import app
from absl import flags
import acme
from acme import specs
from acme import types
from acme.agents.tf import actors
from acme.agents.tf.bc import learning
from acme.agents.tf.dqfd import bsuite_demonstrations
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
from acme.wrappers import single_precision
import bsuite
import reverb
import sonnet as snt
import tensorflow as tf
import tree
import trfl
# Bsuite flags
flags.DEFINE_string('bsuite_id', 'deep_sea/0', 'Bsuite id.')
flags.DEFINE_string('results_dir', '/tmp/bsuite', 'CSV results directory.')
flags.DEFINE_boolean('overwrite', False, 'Whether to overwrite csv results.')
# Agent flags
flags.DEFINE_float('learning_rate', 2e-4, 'Learning rate.')
flags.DEFINE_integer('batch_size', 16, 'Batch size.')
flags.DEFINE_float('epsilon', 0., 'Epsilon for the epsilon greedy in the env.')
flags.DEFINE_integer('evaluate_every', 100, 'Evaluation period.')
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
FLAGS = flags.FLAGS
def make_policy_network(action_spec: specs.DiscreteArray) -> snt.Module:
return snt.Sequential([
snt.Flatten(),
snt.nets.MLP([64, 64, action_spec.num_values]),
])
# TODO(b/152733199): Move this function to acme utils.
def _n_step_transition_from_episode(observations: types.NestedTensor,
actions: tf.Tensor, rewards: tf.Tensor,
discounts: tf.Tensor, n_step: int,
additional_discount: float):
"""Produce Reverb-like N-step transition from a full episode.
Observations, actions, rewards and discounts have the same length. This
function will ignore the first reward and discount and the last action.
Args:
observations: [L, ...] Tensor.
actions: [L, ...] Tensor.
rewards: [L] Tensor.
discounts: [L] Tensor.
n_step: number of steps to squash into a single transition.
additional_discount: discount to use for TD updates.
Returns:
(o_t, a_t, r_t, d_t, o_tp1) tuple.
"""
max_index = tf.shape(rewards)[0] - 1
first = tf.random.uniform(
shape=(), minval=0, maxval=max_index - 1, dtype=tf.int32)
last = tf.minimum(first + n_step, max_index)
o_t = tree.map_structure(operator.itemgetter(first), observations)
a_t = tree.map_structure(operator.itemgetter(first), actions)
o_tp1 = tree.map_structure(operator.itemgetter(last), observations)
# 0, 1, ..., n-1.
discount_range = tf.cast(tf.range(last - first), tf.float32)
# 1, g, ..., g^{n-1}.
additional_discounts = tf.pow(additional_discount, discount_range)
# 1, d_t, d_t * d_{t+1}, ..., d_t * ... * d_{t+n-2}.
discounts = tf.concat([[1.], tf.math.cumprod(discounts[first:last - 1])], 0)
# 1, g * d_t, ..., g^{n-1} * d_t * ... * d_{t+n-2}.
discounts *= additional_discounts
# r_t + g * d_t * r_{t+1} + ... + g^{n-1} * d_t * ... * d_{t+n-2} * r_{t+n-1}
# We have to shift rewards by one so last=max_index corresponds to transitions
# that include the last reward.
r_t = tf.reduce_sum(rewards[first + 1:last + 1] * discounts)
# g^{n-1} * d_{t} * ... * d_{t+n-1}.
d_t = discounts[-1]
# Reverb requires every sample to be given a key and priority.
# In the supervised learning case for BC, neither of those will be used.
  # We set the key to `0` and the probability and priority to `1`, but that
# should not matter much.
key = tf.constant(0, tf.uint64)
probability = tf.constant(1.0, tf.float64)
table_size = tf.constant(1, tf.int64)
priority = tf.constant(1.0, tf.float64)
times_sampled = tf.constant(1, tf.int32)
info = reverb.SampleInfo(
key=key,
probability=probability,
table_size=table_size,
priority=priority,
times_sampled=times_sampled,
)
return reverb.ReplaySample(info=info, data=(o_t, a_t, r_t, d_t, o_tp1))
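# NOTE: Hypothetical numeric illustration of the discount bookkeeping above; it
# is not used by `main`. For n_step=2 with additional discount g and a per-step
# discount d_t, `_n_step_transition_from_episode` accumulates the reward
# r_{t+1} + g * d_t * r_{t+2} and bootstraps with discount g * d_t.
def _two_step_return_sketch(r_tp1, r_tp2, d_t, additional_discount):
  """Returns (2-step reward, bootstrap discount) matching the comments above."""
  discounts = [1.0, additional_discount * d_t]
  n_step_reward = r_tp1 * discounts[0] + r_tp2 * discounts[1]
  return n_step_reward, discounts[-1]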
def main(_):
# Create an environment and grab the spec.
raw_environment = bsuite.load_and_record_to_csv(
bsuite_id=FLAGS.bsuite_id,
results_dir=FLAGS.results_dir,
overwrite=FLAGS.overwrite,
)
environment = single_precision.SinglePrecisionWrapper(raw_environment)
environment_spec = specs.make_environment_spec(environment)
# Build demonstration dataset.
if hasattr(raw_environment, 'raw_env'):
raw_environment = raw_environment.raw_env
batch_dataset = bsuite_demonstrations.make_dataset(raw_environment,
stochastic=False)
# Combine with demonstration dataset.
transition = functools.partial(
_n_step_transition_from_episode, n_step=1, additional_discount=1.)
dataset = batch_dataset.map(transition)
# Batch and prefetch.
dataset = dataset.batch(FLAGS.batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
# Create the networks to optimize.
policy_network = make_policy_network(environment_spec.actions)
  # If the agent is non-autoregressive, using epsilon=0 yields a greedy
  # policy.
evaluator_network = snt.Sequential([
policy_network,
lambda q: trfl.epsilon_greedy(q, epsilon=FLAGS.epsilon).sample(),
])
# Ensure that we create the variables before proceeding (maybe not needed).
tf2_utils.create_variables(policy_network, [environment_spec.observations])
counter = counting.Counter()
learner_counter = counting.Counter(counter, prefix='learner')
# Create the actor which defines how we take actions.
evaluation_network = actors.FeedForwardActor(evaluator_network)
eval_loop = acme.EnvironmentLoop(
environment=environment,
actor=evaluation_network,
counter=counter,
logger=loggers.TerminalLogger('evaluation', time_delta=1.))
# The learner updates the parameters (and initializes them).
learner = learning.BCLearner(
network=policy_network,
learning_rate=FLAGS.learning_rate,
dataset=dataset,
counter=learner_counter)
# Run the environment loop.
while True:
for _ in range(FLAGS.evaluate_every):
learner.step()
learner_counter.increment(learner_steps=FLAGS.evaluate_every)
eval_loop.run(FLAGS.evaluation_episodes)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/offline/run_bc.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Run BCQ offline agent on Atari RL Unplugged datasets.
Instructions:
1 - Download dataset:
> gsutil cp gs://rl_unplugged/atari/Pong/run_1-00000-of-00100 \
/tmp/dataset/Pong/run_1-00000-of-00001
2 - Install RL Unplugged dependencies:
https://github.com/deepmind/deepmind-research/tree/master/rl_unplugged#running-the-code
3 - Download RL Unplugged library:
> git clone https://github.com/deepmind/deepmind-research.git deepmind_research
4 - Run script:
> python -m run_atari_bcq --dataset_path=/tmp/dataset --game=Pong --run=1 \
--num_shards=1
"""
from absl import app
from absl import flags
import acme
from acme import specs
from acme.agents.tf import actors
from acme.agents.tf import bcq
from acme.tf import networks
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import sonnet as snt
import tensorflow as tf
import trfl
from deepmind_research.rl_unplugged import atari # type: ignore
# Atari dataset flags
flags.DEFINE_string('dataset_path', None, 'Dataset path.')
flags.DEFINE_string('game', 'Pong', 'Dataset game name.')
flags.DEFINE_integer('run', 1, 'Dataset run number.')
flags.DEFINE_integer('num_shards', 100, 'Number of dataset shards.')
flags.DEFINE_integer('batch_size', 16, 'Batch size.')
# Agent flags
flags.DEFINE_float('bcq_threshold', 0.5, 'BCQ threshold.')
flags.DEFINE_float('learning_rate', 1e-4, 'Learning rate.')
flags.DEFINE_float('discount', 0.99, 'Discount.')
flags.DEFINE_float('importance_sampling_exponent', 0.2,
'Importance sampling exponent.')
flags.DEFINE_integer('target_update_period', 2500,
                     ('Number of learner steps to perform before updating '
                      'the target networks.'))
# Evaluation flags.
flags.DEFINE_float('epsilon', 0., 'Epsilon for the epsilon greedy in the env.')
flags.DEFINE_integer('evaluate_every', 100, 'Evaluation period.')
flags.DEFINE_integer('evaluation_episodes', 10, 'Evaluation episodes.')
FLAGS = flags.FLAGS
def make_network(action_spec: specs.DiscreteArray) -> snt.Module:
return snt.Sequential([
lambda x: tf.image.convert_image_dtype(x, tf.float32),
networks.DQNAtariNetwork(action_spec.num_values)
])
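# NOTE: Rough sketch of the action filtering used by discrete BCQ. This is an
# assumption about the behaviour of `networks.DiscreteFilteredQNetwork`, not a
# copy of its code: actions whose behaviour-policy probability, relative to the
# most likely action, falls below `bcq_threshold` are excluded from the argmax
# by assigning them a very negative value.
def _filtered_q_values_sketch(q_values: tf.Tensor, g_logits: tf.Tensor,
                              threshold: float) -> tf.Tensor:
  """Masks Q-values of actions deemed unlikely by the generative model."""
  g_probs = tf.nn.softmax(g_logits, axis=-1)
  ratio = g_probs / tf.reduce_max(g_probs, axis=-1, keepdims=True)
  keep = tf.cast(ratio > threshold, q_values.dtype)
  return keep * q_values + (1.0 - keep) * q_values.dtype.min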
def main(_):
# Create an environment and grab the spec.
environment = atari.environment(FLAGS.game)
environment_spec = specs.make_environment_spec(environment)
# Create dataset.
dataset = atari.dataset(path=FLAGS.dataset_path,
game=FLAGS.game,
run=FLAGS.run,
num_shards=FLAGS.num_shards)
# Discard extra inputs
dataset = dataset.map(lambda x: x._replace(data=x.data[:5]))
# Batch and prefetch.
dataset = dataset.batch(FLAGS.batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
# Build network.
g_network = make_network(environment_spec.actions)
q_network = make_network(environment_spec.actions)
network = networks.DiscreteFilteredQNetwork(g_network=g_network,
q_network=q_network,
threshold=FLAGS.bcq_threshold)
tf2_utils.create_variables(network, [environment_spec.observations])
evaluator_network = snt.Sequential([
q_network,
lambda q: trfl.epsilon_greedy(q, epsilon=FLAGS.epsilon).sample(),
])
# Counters.
counter = counting.Counter()
learner_counter = counting.Counter(counter, prefix='learner')
# Create the actor which defines how we take actions.
evaluation_network = actors.FeedForwardActor(evaluator_network)
eval_loop = acme.EnvironmentLoop(
environment=environment,
actor=evaluation_network,
counter=counter,
logger=loggers.TerminalLogger('evaluation', time_delta=1.))
# The learner updates the parameters (and initializes them).
learner = bcq.DiscreteBCQLearner(
network=network,
dataset=dataset,
learning_rate=FLAGS.learning_rate,
discount=FLAGS.discount,
importance_sampling_exponent=FLAGS.importance_sampling_exponent,
target_update_period=FLAGS.target_update_period,
counter=counter)
# Run the environment loop.
while True:
for _ in range(FLAGS.evaluate_every):
learner.step()
learner_counter.increment(learner_steps=FLAGS.evaluate_every)
eval_loop.run(FLAGS.evaluation_episodes)
if __name__ == '__main__':
app.run(main)
|
acme-master
|
examples/offline/run_bcq.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for running behavioral cloning.
"""
import functools
import operator
from typing import Callable
from acme import core
from acme import environment_loop
from acme import specs
from acme import types
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax import bc
from acme.agents.tf.dqfd import bsuite_demonstrations
from acme.jax import networks as networks_lib
from acme.jax import types as jax_types
from acme.jax import utils
from acme.jax import variable_utils
from acme.jax.deprecated import offline_distributed_layout
from acme.utils import counting
from acme.utils import loggers
from acme.wrappers import single_precision
import bsuite
import dm_env
import haiku as hk
import jax
import jax.numpy as jnp
from jax.scipy import special
import rlax
import tensorflow as tf
import tree
def make_network(spec: specs.EnvironmentSpec) -> bc.BCNetworks:
"""Creates networks used by the agent."""
num_actions = spec.actions.num_values
def actor_fn(obs, is_training=True, key=None):
    # is_training and key allow the use of train/test-dependent modules
    # like dropout.
del is_training
del key
mlp = hk.Sequential(
[hk.Flatten(),
hk.nets.MLP([64, 64, num_actions])])
return mlp(obs)
policy = hk.without_apply_rng(hk.transform(actor_fn))
# Create dummy observations to create network parameters.
dummy_obs = utils.zeros_like(spec.observations)
dummy_obs = utils.add_batch_dim(dummy_obs)
policy_network = bc.BCPolicyNetwork(lambda key: policy.init(key, dummy_obs),
policy.apply)
def sample_fn(logits: networks_lib.NetworkOutput,
key: jax_types.PRNGKey) -> networks_lib.Action:
return rlax.epsilon_greedy(epsilon=0.0).sample(key, logits)
def log_prob(logits: networks_lib.Params,
actions: networks_lib.Action) -> networks_lib.LogProb:
logits_actions = jnp.sum(
jax.nn.one_hot(actions, logits.shape[-1]) * logits, axis=-1)
logits_actions = logits_actions - special.logsumexp(logits, axis=-1)
return logits_actions
return bc.BCNetworks(policy_network, sample_fn, log_prob)
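# NOTE: Small illustrative sanity check (not used by the BC agent) that the
# `log_prob` defined in `make_network` matches `jax.nn.log_softmax` evaluated
# at the chosen actions.
def _log_prob_sanity_check_sketch():
  logits = jnp.array([[1.0, 2.0, 0.5]])
  actions = jnp.array([1])
  rows = jnp.arange(logits.shape[0])
  via_log_softmax = jax.nn.log_softmax(logits)[rows, actions]
  via_logsumexp = logits[rows, actions] - special.logsumexp(logits, axis=-1)
  return bool(jnp.allclose(via_log_softmax, via_logsumexp))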
def _n_step_transition_from_episode(
observations: types.NestedTensor,
actions: tf.Tensor,
rewards: tf.Tensor,
discounts: tf.Tensor, n_step: int,
additional_discount: float) -> types.Transition:
"""Produce Reverb-like N-step transition from a full episode.
Observations, actions, rewards and discounts have the same length. This
function will ignore the first reward and discount and the last action.
Args:
observations: [episode_length, ...] Tensor.
actions: [episode_length, ...] Tensor.
rewards: [episode_length] Tensor.
discounts: [episode_length] Tensor.
n_step: number of steps to squash into a single transition.
additional_discount: discount to use for TD updates.
Returns:
A types.Transition.
"""
max_index = tf.shape(rewards)[0] - 1
first = tf.random.uniform(
shape=(), minval=0, maxval=max_index - 1, dtype=tf.int32)
last = tf.minimum(first + n_step, max_index)
o_t = tree.map_structure(operator.itemgetter(first), observations)
a_t = tree.map_structure(operator.itemgetter(first), actions)
o_tp1 = tree.map_structure(operator.itemgetter(last), observations)
# 0, 1, ..., n-1.
discount_range = tf.cast(tf.range(last - first), tf.float32)
# 1, g, ..., g^{n-1}.
additional_discounts = tf.pow(additional_discount, discount_range)
# 1, d_t, d_t * d_{t+1}, ..., d_t * ... * d_{t+n-2}.
discounts = tf.concat([[1.], tf.math.cumprod(discounts[first:last - 1])], 0)
# 1, g * d_t, ..., g^{n-1} * d_t * ... * d_{t+n-2}.
discounts *= additional_discounts
# r_t + g * d_t * r_{t+1} + ... + g^{n-1} * d_t * ... * d_{t+n-2} * r_{t+n-1}
# We have to shift rewards by one so last=max_index corresponds to transitions
# that include the last reward.
r_t = tf.reduce_sum(rewards[first + 1:last + 1] * discounts)
# g^{n-1} * d_{t} * ... * d_{t+n-1}.
d_t = discounts[-1]
return types.Transition(o_t, a_t, r_t, d_t, o_tp1)
def make_environment(training: bool = True):
del training
env = bsuite.load(experiment_name='deep_sea', kwargs={'size': 10})
return single_precision.SinglePrecisionWrapper(env)
def make_demonstrations(env: dm_env.Environment,
batch_size: int) -> tf.data.Dataset:
"""Prepare the dataset of demonstrations."""
batch_dataset = bsuite_demonstrations.make_dataset(env, stochastic=False)
# Combine with demonstration dataset.
transition = functools.partial(
_n_step_transition_from_episode, n_step=1, additional_discount=1.)
dataset = batch_dataset.map(transition)
# Batch and prefetch.
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def make_actor_evaluator(
environment_factory: Callable[[bool], dm_env.Environment],
evaluator_network: actor_core_lib.FeedForwardPolicy,
) -> offline_distributed_layout.EvaluatorFactory:
"""Makes an evaluator that runs the agent on the environment.
Args:
environment_factory: Function that creates a dm_env.
    evaluator_network: Network to be used by the actor.
Returns:
actor_evaluator: Function that returns a Worker that will be executed
by launchpad.
"""
def actor_evaluator(
random_key: networks_lib.PRNGKey,
variable_source: core.VariableSource,
counter: counting.Counter,
):
"""The evaluation process."""
# Create the actor loading the weights from variable source.
actor_core = actor_core_lib.batched_feed_forward_to_actor_core(
evaluator_network)
# Inference happens on CPU, so it's better to move variables there too.
variable_client = variable_utils.VariableClient(variable_source, 'policy',
device='cpu')
actor = actors.GenericActor(
actor_core, random_key, variable_client, backend='cpu')
# Logger.
logger = loggers.make_default_logger(
'evaluator', steps_key='evaluator_steps')
    # Create the environment.
environment = environment_factory(False)
# Create logger and counter.
counter = counting.Counter(counter, 'evaluator')
# Create the run loop and return it.
return environment_loop.EnvironmentLoop(
environment,
actor,
counter,
logger,
)
return actor_evaluator
|
acme-master
|
examples/offline/bc_utils.py
|
from skbuild import setup
setup(
name='hanabi_learning_environment',
version='0.0.1',
description='Learning environment for the game of hanabi.',
author='deepmind/hanabi-learning-environment',
packages=['hanabi_learning_environment', 'hanabi_learning_environment.agents'],
install_requires=['cffi']
)
|
hanabi-learning-environment-master
|
setup.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python interface to Hanabi code."""
import os
import re
import cffi
import enum
import sys
DEFAULT_CDEF_PREFIXES = (None, ".", os.path.dirname(__file__), "/include")
DEFAULT_LIB_PREFIXES = (None, ".", os.path.dirname(__file__), "/lib")
PYHANABI_HEADER = "pyhanabi.h"
PYHANABI_LIB = ["libpyhanabi.so", "libpyhanabi.dylib"]
COLOR_CHAR = ["R", "Y", "G", "W", "B"] # consistent with hanabi_lib/util.cc
CHANCE_PLAYER_ID = -1
ffi = cffi.FFI()
lib = None
cdef_loaded_flag = False
lib_loaded_flag = False
if sys.version_info < (3,):
def encode_ffi_string(x):
return ffi.string(x)
else:
def encode_ffi_string(x):
return str(ffi.string(x), 'ascii')
def try_cdef(header=PYHANABI_HEADER, prefixes=DEFAULT_CDEF_PREFIXES):
"""Try parsing library header file. Must be called before any pyhanabi calls.
Args:
header: filename of pyhanabi header file.
prefixes: list of paths to search for pyhanabi header file.
Returns:
True if header was successfully parsed, False on failure.
"""
global cdef_loaded_flag
if cdef_loaded_flag: return True
for prefix in prefixes:
try:
cdef_file = header if prefix is None else prefix + "/" + header
lines = open(cdef_file).readlines()
reading_cdef = False
cdef_string = ""
for line in lines:
line = line.rstrip()
if re.match("extern *\"C\" *{", line):
reading_cdef = True
continue
elif re.match("} */[*] *extern *\"C\" *[*]/", line):
reading_cdef = False
continue
if reading_cdef:
cdef_string = cdef_string + line + "\n"
ffi.cdef(cdef_string)
cdef_loaded_flag = True
return True
except IOError:
pass
return False
def try_load(library=None, prefixes=DEFAULT_LIB_PREFIXES):
"""Try loading library. Must be called before any pyhanabi calls.
Args:
library: filename of pyhanabi library file.
prefixes: list of paths to search for pyhanabi library file.
Returns:
True if library was successfully loaded, False on failure.
"""
global lib_loaded_flag
global lib
if lib_loaded_flag: return True
if library is None:
libnames = PYHANABI_LIB
elif type(library) in (list, tuple):
libnames = library
else:
libnames = (library,)
for prefix in prefixes:
for libname in libnames:
try:
lib_file = libname if prefix is None else prefix + "/" + libname
lib = ffi.dlopen(lib_file)
lib_loaded_flag = True
return True
except OSError:
pass
return False
def cdef_loaded():
"""Return True if pyhanabi header has been successfully parsed."""
return cdef_loaded_flag
def lib_loaded():
"""Return True if pyhanabi library has been successfully loaded."""
return lib_loaded_flag
def color_idx_to_char(color_idx):
"""Helper function for converting color index to a character.
Args:
color_idx: int, index into color char vector.
Returns:
color_char: str, A single character representing a color.
Raises:
AssertionError: If index is not in range.
"""
assert isinstance(color_idx, int)
if color_idx == -1:
return None
else:
return COLOR_CHAR[color_idx]
def color_char_to_idx(color_char):
r"""Helper function for converting color character to index.
Args:
color_char: str, Character representing a color.
Returns:
color_idx: int, Index into a color array \in [0, num_colors -1]
Raises:
ValueError: If color_char is not a valid color.
"""
assert isinstance(color_char, str)
try:
return next(idx for (idx, c) in enumerate(COLOR_CHAR) if c == color_char)
except StopIteration:
raise ValueError("Invalid color: {}. Should be one of {}.".format(
color_char, COLOR_CHAR))
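# NOTE: Minimal usage illustration of the colour helpers above. It is pure
# Python and does not require the compiled pyhanabi library.
def _color_helpers_example():
  assert color_idx_to_char(0) == "R"
  assert color_char_to_idx("R") == 0
  assert color_idx_to_char(-1) is None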
class HanabiCard(object):
"""Hanabi card, with a color and a rank.
Python implementation of C++ HanabiCard class.
"""
def __init__(self, color, rank):
"""A simple HanabiCard object.
Args:
color: an integer, starting at 0. Colors are in this order RYGWB.
rank: an integer, starting at 0 (representing a 1 card). In the standard
game, the largest value is 4 (representing a 5 card).
"""
self._color = color
self._rank = rank
def color(self):
return self._color
def rank(self):
return self._rank
def __str__(self):
if self.valid():
return COLOR_CHAR[self._color] + str(self._rank + 1)
else:
return "XX"
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self._color == other.color() and self._rank == other.rank()
def valid(self):
return self._color >= 0 and self._rank >= 0
def to_dict(self):
"""Serialize to dict.
Returns:
d: dict, containing color and rank of card.
"""
return {"color": color_idx_to_char(self.color()), "rank": self.rank()}
class HanabiCardKnowledge(object):
"""Accumulated knowledge about color and rank of an initially unknown card.
Stores two types of knowledge: direct hints about a card, and indirect
knowledge from hints about other cards.
For example, say we had two cards that we know nothing about, but our
partners know are a R1 and B2. Before any hints, both color() and rank()
return None, and color_plausible(c) and rank_plausible(r) returns True for
all colors c and ranks r, for both cards.
Say our partner reveals that our first card is a 1 -- rank index 0. Now for
the first card we have rank()=0, rank_plausible(0)=True, and
rank_plausible(r)=False for r != 0. The same hint also tells us the second
card is NOT a 1 (rank index 0). For the second card, we have rank()=None,
  rank_plausible(0)=False, and rank_plausible(r)=True for r != 0.
Note that color() and rank() only consider directly revealed information.
Both methods will always return None unless the color or rank, respectively,
  are directly revealed. That is, even if we have seen hints for all ranks
  except rank index 0, so that rank_plausible(0)=True and rank_plausible(r)=False
for all r != 0, rank() will still be None.
Python wrapper of C++ HanabiHand::CardKnowledge class.
"""
def __init__(self, knowledge):
self._knowledge = knowledge
def color(self):
"""Returns color index if exact color was revealed, or None otherwise.
Does not perform inference to deduce the color from other color hints.
"""
if lib.ColorWasHinted(self._knowledge):
return lib.KnownColor(self._knowledge)
else:
return None
def color_plausible(self, color_index):
"""Returns true if we have no hint saying card is not the given color.
Args:
color_index: 0-based color index.
"""
return lib.ColorIsPlausible(self._knowledge, color_index)
def rank(self):
"""Returns rank index if exact rank was revealed, or None otherwise.
Does not perform inference to deduce the rank from other rank hints.
"""
if lib.RankWasHinted(self._knowledge):
return lib.KnownRank(self._knowledge)
else:
return None
def rank_plausible(self, rank_index):
"""Returns true if we have no hint saying card is not the given rank.
Args:
rank_index: 0-based rank index.
"""
return lib.RankIsPlausible(self._knowledge, rank_index)
def __str__(self):
c_string = lib.CardKnowledgeToString(self._knowledge)
string = encode_ffi_string(c_string)
lib.DeleteString(c_string)
return string
def __repr__(self):
return self.__str__()
def to_dict(self):
"""Serialize to dict.
Returns:
d: dict, containing color and rank of hint.
"""
return {"color": color_idx_to_char(self.color()), "rank": self.rank()}
class HanabiMoveType(enum.IntEnum):
"""Move types, consistent with hanabi_lib/hanabi_move.h."""
INVALID = 0
PLAY = 1
DISCARD = 2
REVEAL_COLOR = 3
REVEAL_RANK = 4
DEAL = 5
class HanabiMove(object):
"""Description of an agent move or chance event.
Python wrapper of C++ HanabiMove class.
"""
def __init__(self, move):
assert move is not None
self._move = move
@property
def c_move(self):
return self._move
def type(self):
return HanabiMoveType(lib.MoveType(self._move))
def card_index(self):
"""Returns 0-based card index for PLAY and DISCARD moves."""
return lib.CardIndex(self._move)
def target_offset(self):
"""Returns target player offset for REVEAL_XYZ moves."""
return lib.TargetOffset(self._move)
def color(self):
"""Returns 0-based color index for REVEAL_COLOR and DEAL moves."""
return lib.MoveColor(self._move)
def rank(self):
"""Returns 0-based rank index for REVEAL_RANK and DEAL moves."""
return lib.MoveRank(self._move)
@staticmethod
def get_discard_move(card_index):
c_move = ffi.new("pyhanabi_move_t*")
assert lib.GetDiscardMove(card_index, c_move)
return HanabiMove(c_move)
@staticmethod
def get_play_move(card_index):
c_move = ffi.new("pyhanabi_move_t*")
assert lib.GetPlayMove(card_index, c_move)
return HanabiMove(c_move)
@staticmethod
def get_reveal_color_move(target_offset, color):
"""current player is 0, next player clockwise is target_offset 1, etc."""
c_move = ffi.new("pyhanabi_move_t*")
assert lib.GetRevealColorMove(target_offset, color, c_move)
return HanabiMove(c_move)
@staticmethod
def get_reveal_rank_move(target_offset, rank):
"""current player is 0, next player clockwise is target_offset 1, etc."""
c_move = ffi.new("pyhanabi_move_t*")
assert lib.GetRevealRankMove(target_offset, rank, c_move)
return HanabiMove(c_move)
def __str__(self):
c_string = lib.MoveToString(self._move)
string = encode_ffi_string(c_string)
lib.DeleteString(c_string)
return string
def __repr__(self):
return self.__str__()
def __del__(self):
if self._move is not None:
lib.DeleteMove(self._move)
self._move = None
del self
def to_dict(self):
"""Serialize to dict.
Returns:
d: dict, Containing type and information of a hanabi move.
Raises:
ValueError: If move type is not supported.
"""
move_dict = {}
move_type = self.type()
move_dict["action_type"] = move_type.name
if move_type == HanabiMoveType.PLAY or move_type == HanabiMoveType.DISCARD:
move_dict["card_index"] = self.card_index()
elif move_type == HanabiMoveType.REVEAL_COLOR:
move_dict["target_offset"] = self.target_offset()
move_dict["color"] = color_idx_to_char(self.color())
elif move_type == HanabiMoveType.REVEAL_RANK:
move_dict["target_offset"] = self.target_offset()
move_dict["rank"] = self.rank()
elif move_type == HanabiMoveType.DEAL:
move_dict["color"] = color_idx_to_char(self.color())
move_dict["rank"] = self.rank()
else:
raise ValueError("Unsupported move: {}".format(self))
return move_dict
class HanabiHistoryItem(object):
"""A move that has been made within a game, along with the side-effects.
For example, a play move simply selects a card index between 0-5, but after
making the move, there is an associated color and rank for the selected card,
a possibility that the card was successfully added to the fireworks, and an
information token added if the firework stack was completed.
Python wrapper of C++ HanabiHistoryItem class.
"""
def __init__(self, item):
self._item = item
def move(self):
c_move = ffi.new("pyhanabi_move_t*")
lib.HistoryItemMove(self._item, c_move)
return HanabiMove(c_move)
def player(self):
return lib.HistoryItemPlayer(self._item)
def scored(self):
"""Play move succeeded in placing card on fireworks."""
return bool(lib.HistoryItemScored(self._item))
def information_token(self):
"""Play/Discard move increased the number of information tokens."""
return bool(lib.HistoryItemInformationToken(self._item))
def color(self):
"""Color index of card that was Played/Discarded."""
return lib.HistoryItemColor(self._item)
def rank(self):
"""Rank index of card that was Played/Discarded."""
return lib.HistoryItemRank(self._item)
def card_info_revealed(self):
"""Returns information about whether color/rank was revealed.
Indices where card i color/rank matches the reveal move. E.g.,
for Reveal player 1 color red when player 1 has R1 W1 R2 R4 __ the
result would be [0, 2, 3].
"""
revealed = []
bitmask = lib.HistoryItemRevealBitmask(self._item)
for i in range(8): # 8 bits in reveal_bitmask
if bitmask & (1 << i):
revealed.append(i)
return revealed
def card_info_newly_revealed(self):
"""Returns information about whether color/rank was newly revealed.
Indices where card i color/rank was not previously known. E.g.,
for Reveal player 1 color red when player 1 has R1 W1 R2 R4 __ the
result might be [2, 3]. Cards 2 and 3 were revealed to be red,
but card 0 was previously known to be red, so nothing new was
revealed. Card 4 is missing, so nothing was revealed about it.
"""
revealed = []
bitmask = lib.HistoryItemNewlyRevealedBitmask(self._item)
for i in range(8): # 8 bits in reveal_bitmask
if bitmask & (1 << i):
revealed.append(i)
return revealed
def deal_to_player(self):
"""player that card was dealt to for Deal moves."""
return lib.HistoryItemDealToPlayer(self._item)
def __str__(self):
c_string = lib.HistoryItemToString(self._item)
string = encode_ffi_string(c_string)
lib.DeleteString(c_string)
return string
def __repr__(self):
return self.__str__()
def __del__(self):
if self._item is not None:
lib.DeleteHistoryItem(self._item)
self._item = None
del self
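# NOTE: Illustration only. `card_info_revealed` and `card_info_newly_revealed`
# decode an 8-bit reveal bitmask into card indices; the hypothetical helper
# below shows the same decoding on a plain integer, e.g. 0b1101 -> [0, 2, 3].
def _reveal_bitmask_to_indices_sketch(bitmask):
  return [i for i in range(8) if bitmask & (1 << i)]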
class HanabiEndOfGameType(enum.IntEnum):
"""Possible end-of-game conditions, consistent with hanabi_state.h."""
NOT_FINISHED = 0
OUT_OF_LIFE_TOKENS = 1
OUT_OF_CARDS = 2
COMPLETED_FIREWORKS = 3
class HanabiState(object):
"""Current environment state for an active Hanabi game.
The game is turn-based, with only one active agent at a time. Chance events
are explicitly included, so the active agent may be "nature" (represented
by cur_player() returning CHANCE_PLAYER_ID).
Python wrapper of C++ HanabiState class.
"""
def __init__(self, game, c_state=None):
"""Returns a new state.
Args:
game: HanabiGame describing the parameters for a game of Hanabi.
c_state: C++ state to copy, or None for a new state.
NOTE: If c_state is supplied, game is ignored and c_state game is used.
"""
self._state = ffi.new("pyhanabi_state_t*")
if c_state is None:
self._game = game.c_game
lib.NewState(self._game, self._state)
else:
self._game = ffi.new("pyhanabi_game_t*")
lib.StateParentGame(c_state, self._game)
lib.CopyState(c_state, self._state)
def copy(self):
"""Returns a copy of the state."""
return HanabiState(None, self._state)
def observation(self, player):
"""Returns player's observed view of current environment state."""
return HanabiObservation(self._state, self._game, player)
def apply_move(self, move):
"""Advance the environment state by making move for acting player."""
lib.StateApplyMove(self._state, move.c_move)
def cur_player(self):
"""Returns index of next player to act.
Index will be CHANCE_PLAYER_ID if a chance event needs to be resolved.
"""
return lib.StateCurPlayer(self._state)
def deck_size(self):
"""Returns number of cards left in the deck."""
return lib.StateDeckSize(self._state)
def discard_pile(self):
"""Returns a list of all discarded cards, in order they were discarded."""
discards = []
c_card = ffi.new("pyhanabi_card_t*")
for index in range(lib.StateDiscardPileSize(self._state)):
lib.StateGetDiscard(self._state, index, c_card)
discards.append(HanabiCard(c_card.color, c_card.rank))
return discards
def fireworks(self):
"""Returns a list of fireworks levels by value, ordered by color (RYGWB).
Important note on representation / format: when no fireworks have been
played, this function returns [0, 0, 0, 0, 0]. When only the red 1 has been
played, this function returns [1, 0, 0, 0, 0].
"""
firework_list = []
num_colors = lib.NumColors(self._game)
for c in range(num_colors):
firework_list.append(lib.StateFireworks(self._state, c))
return firework_list
def deal_random_card(self):
"""If cur_player == CHANCE_PLAYER_ID, make a random card-deal move."""
lib.StateDealRandomCard(self._state)
def player_hands(self):
"""Returns a list of all hands, with cards ordered oldest to newest."""
hand_list = []
c_card = ffi.new("pyhanabi_card_t*")
for pid in range(self.num_players()):
player_hand = []
hand_size = lib.StateGetHandSize(self._state, pid)
for i in range(hand_size):
lib.StateGetHandCard(self._state, pid, i, c_card)
player_hand.append(HanabiCard(c_card.color, c_card.rank))
hand_list.append(player_hand)
return hand_list
def information_tokens(self):
"""Returns the number of information tokens remaining."""
return lib.StateInformationTokens(self._state)
def end_of_game_status(self):
"""Returns the end of game status, NOT_FINISHED if game is still active."""
return HanabiEndOfGameType(lib.StateEndOfGameStatus(self._state))
def is_terminal(self):
"""Returns false if game is still active, true otherwise."""
return (lib.StateEndOfGameStatus(self._state) !=
HanabiEndOfGameType.NOT_FINISHED)
def legal_moves(self):
"""Returns list of legal moves for currently acting player."""
moves = []
c_movelist = lib.StateLegalMoves(self._state)
num_moves = lib.NumMoves(c_movelist)
for i in range(num_moves):
c_move = ffi.new("pyhanabi_move_t*")
lib.GetMove(c_movelist, i, c_move)
moves.append(HanabiMove(c_move))
lib.DeleteMoveList(c_movelist)
return moves
def move_is_legal(self, move):
"""Returns true if and only if move is legal for active agent."""
return lib.MoveIsLegal(self._state, move.c_move)
def card_playable_on_fireworks(self, color, rank):
"""Returns true if and only if card can be successfully played.
Args:
color: 0-based color index of card
rank: 0-based rank index of card
"""
return lib.CardPlayableOnFireworks(self._state, color, rank)
def life_tokens(self):
"""Returns the number of information tokens remaining."""
return lib.StateLifeTokens(self._state)
def num_players(self):
"""Returns the number of players in the game."""
return lib.StateNumPlayers(self._state)
def score(self):
"""Returns the co-operative game score at a terminal state.
NOTE: result is undefined when game is NOT_FINISHED.
"""
return lib.StateScore(self._state)
def move_history(self):
"""Returns list of moves made, from oldest to most recent."""
history = []
history_len = lib.StateLenMoveHistory(self._state)
for i in range(history_len):
c_history_item = ffi.new("pyhanabi_history_item_t*")
lib.StateGetMoveHistory(self._state, i, c_history_item)
history.append(HanabiHistoryItem(c_history_item))
return history
def __str__(self):
c_string = lib.StateToString(self._state)
string = encode_ffi_string(c_string)
lib.DeleteString(c_string)
return string
def __repr__(self):
return self.__str__()
def __del__(self):
if self._state is not None:
lib.DeleteState(self._state)
self._state = None
del self
class AgentObservationType(enum.IntEnum):
"""Possible agent observation types, consistent with hanabi_game.h.
A kMinimal observation is similar to what a human sees, and does not
  include any memory of past RevealColor/RevealRank hints. A CardKnowledge
  observation includes per-card knowledge of past hints, as well as simple
  inferred knowledge of the form "this card is not red, because it was
  not revealed as red in a past <RevealColor Red> move". A Seer observation
shows all cards, including the player's own cards, regardless of what
hints have been given.
"""
MINIMAL = 0
CARD_KNOWLEDGE = 1
SEER = 2
class HanabiGame(object):
"""Game parameters describing a specific instance of Hanabi.
Python wrapper of C++ HanabiGame class.
"""
def __init__(self, params=None):
"""Creates a HanabiGame object.
Args:
params: is a dictionary of parameters and their values.
Possible parameters include
"players": 2 <= number of players <= 5
"colors": 1 <= number of different card colors in deck <= 5
"rank": 1 <= number of different card ranks in deck <= 5
"hand_size": 1 <= number of cards in player hand
"max_information_tokens": 1 <= maximum (and initial) number of info tokens.
"max_life_tokens": 1 <= maximum (and initial) number of life tokens.
"seed": random number seed. -1 to use system random device to get seed.
"random_start_player": boolean. If true, start with random player, not 0.
"observation_type": int AgentObservationType.
"""
if params is None:
self._game = ffi.new("pyhanabi_game_t*")
lib.NewDefaultGame(self._game)
else:
param_list = []
for key in params:
param_list.append(ffi.new("char[]", key.encode('ascii')))
param_list.append(ffi.new("char[]", str(params[key]).encode('ascii')))
c_array = ffi.new("char * [" + str(len(param_list)) + "]", param_list)
self._game = ffi.new("pyhanabi_game_t*")
lib.NewGame(self._game, len(param_list), c_array)
def new_initial_state(self):
return HanabiState(self)
@property
def c_game(self):
"""Return the C++ HanabiGame object."""
return self._game
def __del__(self):
if self._game is not None:
lib.DeleteGame(self._game)
self._game = None
del self
def parameter_string(self):
"""Returns string with all parameter choices."""
c_string = lib.GameParamString(self._game)
string = encode_ffi_string(c_string)
lib.DeleteString(c_string)
return string
def num_players(self):
"""Returns the number of players in the game."""
return lib.NumPlayers(self._game)
def num_colors(self):
"""Returns the number of card colors in the initial deck."""
return lib.NumColors(self._game)
def num_ranks(self):
"""Returns the number of card ranks in the initial deck."""
return lib.NumRanks(self._game)
def hand_size(self):
"""Returns the maximum number of cards in each player hand.
The number of cards in a player's hand may be smaller than this maximum
a) at the beginning of the game before cards are dealt out, b) after
any Play or Discard action and before the subsequent deal event, and c)
after the deck is empty and cards can no longer be dealt to a player.
"""
return lib.HandSize(self._game)
def max_information_tokens(self):
"""Returns the initial number of information tokens."""
return lib.MaxInformationTokens(self._game)
def max_life_tokens(self):
"""Returns the initial number of life tokens."""
return lib.MaxLifeTokens(self._game)
def observation_type(self):
return AgentObservationType(lib.ObservationType(self._game))
def max_moves(self):
"""Returns the number of possible legal moves in the game."""
return lib.MaxMoves(self._game)
def num_cards(self, color, rank):
"""Returns number of instances of Card(color, rank) in the initial deck."""
return lib.NumCards(self._game, color, rank)
def get_move_uid(self, move):
"""Returns a unique ID describing a legal move, or -1 for invalid move."""
return lib.GetMoveUid(self._game, move.c_move)
def get_move(self, move_uid):
"""Returns a HanabiMove represented by 0 <= move_uid < max_moves()."""
move = ffi.new("pyhanabi_move_t*")
lib.GetMoveByUid(self._game, move_uid, move)
return HanabiMove(move)
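# NOTE: Hypothetical usage sketch; it requires the compiled pyhanabi library to
# be loaded. It constructs a small game, resolves the initial chance (deal)
# moves, and returns string descriptions of the first player's legal moves.
def _hanabi_game_usage_sketch():
  game = HanabiGame({"players": 2, "seed": 0})
  state = game.new_initial_state()
  while state.cur_player() == CHANCE_PLAYER_ID:
    state.deal_random_card()
  return [str(move) for move in state.legal_moves()]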
class HanabiObservation(object):
"""Player's observed view of an environment HanabiState.
The main differences are that 1) a player's own cards are not visible, and
2) a player does not know their own player index (seat) so that all player
indices are described relative to the observing player (or equivalently,
that from the player's point of view, they are always player index 0).
Python wrapper of C++ HanabiObservation class.
"""
def __init__(self, state, game, player):
"""Construct using HanabiState.observation(player)."""
self._observation = ffi.new("pyhanabi_observation_t*")
self._game = game
lib.NewObservation(state, player, self._observation)
def __str__(self):
c_string = lib.ObsToString(self._observation)
string = encode_ffi_string(c_string)
lib.DeleteString(c_string)
return string
def __repr__(self):
return self.__str__()
def __del__(self):
if self._observation is not None:
lib.DeleteObservation(self._observation)
self._observation = None
del self
def observation(self):
"""Returns the C++ HanabiObservation object."""
return self._observation
def cur_player_offset(self):
"""Returns the player index of the acting player, relative to observer."""
return lib.ObsCurPlayerOffset(self._observation)
def num_players(self):
"""Returns the number of players in the game."""
return lib.ObsNumPlayers(self._observation)
def observed_hands(self):
"""Returns a list of all hands, with cards ordered oldest to newest.
The observing player's cards are always invalid.
"""
hand_list = []
c_card = ffi.new("pyhanabi_card_t*")
for pid in range(self.num_players()):
player_hand = []
hand_size = lib.ObsGetHandSize(self._observation, pid)
for i in range(hand_size):
lib.ObsGetHandCard(self._observation, pid, i, c_card)
player_hand.append(HanabiCard(c_card.color, c_card.rank))
hand_list.append(player_hand)
return hand_list
def card_knowledge(self):
"""Returns a per-player list of hinted card knowledge.
Each player's entry is a per-card list of HanabiCardKnowledge objects.
    Each HanabiCardKnowledge for a card gives the knowledge about that card
accumulated over all past reveal actions.
"""
card_knowledge_list = []
for pid in range(self.num_players()):
player_card_knowledge = []
hand_size = lib.ObsGetHandSize(self._observation, pid)
for i in range(hand_size):
c_knowledge = ffi.new("pyhanabi_card_knowledge_t*")
lib.ObsGetHandCardKnowledge(self._observation, pid, i, c_knowledge)
player_card_knowledge.append(HanabiCardKnowledge(c_knowledge))
card_knowledge_list.append(player_card_knowledge)
return card_knowledge_list
def discard_pile(self):
"""Returns a list of all discarded cards, in order they were discarded."""
discards = []
c_card = ffi.new("pyhanabi_card_t*")
for index in range(lib.ObsDiscardPileSize(self._observation)):
lib.ObsGetDiscard(self._observation, index, c_card)
discards.append(HanabiCard(c_card.color, c_card.rank))
return discards
def fireworks(self):
"""Returns a list of fireworks levels by value, ordered by color."""
firework_list = []
num_colors = lib.NumColors(self._game)
for c in range(num_colors):
firework_list.append(lib.ObsFireworks(self._observation, c))
return firework_list
def deck_size(self):
"""Returns number of cards left in the deck."""
return lib.ObsDeckSize(self._observation)
def last_moves(self):
"""Returns moves made since observing player last acted.
Each entry in list is a HanabiHistoryItem, ordered from most recent
move to oldest. Oldest move is the last action made by observing
player. Skips initial chance moves to deal hands.
"""
history_items = []
for i in range(lib.ObsNumLastMoves(self._observation)):
history_item = ffi.new("pyhanabi_history_item_t*")
lib.ObsGetLastMove(self._observation, i, history_item)
history_items.append(HanabiHistoryItem(history_item))
return history_items
def information_tokens(self):
"""Returns the number of information tokens remaining."""
return lib.ObsInformationTokens(self._observation)
def life_tokens(self):
"""Returns the number of information tokens remaining."""
return lib.ObsLifeTokens(self._observation)
def legal_moves(self):
"""Returns list of legal moves for observing player.
List is empty if cur_player() != 0 (observer is not currently acting).
"""
moves = []
for i in range(lib.ObsNumLegalMoves(self._observation)):
move = ffi.new("pyhanabi_move_t*")
lib.ObsGetLegalMove(self._observation, i, move)
moves.append(HanabiMove(move))
return moves
def card_playable_on_fireworks(self, color, rank):
"""Returns true if and only if card can be successfully played.
Args:
color: 0-based color index of card
rank: 0-based rank index of card
"""
return lib.ObsCardPlayableOnFireworks(self._observation, color, rank)
class ObservationEncoderType(enum.IntEnum):
"""Encoder types, consistent with observation_encoder.h."""
CANONICAL = 0
class ObservationEncoder(object):
"""ObservationEncoder class.
  The canonical observation encoder wraps an underlying C++ class. To make custom
observation encoders, create a subclass of this base class and override
the shape and encode methods.
"""
def __init__(self, game, enc_type=ObservationEncoderType.CANONICAL):
"""Construct using HanabiState.observation(player)."""
self._game = game.c_game
self._encoder = ffi.new("pyhanabi_observation_encoder_t*")
lib.NewObservationEncoder(self._encoder, self._game, enc_type)
def __del__(self):
if self._encoder is not None:
lib.DeleteObservationEncoder(self._encoder)
self._encoder = None
self._game = None
del self
def shape(self):
c_shape_str = lib.ObservationShape(self._encoder)
shape_string = encode_ffi_string(c_shape_str)
lib.DeleteString(c_shape_str)
shape = [int(x) for x in shape_string.split(",")]
return shape
def encode(self, observation):
"""Encode the observation as a sequence of bits."""
c_encoding_str = lib.EncodeObservation(self._encoder,
observation.observation())
encoding_string = encode_ffi_string(c_encoding_str)
lib.DeleteString(c_encoding_str)
    # Canonical observations are bit strings, so it is ok to encode them using
    # a comma-separated string. For float or double observations, a custom
    # encoding object would be needed.
encoding = [int(x) for x in encoding_string.split(",")]
return encoding
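# NOTE: Hypothetical usage sketch; it requires the compiled pyhanabi library.
# It encodes the acting player's observation into the canonical bit vector.
# It assumes `state.cur_player()` is a regular player (initial deal resolved);
# for the canonical encoder the shape is one-dimensional, so the length of the
# returned list should match shape()[0].
def _observation_encoder_usage_sketch(game, state):
  encoder = ObservationEncoder(game, ObservationEncoderType.CANONICAL)
  observation = state.observation(state.cur_player())
  return encoder.encode(observation)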
try_cdef()
if cdef_loaded():
try_load()
|
hanabi-learning-environment-master
|
hanabi_learning_environment/pyhanabi.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
hanabi-learning-environment-master
|
hanabi_learning_environment/__init__.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RL environment for Hanabi, using an API similar to OpenAI Gym."""
from __future__ import absolute_import
from __future__ import division
from hanabi_learning_environment import pyhanabi
from hanabi_learning_environment.pyhanabi import color_char_to_idx
MOVE_TYPES = [_.name for _ in pyhanabi.HanabiMoveType]
#-------------------------------------------------------------------------------
# Environment API
#-------------------------------------------------------------------------------
class Environment(object):
"""Abstract Environment interface.
All concrete implementations of an environment should derive from this
interface and implement the method stubs.
"""
def reset(self, config):
"""Reset the environment with a new config.
Signals environment handlers to reset and restart the environment using
a config dict.
Args:
config: dict, specifying the parameters of the environment to be
generated.
Returns:
observation: A dict containing the full observation state.
"""
raise NotImplementedError("Not implemented in Abstract Base class")
def step(self, action):
"""Take one step in the game.
Args:
action: dict, mapping to an action taken by an agent.
Returns:
observation: dict, Containing full observation state.
reward: float, Reward obtained from taking the action.
done: bool, Whether the game is done.
info: dict, Optional debugging information.
Raises:
AssertionError: When an illegal action is provided.
"""
raise NotImplementedError("Not implemented in Abstract Base class")
class HanabiEnv(Environment):
"""RL interface to a Hanabi environment.
```python
environment = rl_env.make()
config = { 'players': 5 }
observation = environment.reset(config)
  done = False
  while not done:
    # Agent takes an action.
    action = ...
    # Environment takes a step.
observation, reward, done, info = environment.step(action)
```
"""
def __init__(self, config):
r"""Creates an environment with the given game configuration.
Args:
config: dict, With parameters for the game. Config takes the following
keys and values.
- colors: int, Number of colors \in [2,5].
- ranks: int, Number of ranks \in [2,5].
- players: int, Number of players \in [2,5].
- hand_size: int, Hand size \in [4,5].
- max_information_tokens: int, Number of information tokens (>=0).
- max_life_tokens: int, Number of life tokens (>=1).
- observation_type: int.
0: Minimal observation.
1: First-order common knowledge observation.
- seed: int, Random seed.
- random_start_player: bool, Random start player.
"""
assert isinstance(config, dict), "Expected config to be of type dict."
self.game = pyhanabi.HanabiGame(config)
self.observation_encoder = pyhanabi.ObservationEncoder(
self.game, pyhanabi.ObservationEncoderType.CANONICAL)
self.players = self.game.num_players()
def reset(self):
r"""Resets the environment for a new game.
Returns:
observation: dict, containing the full observation about the game at the
current step. *WARNING* This observation contains all the hands of the
players and should not be passed to the agents.
An example observation:
{'current_player': 0,
'player_observations': [{'current_player': 0,
'current_player_offset': 0,
'deck_size': 40,
'discard_pile': [],
'fireworks': {'B': 0,
'G': 0,
'R': 0,
'W': 0,
'Y': 0},
'information_tokens': 8,
'legal_moves': [{'action_type': 'PLAY',
'card_index': 0},
{'action_type': 'PLAY',
'card_index': 1},
{'action_type': 'PLAY',
'card_index': 2},
{'action_type': 'PLAY',
'card_index': 3},
{'action_type': 'PLAY',
'card_index': 4},
{'action_type':
'REVEAL_COLOR',
'color': 'R',
'target_offset': 1},
{'action_type':
'REVEAL_COLOR',
'color': 'G',
'target_offset': 1},
{'action_type':
'REVEAL_COLOR',
'color': 'B',
'target_offset': 1},
{'action_type': 'REVEAL_RANK',
'rank': 0,
'target_offset': 1},
{'action_type': 'REVEAL_RANK',
'rank': 1,
'target_offset': 1},
{'action_type': 'REVEAL_RANK',
'rank': 2,
'target_offset': 1}],
'life_tokens': 3,
'observed_hands': [[{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1}],
[{'color': 'G', 'rank': 2},
{'color': 'R', 'rank': 0},
{'color': 'R', 'rank': 1},
{'color': 'B', 'rank': 0},
{'color': 'R', 'rank':
1}]],
'num_players': 2,
'vectorized': [ 0, 0, 1, ... ]},
{'current_player': 0,
'current_player_offset': 1,
'deck_size': 40,
'discard_pile': [],
'fireworks': {'B': 0,
'G': 0,
'R': 0,
'W': 0,
'Y': 0},
'information_tokens': 8,
'legal_moves': [],
'life_tokens': 3,
'observed_hands': [[{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1}],
[{'color': 'W', 'rank': 2},
{'color': 'Y', 'rank': 4},
{'color': 'Y', 'rank': 2},
{'color': 'G', 'rank': 0},
{'color': 'W', 'rank':
1}]],
'num_players': 2,
'vectorized': [ 0, 0, 1, ... ]}]}
"""
self.state = self.game.new_initial_state()
while self.state.cur_player() == pyhanabi.CHANCE_PLAYER_ID:
self.state.deal_random_card()
obs = self._make_observation_all_players()
obs["current_player"] = self.state.cur_player()
return obs
def vectorized_observation_shape(self):
"""Returns the shape of the vectorized observation.
Returns:
A list of integer dimensions describing the observation shape.
"""
return self.observation_encoder.shape()
def num_moves(self):
"""Returns the total number of moves in this game (legal or not).
Returns:
Integer, number of moves.
"""
return self.game.max_moves()
def step(self, action):
"""Take one step in the game.
Args:
action: dict, mapping to a legal action taken by an agent. The following
actions are supported:
- { 'action_type': 'PLAY', 'card_index': int }
- { 'action_type': 'DISCARD', 'card_index': int }
- {
'action_type': 'REVEAL_COLOR',
'color': str,
'target_offset': int >=0
}
- {
'action_type': 'REVEAL_RANK',
'rank': int,
'target_offset': int >=0
}
Alternatively, action may be an int in range [0, num_moves()).
Returns:
observation: dict, containing the full observation about the game at the
current step. *WARNING* This observation contains all the hands of the
players and should not be passed to the agents.
An example observation:
{'current_player': 0,
'player_observations': [{'current_player': 0,
'current_player_offset': 0,
'deck_size': 40,
'discard_pile': [],
'fireworks': {'B': 0,
'G': 0,
'R': 0,
'W': 0,
'Y': 0},
'information_tokens': 8,
'legal_moves': [{'action_type': 'PLAY',
'card_index': 0},
{'action_type': 'PLAY',
'card_index': 1},
{'action_type': 'PLAY',
'card_index': 2},
{'action_type': 'PLAY',
'card_index': 3},
{'action_type': 'PLAY',
'card_index': 4},
{'action_type': 'REVEAL_COLOR',
'color': 'R',
'target_offset': 1},
{'action_type': 'REVEAL_COLOR',
'color': 'G',
'target_offset': 1},
{'action_type': 'REVEAL_COLOR',
'color': 'B',
'target_offset': 1},
{'action_type': 'REVEAL_RANK',
'rank': 0,
'target_offset': 1},
{'action_type': 'REVEAL_RANK',
'rank': 1,
'target_offset': 1},
{'action_type': 'REVEAL_RANK',
'rank': 2,
'target_offset': 1}],
'life_tokens': 3,
'observed_hands': [[{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1}],
[{'color': 'G', 'rank': 2},
{'color': 'R', 'rank': 0},
{'color': 'R', 'rank': 1},
{'color': 'B', 'rank': 0},
{'color': 'R', 'rank': 1}]],
'num_players': 2,
'vectorized': [ 0, 0, 1, ... ]},
{'current_player': 0,
'current_player_offset': 1,
'deck_size': 40,
'discard_pile': [],
'fireworks': {'B': 0,
'G': 0,
'R': 0,
'W': 0,
'Y': 0},
'information_tokens': 8,
'legal_moves': [],
'life_tokens': 3,
'observed_hands': [[{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1}],
[{'color': 'W', 'rank': 2},
{'color': 'Y', 'rank': 4},
{'color': 'Y', 'rank': 2},
{'color': 'G', 'rank': 0},
{'color': 'W', 'rank': 1}]],
'num_players': 2,
'vectorized': [ 0, 0, 1, ... ]}]}
reward: float, Reward obtained from taking the action.
done: bool, Whether the game is done.
info: dict, Optional debugging information.
Raises:
AssertionError: When an illegal action is provided.
"""
if isinstance(action, dict):
# Convert dict action into a HanabiMove.
action = self._build_move(action)
elif isinstance(action, int):
# Convert int action into a Hanabi move.
action = self.game.get_move(action)
else:
raise ValueError("Expected action as dict or int, got: {}".format(
action))
last_score = self.state.score()
# Apply the action to the state.
self.state.apply_move(action)
while self.state.cur_player() == pyhanabi.CHANCE_PLAYER_ID:
self.state.deal_random_card()
observation = self._make_observation_all_players()
done = self.state.is_terminal()
# Reward is score differential. May be large and negative at game end.
reward = self.state.score() - last_score
info = {}
return (observation, reward, done, info)
def _make_observation_all_players(self):
"""Make observation for all players.
Returns:
dict, containing observations for all players.
"""
obs = {}
player_observations = [self._extract_dict_from_backend(
player_id, self.state.observation(player_id))
for player_id in range(self.players)] # pylint: disable=bad-continuation
obs["player_observations"] = player_observations
obs["current_player"] = self.state.cur_player()
return obs
def _extract_dict_from_backend(self, player_id, observation):
"""Extract a dict of features from an observation from the backend.
Args:
player_id: Int, player from whose perspective we generate the observation.
observation: A `pyhanabi.HanabiObservation` object.
Returns:
obs_dict: dict, mapping from HanabiObservation to a dict.
"""
obs_dict = {}
obs_dict["current_player"] = self.state.cur_player()
obs_dict["current_player_offset"] = observation.cur_player_offset()
obs_dict["life_tokens"] = observation.life_tokens()
obs_dict["information_tokens"] = observation.information_tokens()
obs_dict["num_players"] = observation.num_players()
obs_dict["deck_size"] = observation.deck_size()
obs_dict["fireworks"] = {}
fireworks = self.state.fireworks()
for color, firework in zip(pyhanabi.COLOR_CHAR, fireworks):
obs_dict["fireworks"][color] = firework
obs_dict["legal_moves"] = []
obs_dict["legal_moves_as_int"] = []
for move in observation.legal_moves():
obs_dict["legal_moves"].append(move.to_dict())
obs_dict["legal_moves_as_int"].append(self.game.get_move_uid(move))
obs_dict["observed_hands"] = []
for player_hand in observation.observed_hands():
cards = [card.to_dict() for card in player_hand]
obs_dict["observed_hands"].append(cards)
obs_dict["discard_pile"] = [
card.to_dict() for card in observation.discard_pile()
]
# Return hints received.
obs_dict["card_knowledge"] = []
for player_hints in observation.card_knowledge():
player_hints_as_dicts = []
for hint in player_hints:
hint_d = {}
if hint.color() is not None:
hint_d["color"] = pyhanabi.color_idx_to_char(hint.color())
else:
hint_d["color"] = None
hint_d["rank"] = hint.rank()
player_hints_as_dicts.append(hint_d)
obs_dict["card_knowledge"].append(player_hints_as_dicts)
obs_dict["vectorized"] = self.observation_encoder.encode(observation)
obs_dict["pyhanabi"] = observation
return obs_dict
def _build_move(self, action):
"""Build a move from an action dict.
Args:
action: dict, mapping to a legal action taken by an agent. The following
actions are supported:
- { 'action_type': 'PLAY', 'card_index': int }
- { 'action_type': 'DISCARD', 'card_index': int }
- {
'action_type': 'REVEAL_COLOR',
'color': str,
'target_offset': int >=0
}
- {
'action_type': 'REVEAL_RANK',
'rank': int,
'target_offset': int >=0
}
Returns:
move: A `HanabiMove` object constructed from action.
Raises:
ValueError: Unknown action type.
"""
assert isinstance(action, dict), "Expected dict, got: {}".format(action)
assert "action_type" in action, ("Action should contain `action_type`. "
"action: {}").format(action)
action_type = action["action_type"]
assert (action_type in MOVE_TYPES), (
"action_type: {} should be one of: {}".format(action_type, MOVE_TYPES))
if action_type == "PLAY":
card_index = action["card_index"]
move = pyhanabi.HanabiMove.get_play_move(card_index=card_index)
elif action_type == "DISCARD":
card_index = action["card_index"]
move = pyhanabi.HanabiMove.get_discard_move(card_index=card_index)
elif action_type == "REVEAL_RANK":
target_offset = action["target_offset"]
rank = action["rank"]
move = pyhanabi.HanabiMove.get_reveal_rank_move(
target_offset=target_offset, rank=rank)
elif action_type == "REVEAL_COLOR":
target_offset = action["target_offset"]
assert isinstance(action["color"], str)
color = color_char_to_idx(action["color"])
move = pyhanabi.HanabiMove.get_reveal_color_move(
target_offset=target_offset, color=color)
else:
raise ValueError("Unknown action_type: {}".format(action_type))
legal_moves = self.state.legal_moves()
assert (str(move) in map(
str,
legal_moves)), "Illegal action: {}. Move should be one of : {}".format(
move, legal_moves)
return move
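A minimal end-to-end sketch of the loop described in the docstrings above, under the assumption that this module is importable as `hanabi_learning_environment.rl_env`; the policy here is a placeholder that always plays the current player's first legal move.
```python
from hanabi_learning_environment import rl_env

env = rl_env.make('Hanabi-Full', num_players=2)
observation = env.reset()
done = False
while not done:
  current = observation['current_player']
  # Placeholder policy: take the first legal move of the acting player.
  action = observation['player_observations'][current]['legal_moves'][0]
  observation, reward, done, info = env.step(action)
```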
def make(environment_name="Hanabi-Full", num_players=2, pyhanabi_path=None):
"""Make an environment.
Args:
environment_name: str, Name of the environment to instantiate.
num_players: int, Number of players in this game.
pyhanabi_path: str, absolute path to header files for c code linkage.
Returns:
env: An `Environment` object.
Raises:
ValueError: Unknown environment name.
"""
if pyhanabi_path is not None:
prefixes = (pyhanabi_path,)
assert pyhanabi.try_cdef(prefixes=prefixes), "cdef failed to load"
assert pyhanabi.try_load(prefixes=prefixes), "library failed to load"
if (environment_name == "Hanabi-Full" or
environment_name == "Hanabi-Full-CardKnowledge"):
return HanabiEnv(
    config={
        "colors": 5,
        "ranks": 5,
        "players": num_players,
        "max_information_tokens": 8,
        "max_life_tokens": 3,
        "observation_type":
            pyhanabi.AgentObservationType.CARD_KNOWLEDGE.value
    })
elif environment_name == "Hanabi-Full-Minimal":
return HanabiEnv(
config={
"colors": 5,
"ranks": 5,
"players": num_players,
"max_information_tokens": 8,
"max_life_tokens": 3,
"observation_type": pyhanabi.AgentObservationType.MINIMAL.value
})
elif environment_name == "Hanabi-Small":
return HanabiEnv(
    config={
        "colors": 2,
        "ranks": 5,
        "players": num_players,
        "hand_size": 2,
        "max_information_tokens": 3,
        "max_life_tokens": 1,
        "observation_type":
            pyhanabi.AgentObservationType.CARD_KNOWLEDGE.value
    })
elif environment_name == "Hanabi-Very-Small":
return HanabiEnv(
    config={
        "colors": 1,
        "ranks": 5,
        "players": num_players,
        "hand_size": 2,
        "max_information_tokens": 3,
        "max_life_tokens": 1,
        "observation_type":
            pyhanabi.AgentObservationType.CARD_KNOWLEDGE.value
    })
else:
raise ValueError("Unknown environment {}".format(environment_name))
#-------------------------------------------------------------------------------
# Hanabi Agent API
#-------------------------------------------------------------------------------
class Agent(object):
"""Agent interface.
All concrete implementations of an Agent should derive from this interface
and implement the method stubs.
```python
class MyAgent(Agent):
...
agents = [MyAgent(config) for _ in range(players)]
while not done:
...
for agent_id, agent in enumerate(agents):
action = agent.act(observation)
if observation['current_player'] == agent_id:
  assert action is not None
else:
  assert action is None
...
```
"""
def __init__(self, config, *args, **kwargs):
r"""Initialize the agent.
Args:
config: dict, With parameters for the game. Config takes the following
keys and values.
- colors: int, Number of colors \in [2,5].
- ranks: int, Number of ranks \in [2,5].
- players: int, Number of players \in [2,5].
- hand_size: int, Hand size \in [4,5].
- max_information_tokens: int, Number of information tokens (>=0).
- max_life_tokens: int, Number of life tokens (>=1).
- seed: int, Random seed.
- random_start_player: bool, Random start player.
*args: Optional arguments
**kwargs: Optional keyword arguments.
Raises:
AgentError: Custom exceptions.
"""
raise NotImplementedError("Not implemeneted in abstract base class.")
def reset(self, config):
r"""Reset the agent with a new config.
Signals agent to reset and restart using a config dict.
Args:
config: dict, With parameters for the game. Config takes the following
keys and values.
- colors: int, Number of colors \in [2,5].
- ranks: int, Number of ranks \in [2,5].
- players: int, Number of players \in [2,5].
- hand_size: int, Hand size \in [4,5].
- max_information_tokens: int, Number of information tokens (>=0).
- max_life_tokens: int, Number of life tokens (>=1).
- seed: int, Random seed.
- random_start_player: bool, Random start player.
"""
raise NotImplementedError("Not implemeneted in abstract base class.")
def act(self, observation):
"""Act based on an observation.
Args:
observation: dict, containing observation from the view of this agent.
An example:
{'current_player': 0,
'current_player_offset': 1,
'deck_size': 40,
'discard_pile': [],
'fireworks': {'B': 0,
'G': 0,
'R': 0,
'W': 0,
'Y': 0},
'information_tokens': 8,
'legal_moves': [],
'life_tokens': 3,
'observed_hands': [[{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1}],
[{'color': 'W', 'rank': 2},
{'color': 'Y', 'rank': 4},
{'color': 'Y', 'rank': 2},
{'color': 'G', 'rank': 0},
{'color': 'W', 'rank': 1}]],
'num_players': 2}
Returns:
action: dict, mapping to a legal action taken by this agent. The following
actions are supported:
- { 'action_type': 'PLAY', 'card_index': int }
- { 'action_type': 'DISCARD', 'card_index': int }
- {
'action_type': 'REVEAL_COLOR',
'color': str,
'target_offset': int >=0
}
- {
'action_type': 'REVEAL_RANK',
'rank': int,
'target_offset': int >=0
}
"""
raise NotImplementedError("Not implemented in Abstract Base class")
|
hanabi-learning-environment-master
|
hanabi_learning_environment/rl_env.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Random Agent."""
import random
from hanabi_learning_environment.rl_env import Agent
class RandomAgent(Agent):
"""Agent that takes random legal actions."""
def __init__(self, config, *args, **kwargs):
"""Initialize the agent."""
self.config = config
def act(self, observation):
"""Act based on an observation."""
if observation['current_player_offset'] == 0:
return random.choice(observation['legal_moves'])
else:
return None
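A sketch of driving one episode with `RandomAgent` through the `rl_env` API shown earlier; the imports and the two-player configuration are illustrative assumptions.
```python
from hanabi_learning_environment import rl_env
from hanabi_learning_environment.agents.random_agent import RandomAgent

env = rl_env.make('Hanabi-Full', num_players=2)
agents = [RandomAgent({'players': 2}) for _ in range(2)]
observation = env.reset()
done = False
while not done:
  for agent_id, agent in enumerate(agents):
    # Each agent sees only its own view; non-acting agents return None.
    action = agent.act(observation['player_observations'][agent_id])
    if agent_id == observation['current_player']:
      current_action = action
  observation, reward, done, info = env.step(current_action)
```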
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/random_agent.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Agent."""
from hanabi_learning_environment.rl_env import Agent
class SimpleAgent(Agent):
"""Agent that applies a simple heuristic."""
def __init__(self, config, *args, **kwargs):
"""Initialize the agent."""
self.config = config
# Extract max info tokens or set default to 8.
self.max_information_tokens = config.get('max_information_tokens', 8)
@staticmethod
def playable_card(card, fireworks):
"""A card is playable if it can be placed on the fireworks pile."""
return card['rank'] == fireworks[card['color']]
def act(self, observation):
"""Act based on an observation."""
if observation['current_player_offset'] != 0:
return None
# Check if there are any pending hints and play the card corresponding to
# the hint.
for card_index, hint in enumerate(observation['card_knowledge'][0]):
if hint['color'] is not None or hint['rank'] is not None:
return {'action_type': 'PLAY', 'card_index': card_index}
# Check if it's possible to hint a card to your colleagues.
fireworks = observation['fireworks']
if observation['information_tokens'] > 0:
# Check if there are any playable cards in the hands of the opponents.
for player_offset in range(1, observation['num_players']):
player_hand = observation['observed_hands'][player_offset]
player_hints = observation['card_knowledge'][player_offset]
# Check if the card in the hand of the opponent is playable.
for card, hint in zip(player_hand, player_hints):
if SimpleAgent.playable_card(card,
fireworks) and hint['color'] is None:
return {
'action_type': 'REVEAL_COLOR',
'color': card['color'],
'target_offset': player_offset
}
# If no card is hintable then discard or play.
if observation['information_tokens'] < self.max_information_tokens:
return {'action_type': 'DISCARD', 'card_index': 0}
else:
return {'action_type': 'PLAY', 'card_index': 0}
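The heuristic above hinges on `playable_card`: a card is playable exactly when its rank equals the current firework height of its color. A small illustrative check:
```python
fireworks = {'B': 0, 'G': 1, 'R': 0, 'W': 0, 'Y': 0}
print(SimpleAgent.playable_card({'color': 'G', 'rank': 1}, fireworks))  # True: G0 already played
print(SimpleAgent.playable_card({'color': 'G', 'rank': 0}, fireworks))  # False: G0 would repeat
```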
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/simple_agent.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/__init__.py
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors and Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# This file is a fork of the original Dopamine code incorporating changes for
# the multiplayer setting and the Hanabi Learning Environment.
#
"""The standard DQN replay memory.
This implementation is an out-of-graph replay memory + in-graph wrapper. It
supports vanilla n-step updates of the form typically found in the literature,
i.e. where rewards are accumulated for n steps and the intermediate trajectory
is not exposed to the agent. This does not allow, for example, performing
off-policy corrections.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import math
import os
import pickle
import gin.tf
import numpy as np
import tensorflow as tf
# This constant determines how many iterations a checkpoint is kept for.
CHECKPOINT_DURATION = 4
MAX_SAMPLE_ATTEMPTS = 1000000
def invalid_range(cursor, replay_capacity, stack_size):
"""Returns an array with all the indices invalidated by cursor.
It handles special cases in a circular buffer in the beginning and the end.
Args:
cursor: int, The position of the cursor.
replay_capacity: int, The size of the replay memory.
stack_size: int, The size of the stacks returned by the replay memory.
Returns:
`np.array` of size stack_size with the invalid indices.
"""
assert cursor < replay_capacity
return np.array(
[(cursor - 1 + i) % replay_capacity for i in range(stack_size)])
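For intuition, a concrete call with arbitrary values: a cursor at position 5 in a capacity-10 buffer with stack size 4 invalidates the indices straddling the cursor.
```python
print(invalid_range(cursor=5, replay_capacity=10, stack_size=4))
# -> [4 5 6 7]
```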
class OutOfGraphReplayMemory(object):
"""A simple out of graph replay memory.
Stores transitions (i.e. state, action, reward, next_state, terminal)
efficiently when the states consist of stacks. The writing behaves like
a FIFO buffer and the sampling is uniformly random.
Attributes:
add_count: counter of how many transitions have been added.
observations: `np.array`, circular buffer of observations.
actions: `np.array`, circular buffer of actions.
rewards: `np.array`, circular buffer of rewards.
terminals: `np.array`, circular buffer of terminals.
legal_actions: `np.array`, circular buffer of legal actions for hanabi.
invalid_range: `np.array`, currently invalid indices.
"""
def __init__(self, num_actions, observation_size, stack_size, replay_capacity,
batch_size, update_horizon=1, gamma=1.0):
"""Data structure doing the heavy lifting.
Args:
num_actions: int, number of possible actions.
observation_size: int, size of an input frame.
stack_size: int, number of frames to use in state stack.
replay_capacity: int, number of transitions to keep in memory.
batch_size: int, batch size.
update_horizon: int, length of update ('n' in n-step update).
gamma: float, the discount factor.
"""
self._observation_size = observation_size
self._num_actions = num_actions
self._replay_capacity = replay_capacity
self._batch_size = batch_size
self._stack_size = stack_size
self._update_horizon = update_horizon
self._gamma = gamma
# When the horizon is > 1, we compute the sum of discounted rewards as a dot
# product using the precomputed vector <gamma^0, gamma^1, ..., gamma^{n-1}>.
self._cumulative_discount_vector = np.array(
[math.pow(self._gamma, n) for n in range(update_horizon)],
dtype=np.float32)
# Create numpy arrays used to store sampled transitions.
self.observations = np.empty(
(replay_capacity, observation_size), dtype=np.uint8)
self.actions = np.empty((replay_capacity), dtype=np.int32)
self.rewards = np.empty((replay_capacity), dtype=np.float32)
self.terminals = np.empty((replay_capacity), dtype=np.uint8)
self.legal_actions = np.empty((replay_capacity, num_actions),
dtype=np.float32)
self.reset_state_batch_arrays(batch_size)
self.add_count = np.array(0)
self.invalid_range = np.zeros((self._stack_size))
def add(self, observation, action, reward, terminal, legal_actions):
"""Adds a transition to the replay memory.
Since the next_observation in the transition will be the observation added
next there is no need to pass it.
If the replay memory is at capacity the oldest transition will be discarded.
Args:
observation: `np.array` uint8, (observation_size).
action: uint8, indicating the action in the transition.
reward: float, indicating the reward received in the transition.
terminal: uint8, acting as a boolean indicating whether the transition
was terminal (1) or not (0).
legal_actions: Binary vector indicating legal actions (1 == legal).
"""
if self.is_empty() or self.terminals[self.cursor() - 1] == 1:
dummy_observation = np.zeros((self._observation_size))
dummy_legal_actions = np.zeros((self._num_actions))
for _ in range(self._stack_size - 1):
self._add(dummy_observation, 0, 0, 0, dummy_legal_actions)
self._add(observation, action, reward, terminal, legal_actions)
def _add(self, observation, action, reward, terminal, legal_actions):
cursor = self.cursor()
self.observations[cursor] = observation
self.actions[cursor] = action
self.rewards[cursor] = reward
self.terminals[cursor] = terminal
self.legal_actions[cursor] = legal_actions
self.add_count += 1
self.invalid_range = invalid_range(self.cursor(), self._replay_capacity,
self._stack_size)
def is_empty(self):
"""Is the replay memory empty?"""
return self.add_count == 0
def is_full(self):
"""Is the replay memory full?"""
return self.add_count >= self._replay_capacity
def cursor(self):
"""Index to the location where the next transition will be written."""
return self.add_count % self._replay_capacity
def get_stack(self, array, index):
"""Returns the stack of array at the index.
Args:
array: `np.array`, to get the stack from.
index: int, index to the first terminal in the stack to be returned.
Returns:
`Tensor` with shape (stack_size)
"""
assert index >= 0
assert index < self._replay_capacity
if not self.is_full():
assert index < self.cursor(), 'Index %i has not been added.' % index
assert index >= self._stack_size - 1, ('Not enough elements to sample '
'index %i' % index)
# Fast slice read.
if index >= self._stack_size - 1:
stack = array[(index - self._stack_size + 1):(index + 1), ...]
# Slow list read.
else:
indices = [(index - i) % self._replay_capacity
for i in reversed(range(self._stack_size))]
stack = array[indices, ...]
return stack
def get_observation_stack(self, index):
state = self.get_stack(self.observations, index)
return np.transpose(state, [1, 0])
def get_terminal_stack(self, index):
return self.get_stack(self.terminals, index)
def is_valid_transition(self, index):
"""Checks if the index contains a valid transition.
The index range needs to be valid and it must not collide with the end of an
episode.
Args:
index: int, index to the state in the transition. Note that next_state
must also be valid.
Returns:
bool, True if transition is valid.
"""
# Range checks
if index < 0 or index >= self._replay_capacity:
return False
if not self.is_full():
# The indices and next_indices must be smaller than the cursor.
if index >= self.cursor() - self._update_horizon:
return False
# The first few indices contain the padding states of the first episode.
if index < self._stack_size - 1:
return False
# Skip transitions that straddle the cursor.
if index in set(self.invalid_range):
return False
# If there are terminal flags in any other frame other than the last one
# the stack is not valid, so don't sample it.
if self.get_terminal_stack(index)[:-1].any():
return False
return True
def reset_state_batch_arrays(self, batch_size):
self._next_state_batch = np.empty(
(batch_size, self._observation_size, self._stack_size), dtype=np.uint8)
self._state_batch = np.empty(
(batch_size, self._observation_size, self._stack_size), dtype=np.uint8)
def sample_index_batch(self, batch_size):
"""Returns a batch of valid indices.
Args:
batch_size: int, number of indices returned.
Returns:
list of batch_size, containing valid indices.
Raises:
Exception: If the batch was not constructed after maximum number of tries.
"""
indices = []
attempt_count = 0
while len(indices) < batch_size and attempt_count < MAX_SAMPLE_ATTEMPTS:
attempt_count += 1
# index references the state and index + 1 points to next_state
if self.is_full():
index = np.random.randint(0, self._replay_capacity)
else:
# Can't start at 0 because the buffer is not yet circular
index = np.random.randint(self._stack_size - 1, self.cursor() - 1)
if self.is_valid_transition(index):
indices.append(index)
if len(indices) != batch_size:
raise Exception('Tried %i times but only sampled %i valid transitions' %
                (MAX_SAMPLE_ATTEMPTS, len(indices)))
return indices
def sample_transition_batch(self, batch_size=None, indices=None):
"""Returns a batch of transitions.
Args:
batch_size: int, number of transitions returned. If None the batch_size
defined at init will be used.
indices: list of ints. If not None, use the given indices instead of
sampling them.
Returns:
Minibatch of transitions: A tuple with elements state_batch,
action_batch, reward_batch, next_state_batch and terminal_batch. The
shape of state_batch and next_state_batch is (minibatch_size,
observation_size, stack_size) and the rest of tensors have
shape (minibatch_size)
"""
if batch_size is None:
batch_size = self._batch_size
if batch_size != self._state_batch.shape[0]:
self.reset_state_batch_arrays(batch_size)
if not self.is_full():
assert self.add_count >= batch_size, (
'Not enough transitions to sample. You need to call add at '
'least %i (batch_size) times (currently it is %i).' % (batch_size,
                                                        self.add_count))
if indices is None:
indices = self.sample_index_batch(batch_size)
assert len(indices) == batch_size
action_batch = self.actions[indices]
reward_batch = np.empty((batch_size), dtype=np.float32)
terminal_batch = np.empty((batch_size), dtype=np.uint8)
indices_batch = np.empty((batch_size), dtype=np.int32)
next_legal_actions_batch = np.empty((batch_size, self._num_actions),
dtype=np.float32)
for batch_element, memory_index in enumerate(indices):
indices_batch[batch_element] = memory_index
self._state_batch[batch_element] = self.get_observation_stack(
memory_index)
# Compute indices in the replay memory up to n steps ahead.
trajectory_indices = [(memory_index + j) % self._replay_capacity for
j in range(self._update_horizon)]
# Determine if this trajectory segment contains a terminal state.
terminals_in_trajectory = np.nonzero(self.terminals[trajectory_indices])
if terminals_in_trajectory[0].size == 0:
# If not, sum rewards along the trajectory, properly discounted.
terminal_batch[batch_element] = 0
reward_batch[batch_element] = self._cumulative_discount_vector.dot(
self.rewards[trajectory_indices])
else:
# Updates leading to a terminal state require a little more care, in
# particular to avoid summing rewards past the end of the episode.
terminal_batch[batch_element] = 1
# Fetch smallest index corresponding to a terminal state.
terminal_index = np.min(terminals_in_trajectory)
truncated_discount_vector = (
self._cumulative_discount_vector[0:terminal_index + 1])
reward_batch[batch_element] = truncated_discount_vector.dot(
self.rewards[trajectory_indices[0:terminal_index + 1]])
bootstrap_state_index = (
(memory_index + self._update_horizon) % self._replay_capacity)
self._next_state_batch[batch_element] = (
self.get_observation_stack(bootstrap_state_index))
next_legal_actions_batch[batch_element] = (
self.legal_actions[bootstrap_state_index])
return (self._state_batch, action_batch, reward_batch,
self._next_state_batch, terminal_batch, indices_batch,
next_legal_actions_batch)
def _generate_filename(self, checkpoint_dir, name, suffix):
return os.path.join(checkpoint_dir, '{}_ckpt.{}.gz'.format(name, suffix))
def save(self, checkpoint_dir, iteration_number):
"""Save the python replay memory attributes into a file.
This method will save all the replay memory's state in a single file.
Args:
checkpoint_dir: str, directory where numpy checkpoint files should be
saved.
iteration_number: int, iteration_number to use as a suffix in naming numpy
checkpoint files.
"""
if not tf.gfile.Exists(checkpoint_dir):
return
for attr in self.__dict__:
if not attr.startswith('_'):
filename = self._generate_filename(checkpoint_dir, attr,
iteration_number)
with tf.gfile.Open(filename, 'wb') as f:
with gzip.GzipFile(fileobj=f) as outfile:
# Checkpoint numpy arrays directly with np.save to avoid excessive
# memory usage. This is particularly important for the observations
# data.
if isinstance(self.__dict__[attr], np.ndarray):
np.save(outfile, self.__dict__[attr], allow_pickle=False)
else:
pickle.dump(self.__dict__[attr], outfile)
# After writing a checkpoint file, we garbage collect the checkpoint file
# that is four versions old.
stale_iteration_number = iteration_number - CHECKPOINT_DURATION
if stale_iteration_number >= 0:
stale_filename = self._generate_filename(checkpoint_dir, attr,
stale_iteration_number)
try:
tf.gfile.Remove(stale_filename)
except tf.errors.NotFoundError:
pass
def load(self, checkpoint_dir, suffix):
"""Restores the object from bundle_dictionary and numpy checkpoints.
Args:
checkpoint_dir: str, directory where to read the numpy checkpointed files
from.
suffix: str, suffix to use in numpy checkpoint files.
Raises:
NotFoundError: if all expected files are not found in directory.
"""
# We will first make sure we have all the necessary files available to avoid
# loading a partially-specified (i.e. corrupted) replay buffer.
for attr in self.__dict__:
if attr.startswith('_'):
continue
filename = self._generate_filename(checkpoint_dir, attr, suffix)
if not tf.gfile.Exists(filename):
raise tf.errors.NotFoundError(None, None,
'Missing file: {}'.format(filename))
# If we've reached this point then we have verified that all expected files
# are available.
for attr in self.__dict__:
if attr.startswith('_'):
continue
filename = self._generate_filename(checkpoint_dir, attr, suffix)
with tf.gfile.Open(filename, 'rb') as f:
with gzip.GzipFile(fileobj=f) as infile:
if isinstance(self.__dict__[attr], np.ndarray):
self.__dict__[attr] = np.load(infile, allow_pickle=False)
else:
self.__dict__[attr] = pickle.load(infile)
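A toy-sized sketch of using the out-of-graph memory directly; all sizes and values below are illustrative, not defaults.
```python
import numpy as np

memory = OutOfGraphReplayMemory(
    num_actions=4, observation_size=8, stack_size=1,
    replay_capacity=100, batch_size=2, update_horizon=1, gamma=0.99)
for t in range(10):
  obs = np.random.randint(0, 2, size=8).astype(np.uint8)
  legal = np.ones(4, dtype=np.float32)
  memory.add(obs, action=t % 4, reward=1.0, terminal=0, legal_actions=legal)
(states, actions, rewards, next_states,
 terminals, indices, next_legal) = memory.sample_transition_batch(batch_size=2)
print(states.shape)  # (2, 8, 1): (batch_size, observation_size, stack_size)
```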
@gin.configurable(denylist=['observation_size', 'stack_size'])
class WrappedReplayMemory(object):
"""In-graph wrapper for the python replay memory.
Usage:
To add a transition: run the operation add_transition_op
(and feed all the placeholders in add_transition_ph).
To sample a batch: Construct operations that depend on any of the
sampling tensors. Every sess.run using any of these
tensors will sample a new transition.
When using staging: Need to prefetch the next batch with each train_op by
calling self.prefetch_batch.
Every time this op is called, a new transition batch is prefetched.
Attributes:
The following tensors are sampled randomly each sess.run:
states actions rewards next_states terminals
add_transition_op: tf operation to add a transition to the replay memory.
All the following placeholders need to be fed. add_obs_ph add_action_ph
add_reward_ph add_terminal_ph
"""
def __init__(self,
num_actions,
observation_size,
stack_size,
use_staging=True,
replay_capacity=1000000,
batch_size=32,
update_horizon=1,
gamma=1.0,
wrapped_memory=None):
"""Initializes a graph wrapper for the python replay memory.
Args:
num_actions: int, number of possible actions.
observation_size: int, size of an input frame.
stack_size: int, number of frames to use in state stack.
use_staging: bool, when True it would use a staging area to prefetch the
next sampling batch.
replay_capacity: int, number of transitions to keep in memory.
batch_size: int.
update_horizon: int, length of update ('n' in n-step update).
gamma: float, the discount factor.
wrapped_memory: The 'inner' memory data structure. Defaults to None, which
creates the standard DQN replay memory.
Raises:
ValueError: If update_horizon is not positive.
ValueError: If discount factor is not in [0, 1].
"""
if replay_capacity < update_horizon + 1:
raise ValueError('Update horizon (%i) should be significantly smaller '
'than replay capacity (%i).'
% (update_horizon, replay_capacity))
if not update_horizon >= 1:
raise ValueError('Update horizon must be positive.')
if not 0.0 <= gamma <= 1.0:
raise ValueError('Discount factor (gamma) must be in [0, 1].')
# Allow subclasses to create self.memory.
if wrapped_memory is not None:
self.memory = wrapped_memory
else:
self.memory = OutOfGraphReplayMemory(
num_actions, observation_size, stack_size,
replay_capacity, batch_size, update_horizon, gamma)
with tf.name_scope('replay'):
with tf.name_scope('add_placeholders'):
self.add_obs_ph = tf.placeholder(
tf.uint8, [observation_size], name='add_obs_ph')
self.add_action_ph = tf.placeholder(tf.int32, [], name='add_action_ph')
self.add_reward_ph = tf.placeholder(
tf.float32, [], name='add_reward_ph')
self.add_terminal_ph = tf.placeholder(
tf.uint8, [], name='add_terminal_ph')
self.add_legal_actions_ph = tf.placeholder(
tf.float32, [num_actions], name='add_legal_actions_ph')
add_transition_ph = [
self.add_obs_ph, self.add_action_ph, self.add_reward_ph,
self.add_terminal_ph, self.add_legal_actions_ph
]
with tf.device('/cpu:*'):
self.add_transition_op = tf.py_func(
self.memory.add, add_transition_ph, [], name='replay_add_py_func')
self.transition = tf.py_func(
self.memory.sample_transition_batch, [],
[tf.uint8, tf.int32, tf.float32, tf.uint8, tf.uint8, tf.int32,
tf.float32],
name='replay_sample_py_func')
if use_staging:
# To hide the py_func latency use a staging area to pre-fetch the next
# batch of transitions.
(states, actions, rewards, next_states,
terminals, indices, next_legal_actions) = self.transition
# StagingArea requires all the shapes to be defined.
states.set_shape([batch_size, observation_size, stack_size])
actions.set_shape([batch_size])
rewards.set_shape([batch_size])
next_states.set_shape(
[batch_size, observation_size, stack_size])
terminals.set_shape([batch_size])
indices.set_shape([batch_size])
next_legal_actions.set_shape([batch_size, num_actions])
# Create the staging area in CPU.
prefetch_area = tf.contrib.staging.StagingArea(
[tf.uint8, tf.int32, tf.float32, tf.uint8, tf.uint8, tf.int32,
tf.float32])
self.prefetch_batch = prefetch_area.put(
(states, actions, rewards, next_states, terminals, indices,
next_legal_actions))
else:
self.prefetch_batch = tf.no_op()
if use_staging:
# Get the sample_transition_batch in GPU. This would do the copy from
# CPU to GPU.
self.transition = prefetch_area.get()
(self.states, self.actions, self.rewards, self.next_states,
self.terminals, self.indices, self.next_legal_actions) = self.transition
# Since these are py_func tensors, no information about their shape is
# present. Setting the shape only for the necessary tensors
self.states.set_shape([None, observation_size, stack_size])
self.next_states.set_shape([None, observation_size, stack_size])
def save(self, checkpoint_dir, iteration_number):
"""Save the underlying replay memory's contents in a file.
Args:
checkpoint_dir: str, directory from where to read the numpy checkpointed
files.
iteration_number: int, iteration_number to use as a suffix in naming
numpy checkpoint files.
"""
self.memory.save(checkpoint_dir, iteration_number)
def load(self, checkpoint_dir, suffix):
"""Loads the replay memory's state from a saved file.
Args:
checkpoint_dir: str, directory from where to read the numpy checkpointed
files.
suffix: str, suffix to use in numpy checkpoint files.
"""
self.memory.load(checkpoint_dir, suffix)
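A sketch of feeding the wrapper from a TF1 session, using only the placeholders and ops defined above; `sess`, `replay` and the per-step variables are assumed to already exist in the caller.
```python
sess.run(replay.add_transition_op,
         feed_dict={
             replay.add_obs_ph: observation,           # uint8 vector of length observation_size
             replay.add_action_ph: action,             # int scalar
             replay.add_reward_ph: reward,             # float scalar
             replay.add_terminal_ph: int(done),        # 1 at episode end, else 0
             replay.add_legal_actions_ph: legal_mask,  # float vector of length num_actions
         })
# Any op built on replay.states / replay.actions / ... samples a fresh batch per sess.run.
```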
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/rainbow/replay_memory.py
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors and Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# This file is a fork of the original Dopamine code incorporating changes for
# the multiplayer setting and the Hanabi Learning Environment.
#
"""The entry point for running a Rainbow agent on Hanabi."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from third_party.dopamine import logger
import run_experiment
FLAGS = flags.FLAGS
flags.DEFINE_multi_string(
'gin_files', [],
'List of paths to gin configuration files (e.g.'
'"configs/hanabi_rainbow.gin").')
flags.DEFINE_multi_string(
'gin_bindings', [],
'Gin bindings to override the values set in the config files '
'(e.g. "DQNAgent.epsilon_train=0.1").')
flags.DEFINE_string('base_dir', None,
'Base directory to host all required sub-directories.')
flags.DEFINE_string('checkpoint_dir', '',
'Directory where checkpoint files should be saved. If '
'empty, no checkpoints will be saved.')
flags.DEFINE_string('checkpoint_file_prefix', 'ckpt',
'Prefix to use for the checkpoint files.')
flags.DEFINE_string('logging_dir', '',
'Directory where experiment data will be saved. If empty '
'no checkpoints will be saved.')
flags.DEFINE_string('logging_file_prefix', 'log',
'Prefix to use for the log files.')
def launch_experiment():
"""Launches the experiment.
Specifically:
- Load the gin configs and bindings.
- Initialize the Logger object.
- Initialize the environment.
- Initialize the observation stacker.
- Initialize the agent.
- Reload from the latest checkpoint, if available, and initialize the
Checkpointer object.
- Run the experiment.
"""
if FLAGS.base_dir is None:
raise ValueError('--base_dir is None: please provide a path for '
'logs and checkpoints.')
run_experiment.load_gin_configs(FLAGS.gin_files, FLAGS.gin_bindings)
experiment_logger = logger.Logger('{}/logs'.format(FLAGS.base_dir))
environment = run_experiment.create_environment()
obs_stacker = run_experiment.create_obs_stacker(environment)
agent = run_experiment.create_agent(environment, obs_stacker)
checkpoint_dir = '{}/checkpoints'.format(FLAGS.base_dir)
start_iteration, experiment_checkpointer = (
run_experiment.initialize_checkpointing(agent,
experiment_logger,
checkpoint_dir,
FLAGS.checkpoint_file_prefix))
run_experiment.run_experiment(agent, environment, start_iteration,
obs_stacker,
experiment_logger, experiment_checkpointer,
checkpoint_dir,
logging_file_prefix=FLAGS.logging_file_prefix)
def main(unused_argv):
"""This main function acts as a wrapper around a gin-configurable experiment.
Args:
unused_argv: Arguments (unused).
"""
launch_experiment()
if __name__ == '__main__':
app.run(main)
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/rainbow/train.py
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors and Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# This file is a fork of the original Dopamine code incorporating changes for
# the multiplayer setting and the Hanabi Learning Environment.
#
"""An implementation of Prioritized Experience Replay (PER).
This implementation is based on the paper "Prioritized Experience Replay"
by Tom Schaul et al. (2015). Many thanks to Tom Schaul, John Quan, and Matteo
Hessel for providing useful pointers on the algorithm and its implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from third_party.dopamine import sum_tree
import gin.tf
import numpy as np
import replay_memory
import tensorflow as tf
DEFAULT_PRIORITY = 100.0
class OutOfGraphPrioritizedReplayMemory(replay_memory.OutOfGraphReplayMemory):
"""An Out of Graph Replay Memory for Prioritized Experience Replay.
See replay_memory.py for details.
"""
def __init__(self, num_actions, observation_size, stack_size, replay_capacity,
batch_size, update_horizon=1, gamma=1.0):
"""This data structure does the heavy lifting in the replay memory.
Args:
num_actions: int, number of actions.
observation_size: int, size of an input observation.
stack_size: int, number of frames to use in state stack.
replay_capacity: int, number of transitions to keep in memory.
batch_size: int, batch size.
update_horizon: int, length of update ('n' in n-step update).
gamma: float, the discount factor.
"""
super(OutOfGraphPrioritizedReplayMemory, self).__init__(
num_actions=num_actions,
observation_size=observation_size, stack_size=stack_size,
replay_capacity=replay_capacity, batch_size=batch_size,
update_horizon=update_horizon, gamma=gamma)
self.sum_tree = sum_tree.SumTree(replay_capacity)
def add(self, observation, action, reward, terminal, legal_actions):
"""Adds a transition to the replay memory.
Since the next_observation in the transition will be the observation added
next there is no need to pass it.
If the replay memory is at capacity the oldest transition will be discarded.
Compared to OutOfGraphReplayMemory.add(), this version also sets the
priority of dummy frames to 0.
Args:
observation: `np.array` uint8, (observation_size).
action: int, indicating the action in the transition.
reward: float, indicating the reward received in the transition.
terminal: int, acting as a boolean indicating whether the transition
was terminal (1) or not (0).
legal_actions: Binary vector indicating legal actions (1 == legal).
"""
if self.is_empty() or self.terminals[self.cursor() - 1] == 1:
dummy_observation = np.zeros((self._observation_size))
dummy_legal_actions = np.zeros((self._num_actions))
for _ in range(self._stack_size - 1):
self._add(dummy_observation, 0, 0, 0, dummy_legal_actions, priority=0.0)
self._add(observation, action, reward, terminal, legal_actions,
priority=DEFAULT_PRIORITY)
def _add(self, observation, action, reward, terminal, legal_actions,
priority=DEFAULT_PRIORITY):
new_element_index = self.cursor()
super(OutOfGraphPrioritizedReplayMemory, self)._add(
observation, action, reward, terminal, legal_actions)
self.sum_tree.set(new_element_index, priority)
def sample_index_batch(self, batch_size):
"""Returns a batch of valid indices.
Args:
batch_size: int, number of indices returned.
Returns:
List of size batch_size containing valid indices.
Raises:
Exception: If the batch was not constructed after maximum number of tries.
"""
indices = []
allowed_attempts = replay_memory.MAX_SAMPLE_ATTEMPTS
while len(indices) < batch_size and allowed_attempts > 0:
index = self.sum_tree.sample()
if self.is_valid_transition(index):
indices.append(index)
else:
allowed_attempts -= 1
if len(indices) != batch_size:
raise Exception('Could only sample {} valid transitions'.format(
len(indices)))
else:
return indices
def set_priority(self, indices, priorities):
"""Sets the priority of the given elements according to Schaul et al.
Args:
indices: `np.array` of indices in range [0, replay_capacity).
priorities: list of floats, the corresponding priorities.
"""
assert indices.dtype == np.int32, ('Indices must be integers, '
'given: {}'.format(indices.dtype))
for i, memory_index in enumerate(indices):
self.sum_tree.set(memory_index, priorities[i])
def get_priority(self, indices, batch_size=None):
"""Fetches the priorities correspond to a batch of memory indices.
For any memory location not yet used, the corresponding priority is 0.
Args:
indices: `np.array` of indices in range [0, replay_capacity).
batch_size: int, requested number of items.
Returns:
The corresponding priorities.
"""
if batch_size is None:
batch_size = self._batch_size
if batch_size != self._state_batch.shape[0]:
self.reset_state_batch_arrays(batch_size)
priority_batch = np.empty((batch_size), dtype=np.float32)
assert indices.dtype == np.int32, ('Indices must be integers, '
'given: {}'.format(indices.dtype))
for i, memory_index in enumerate(indices):
priority_batch[i] = self.sum_tree.get(memory_index)
return priority_batch
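A toy-sized sketch of the prioritized memory in isolation: new transitions enter at `DEFAULT_PRIORITY` and can later be re-weighted, for example from per-transition TD losses. Sizes and values are illustrative.
```python
import numpy as np

per_memory = OutOfGraphPrioritizedReplayMemory(
    num_actions=4, observation_size=8, stack_size=1,
    replay_capacity=100, batch_size=2)
obs = np.zeros(8, dtype=np.uint8)
legal = np.ones(4, dtype=np.float32)
per_memory.add(obs, action=0, reward=0.0, terminal=0, legal_actions=legal)
per_memory.add(obs, action=1, reward=1.0, terminal=0, legal_actions=legal)
indices = np.array([0, 1], dtype=np.int32)
print(per_memory.get_priority(indices))       # both at DEFAULT_PRIORITY (100.0)
per_memory.set_priority(indices, [0.5, 2.0])  # e.g. priorities derived from TD errors
print(per_memory.get_priority(indices))       # [0.5, 2.0]
```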
@gin.configurable(denylist=['observation_size', 'stack_size'])
class WrappedPrioritizedReplayMemory(replay_memory.WrappedReplayMemory):
"""In graph wrapper for the python Replay Memory.
Usage:
To add a transition: run the operation add_transition_op
(and feed all the placeholders in add_transition_ph)
To sample a batch: Construct operations that depend on any of the
sampling tensors. Every sess.run using any of these
tensors will sample a new transition.
When using staging: Need to prefetch the next batch with each train_op by
calling self.prefetch_batch.
Every time this op is called, a new transition batch is prefetched.
Attributes:
# The following tensors are sampled randomly each sess.run
states
actions
rewards
next_states
terminals
add_transition_op: tf operation to add a transition to the replay
memory. All the following placeholders need to be fed.
add_obs_ph
add_action_ph
add_reward_ph
add_terminal_ph
"""
def __init__(self,
num_actions,
observation_size,
stack_size,
use_staging=True,
replay_capacity=1000000,
batch_size=32,
update_horizon=1,
gamma=1.0):
"""Initializes a graph wrapper for the python Replay Memory.
Args:
num_actions: int, number of possible actions.
observation_size: int, size of an input observation.
stack_size: int, number of frames to use in state stack.
use_staging: bool, when True it would use a staging area to prefetch
the next sampling batch.
replay_capacity: int, number of transitions to keep in memory.
batch_size: int.
update_horizon: int, length of update ('n' in n-step update).
gamma: float, the discount factor.
Raises:
ValueError: If update_horizon is not positive.
ValueError: If discount factor is not in [0, 1].
"""
memory = OutOfGraphPrioritizedReplayMemory(num_actions, observation_size,
stack_size, replay_capacity,
batch_size, update_horizon,
gamma)
super(WrappedPrioritizedReplayMemory, self).__init__(
num_actions,
observation_size, stack_size, use_staging, replay_capacity, batch_size,
update_horizon, gamma, wrapped_memory=memory)
def tf_set_priority(self, indices, losses):
"""Sets the priorities for the given indices.
Args:
indices: tensor of indices (int32), size k.
losses: tensor of losses (float), size k.
Returns:
A TF op setting the priorities according to Prioritized Experience
Replay.
"""
return tf.py_func(
self.memory.set_priority, [indices, losses],
[],
name='prioritized_replay_set_priority_py_func')
def tf_get_priority(self, indices):
"""Gets the priorities for the given indices.
Args:
indices: tensor of indices (int32), size k.
Returns:
A tensor (float32) of priorities.
"""
return tf.py_func(
self.memory.get_priority, [indices],
[tf.float32],
name='prioritized_replay_get_priority_py_func')
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/rainbow/prioritized_replay_memory.py
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors and Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# This file is a fork of the original Dopamine code incorporating changes for
# the multiplayer setting and the Hanabi Learning Environment.
#
"""Implementation of a DQN agent adapted to the multiplayer setting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import gin.tf
import numpy as np
import replay_memory
import tensorflow as tf
slim = tf.contrib.slim
Transition = collections.namedtuple(
'Transition', ['reward', 'observation', 'legal_actions', 'action', 'begin'])
def linearly_decaying_epsilon(decay_period, step, warmup_steps, epsilon):
"""Returns the current epsilon parameter for the agent's e-greedy policy.
Args:
decay_period: float, the decay period for epsilon.
step: Integer, the number of training steps completed so far.
warmup_steps: int, the number of steps taken before training starts.
epsilon: float, the epsilon value.
Returns:
A float, the linearly decaying epsilon value.
"""
steps_left = decay_period + warmup_steps - step
bonus = (1.0 - epsilon) * steps_left / decay_period
bonus = np.clip(bonus, 0.0, 1.0 - epsilon)
return epsilon + bonus
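For example, with a 1000-step decay period, 500 warmup steps and a final epsilon of 0.02, the schedule stays at 1.0 through warmup, decays linearly, and settles at 0.02:
```python
print(linearly_decaying_epsilon(1000, step=0, warmup_steps=500, epsilon=0.02))     # 1.0
print(linearly_decaying_epsilon(1000, step=1000, warmup_steps=500, epsilon=0.02))  # 0.51
print(linearly_decaying_epsilon(1000, step=1500, warmup_steps=500, epsilon=0.02))  # 0.02
```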
def dqn_template(state, num_actions, layer_size=512, num_layers=1):
r"""Builds a DQN Network mapping states to Q-values.
Args:
state: A `tf.placeholder` for the RL state.
num_actions: int, number of actions that the RL agent can take.
layer_size: int, number of hidden units per layer.
num_layers: int, Number of hidden layers.
Returns:
net: A tensor of Q-values (one per action), i.e. a network implementing
`\theta : \mathcal{X}\rightarrow\mathbb{R}^{|\mathcal{A}|}`
"""
weights_initializer = slim.variance_scaling_initializer(
factor=1.0 / np.sqrt(3.0), mode='FAN_IN', uniform=True)
net = tf.cast(state, tf.float32)
net = tf.squeeze(net, axis=2)
for _ in range(num_layers):
net = slim.fully_connected(net, layer_size,
activation_fn=tf.nn.relu)
net = slim.fully_connected(net, num_actions, activation_fn=None,
weights_initializer=weights_initializer)
return net
@gin.configurable
class DQNAgent(object):
"""A compact implementation of the multiplayer DQN agent."""
@gin.configurable
def __init__(self,
num_actions=None,
observation_size=None,
num_players=None,
gamma=0.99,
update_horizon=1,
min_replay_history=500,
update_period=4,
stack_size=1,
target_update_period=500,
epsilon_fn=linearly_decaying_epsilon,
epsilon_train=0.02,
epsilon_eval=0.001,
epsilon_decay_period=1000,
graph_template=dqn_template,
tf_device='/cpu:*',
use_staging=True,
optimizer=tf.train.RMSPropOptimizer(
learning_rate=.0025,
decay=0.95,
momentum=0.0,
epsilon=1e-6,
centered=True)):
"""Initializes the agent and constructs its graph.
Args:
num_actions: int, number of actions the agent can take at any state.
observation_size: int, size of observation vector.
num_players: int, number of players playing this game.
gamma: float, discount factor as commonly used in the RL literature.
update_horizon: int, horizon at which updates are performed, the 'n' in
n-step update.
min_replay_history: int, number of stored transitions before training.
update_period: int, period between DQN updates.
stack_size: int, number of observations to use as state.
target_update_period: Update period for the target network.
epsilon_fn: Function expecting 4 parameters: (decay_period, step,
warmup_steps, epsilon), and which returns the epsilon value used for
exploration during training.
epsilon_train: float, final epsilon for training.
epsilon_eval: float, epsilon during evaluation.
epsilon_decay_period: int, number of steps for epsilon to decay.
graph_template: function for building the neural network graph.
tf_device: str, Tensorflow device on which to run computations.
use_staging: bool, when True use a staging area to prefetch the next
sampling batch.
optimizer: Optimizer instance used for learning.
"""
tf.logging.info('Creating %s agent with the following parameters:',
self.__class__.__name__)
tf.logging.info('\t gamma: %f', gamma)
tf.logging.info('\t update_horizon: %f', update_horizon)
tf.logging.info('\t min_replay_history: %d', min_replay_history)
tf.logging.info('\t update_period: %d', update_period)
tf.logging.info('\t target_update_period: %d', target_update_period)
tf.logging.info('\t epsilon_train: %f', epsilon_train)
tf.logging.info('\t epsilon_eval: %f', epsilon_eval)
tf.logging.info('\t epsilon_decay_period: %d', epsilon_decay_period)
tf.logging.info('\t tf_device: %s', tf_device)
tf.logging.info('\t use_staging: %s', use_staging)
tf.logging.info('\t optimizer: %s', optimizer)
# Global variables.
self.num_actions = num_actions
self.observation_size = observation_size
self.num_players = num_players
self.gamma = gamma
self.update_horizon = update_horizon
self.cumulative_gamma = math.pow(gamma, update_horizon)
self.min_replay_history = min_replay_history
self.target_update_period = target_update_period
self.epsilon_fn = epsilon_fn
self.epsilon_train = epsilon_train
self.epsilon_eval = epsilon_eval
self.epsilon_decay_period = epsilon_decay_period
self.update_period = update_period
self.eval_mode = False
self.training_steps = 0
self.batch_staged = False
self.optimizer = optimizer
with tf.device(tf_device):
# Calling online_convnet will generate a new graph as defined in
# graph_template using whatever input is passed, but will always share
# the same weights.
online_convnet = tf.make_template('Online', graph_template)
target_convnet = tf.make_template('Target', graph_template)
# The state of the agent. The last axis is the number of past observations
# that make up the state.
states_shape = (1, observation_size, stack_size)
self.state = np.zeros(states_shape)
self.state_ph = tf.placeholder(tf.uint8, states_shape, name='state_ph')
self.legal_actions_ph = tf.placeholder(tf.float32,
[self.num_actions],
name='legal_actions_ph')
self._q = online_convnet(
state=self.state_ph, num_actions=self.num_actions)
self._replay = self._build_replay_memory(use_staging)
self._replay_qs = online_convnet(self._replay.states, self.num_actions)
self._replay_next_qt = target_convnet(self._replay.next_states,
self.num_actions)
self._train_op = self._build_train_op()
self._sync_qt_ops = self._build_sync_op()
self._q_argmax = tf.argmax(self._q + self.legal_actions_ph, axis=1)[0]
# Set up a session and initialize variables.
self._sess = tf.Session(
'', config=tf.ConfigProto(allow_soft_placement=True))
self._init_op = tf.global_variables_initializer()
self._sess.run(self._init_op)
self._saver = tf.train.Saver(max_to_keep=3)
# This keeps track of the observed transitions during play, for each
# player.
self.transitions = [[] for _ in range(num_players)]
def _build_replay_memory(self, use_staging):
"""Creates the replay memory used by the agent.
Args:
use_staging: bool, if True, uses a staging area for replaying.
Returns:
A replay memory object.
"""
return replay_memory.WrappedReplayMemory(
num_actions=self.num_actions,
observation_size=self.observation_size,
batch_size=32,
stack_size=1,
use_staging=use_staging,
update_horizon=self.update_horizon,
gamma=self.gamma)
def _build_target_q_op(self):
"""Build an op to be used as a target for the Q-value.
Returns:
target_q_op: An op calculating the target Q-value.
"""
# Get the max q_value across the actions dimension.
replay_next_qt_max = tf.reduce_max(self._replay_next_qt +
self._replay.next_legal_actions, 1)
# Calculate the sample Bellman update.
# Q_t = R_t + \gamma^N * Q'_t+1
# where,
# Q'_t+1 is \max_a Q_target(S_t+1, a)
# (or) 0 if S_t is a terminal state,
# and
# N is the update horizon (by default, N=1).
return self._replay.rewards + self.cumulative_gamma * replay_next_qt_max * (
1. - tf.cast(self._replay.terminals, tf.float32))
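# For intuition (illustrative numbers, not from the original code): with
# update_horizon N = 3, gamma = 0.9, per-step rewards [1, 0, 2] along the
# sampled trajectory and a bootstrap value max_a Q_target(S_{t+3}, a) = 5,
# the replay memory stores the n-step return 1 + 0.9*0 + 0.81*2 = 2.62 and
# the op above returns 2.62 + 0.9**3 * 5 = 6.265 for a non-terminal
# transition; the (1 - terminal) factor zeroes the bootstrap term at
# episode end.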
def _build_train_op(self):
"""Builds a training op.
Returns:
train_op: An op performing one step of training.
"""
replay_action_one_hot = tf.one_hot(
self._replay.actions, self.num_actions, 1., 0., name='action_one_hot')
replay_chosen_q = tf.reduce_sum(
self._replay_qs * replay_action_one_hot,
reduction_indices=1,
name='replay_chosen_q')
target = tf.stop_gradient(self._build_target_q_op())
loss = tf.losses.huber_loss(
target, replay_chosen_q, reduction=tf.losses.Reduction.NONE)
return self.optimizer.minimize(tf.reduce_mean(loss))
def _build_sync_op(self):
"""Build ops for assigning weights from online to target network.
Returns:
ops: A list of ops assigning weights from online to target network.
"""
# Get trainable variables from online and target networks.
sync_qt_ops = []
trainables_online = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='Online')
trainables_target = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='Target')
for (w_online, w_target) in zip(trainables_online, trainables_target):
# Assign weights from online to target network.
sync_qt_ops.append(w_target.assign(w_online, use_locking=True))
return sync_qt_ops
def begin_episode(self, current_player, legal_actions, observation):
"""Returns the agent's first action.
Args:
current_player: int, the player whose turn it is.
legal_actions: `np.array`, actions which the player can currently take.
observation: `np.array`, the environment's initial observation.
Returns:
A legal, int-valued action.
"""
self._train_step()
self.action = self._select_action(observation, legal_actions)
self._record_transition(current_player, 0, observation, legal_actions,
self.action, begin=True)
return self.action
def step(self, reward, current_player, legal_actions, observation):
"""Stores observations from last transition and chooses a new action.
Notifies the agent of the outcome of the latest transition and stores it
in the replay memory, selects a new action and applies a training step.
Args:
reward: float, the reward received from its action.
current_player: int, the player whose turn it is.
legal_actions: `np.array`, actions which the player can currently take.
observation: `np.array`, the most recent observation.
Returns:
A legal, int-valued action.
"""
self._train_step()
self.action = self._select_action(observation, legal_actions)
self._record_transition(current_player, reward, observation, legal_actions,
self.action)
return self.action
def end_episode(self, final_rewards):
"""Signals the end of the episode to the agent.
Args:
final_rewards: `np.array`, the last rewards from the environment. Each
player gets their own reward, which is the sum of the rewards since
their last move.
"""
self._post_transitions(terminal_rewards=final_rewards)
def _record_transition(self, current_player, reward, observation,
legal_actions, action, begin=False):
"""Records the most recent transition data.
Specifically, the data consists of (r_t, o_{t+1}, l_{t+1}, a_{t+1}), where
r_t is the most recent reward (since our last action),
o_{t+1} is the following observation,
l_{t+1} are the legal actions from the corresponding state,
a_{t+1} is the chosen action from that state.
Args:
current_player: int, the player experiencing the transition.
reward: float, the received reward.
observation: `np.array`, the player's observation.
legal_actions: `np.array`, legal actions from this state.
action: int, the selected action.
begin: bool, if True, this is the beginning of an episode.
"""
self.transitions[current_player].append(
Transition(reward, np.array(observation, dtype=np.uint8, copy=True),
np.array(legal_actions, dtype=np.float32, copy=True),
action, begin))
def _post_transitions(self, terminal_rewards):
"""Posts this episode to the replay memory.
Each player has their own episode, which is posted separately.
Args:
terminal_rewards: `np.array`, terminal rewards for each player.
"""
# We store each player's episode consecutively in the replay memory.
for player in range(self.num_players):
num_transitions = len(self.transitions[player])
for index, transition in enumerate(self.transitions[player]):
# Add: o_t, l_t, a_t, r_{t+1}, term_{t+1}
final_transition = index == num_transitions - 1
if final_transition:
reward = terminal_rewards[player]
else:
reward = self.transitions[player][index + 1].reward
self._store_transition(transition.observation, transition.action,
reward, final_transition,
transition.legal_actions)
# Now that this episode has been stored, drop it from the transitions
# buffer.
self.transitions[player] = []
def _select_action(self, observation, legal_actions):
"""Select an action from the set of allowed actions.
Chooses an action randomly with probability self._calculate_epsilon(), and
will otherwise choose greedily from the current q-value estimates.
Args:
observation: `np.array`, the current observation.
legal_actions: `np.array`, describing legal actions, with -inf meaning
not legal.
Returns:
action: int, a legal action.
"""
if self.eval_mode:
epsilon = self.epsilon_eval
else:
epsilon = self.epsilon_fn(self.epsilon_decay_period, self.training_steps,
self.min_replay_history, self.epsilon_train)
if random.random() <= epsilon:
# Choose a random action with probability epsilon.
legal_action_indices = np.where(legal_actions == 0.0)
return np.random.choice(legal_action_indices[0])
else:
# Convert observation into a batch-based format.
self.state[0, :, 0] = observation
# Choose the action maximizing the q function for the current state.
action = self._sess.run(self._q_argmax,
{self.state_ph: self.state,
self.legal_actions_ph: legal_actions})
assert legal_actions[action] == 0.0, 'Expected legal action.'
return action
def _train_step(self):
"""Runs a single training step.
Runs a training op if both:
(1) A minimum number of frames have been added to the replay buffer.
(2) `training_steps` is a multiple of `update_period`.
Also, syncs weights from online to target network if training steps is a
multiple of target update period.
"""
if self.eval_mode:
return
# Run a training op.
if (self._replay.memory.add_count >= self.min_replay_history and
not self.batch_staged):
self._sess.run(self._replay.prefetch_batch)
self.batch_staged = True
if (self._replay.memory.add_count > self.min_replay_history and
self.training_steps % self.update_period == 0):
self._sess.run([self._train_op, self._replay.prefetch_batch])
# Sync weights.
if self.training_steps % self.target_update_period == 0:
self._sess.run(self._sync_qt_ops)
self.training_steps += 1
def _store_transition(self, observation, action, reward, is_terminal,
legal_actions):
"""Stores a transition during training mode.
Runs the replay memory ops in a tf session in order to store the tuple
(observation, action, reward, is_terminal, legal_actions) in the replay
buffer.
Args:
observation: `np.array`, observation.
action: int, the action taken.
reward: float, the reward.
is_terminal: bool, indicating if the current state is a terminal state.
legal_actions: Legal actions from the current state.
"""
if not self.eval_mode:
self._sess.run(
self._replay.add_transition_op, {
self._replay.add_obs_ph: observation,
self._replay.add_action_ph: action,
self._replay.add_reward_ph: reward,
self._replay.add_terminal_ph: is_terminal,
self._replay.add_legal_actions_ph: legal_actions
})
def bundle_and_checkpoint(self, checkpoint_dir, iteration_number):
"""Returns a self-contained bundle of the agent's state.
This is used for checkpointing. It will return a dictionary containing all
non-TensorFlow objects (to be saved into a file by the caller), and it saves
all TensorFlow objects into a checkpoint file.
Args:
checkpoint_dir: str, directory where TensorFlow objects will be saved.
iteration_number: int, iteration number for naming the checkpoint file.
Returns:
A dictionary containing all of the agent's non-TensorFlow objects.
If the checkpoint directory does not exist, will return None.
"""
if not tf.gfile.Exists(checkpoint_dir):
return None
self._saver.save(
self._sess,
os.path.join(checkpoint_dir, 'tf_ckpt'),
global_step=iteration_number)
self._replay.save(checkpoint_dir, iteration_number)
bundle_dictionary = {}
bundle_dictionary['state'] = self.state
bundle_dictionary['eval_mode'] = self.eval_mode
bundle_dictionary['training_steps'] = self.training_steps
bundle_dictionary['batch_staged'] = self.batch_staged
return bundle_dictionary
def unbundle(self, checkpoint_dir, iteration_number, bundle_dictionary):
"""Restores the agent from a checkpoint.
Restores the agent's Python objects to those specified in bundle_dictionary,
and restores the TensorFlow objects to those specified in the
checkpoint_dir. If the checkpoint_dir does not exist, will not reset the
agent's state.
Args:
checkpoint_dir: str, path to the checkpoint saved by `tf.Save`.
iteration_number: int, checkpoint version.
bundle_dictionary: Dictionary containing this class's Python objects.
Returns:
A boolean indicating whether unbundling was successful.
"""
try:
# replay.load() will throw a NotFoundError if it does not find all the
# necessary files, in which case we should abort the process.
self._replay.load(checkpoint_dir, iteration_number)
except tf.errors.NotFoundError:
return False
for key in self.__dict__:
if key in bundle_dictionary:
self.__dict__[key] = bundle_dictionary[key]
self._saver.restore(self._sess, tf.train.latest_checkpoint(checkpoint_dir))
return True
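# --- Illustrative usage (editor's sketch, not part of the original agent) ---
# A minimal self-play interaction, assuming an environment that follows the
# begin_episode/step/end_episode contract used by run_experiment.py. The
# constructor call mirrors create_agent() in run_experiment.py; further
# hyperparameters (gamma, update_horizon, graph_template, ...) are shown in
# RainbowAgent's super().__init__ call.
#
#   agent = DQNAgent(observation_size=obs_size, num_actions=num_moves,
#                    num_players=2)
#   action = agent.begin_episode(current_player, legal_actions, observation)
#   while not done:
#     action = agent.step(reward, current_player, legal_actions, observation)
#   agent.end_episode(final_rewards)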
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/rainbow/dqn_agent.py
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors and Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# This file is a fork of the original Dopamine code incorporating changes for
# the multiplayer setting and the Hanabi Learning Environment.
#
"""Run methods for training a DQN agent on Atari.
Methods in this module are usually referenced by |train.py|.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from third_party.dopamine import checkpointer
from third_party.dopamine import iteration_statistics
import dqn_agent
import gin.tf
from hanabi_learning_environment import rl_env
import numpy as np
import rainbow_agent
import tensorflow as tf
LENIENT_SCORE = False
class ObservationStacker(object):
"""Class for stacking agent observations."""
def __init__(self, history_size, observation_size, num_players):
"""Initializer for observation stacker.
Args:
history_size: int, number of time steps to stack.
observation_size: int, size of observation vector on one time step.
num_players: int, number of players.
"""
self._history_size = history_size
self._observation_size = observation_size
self._num_players = num_players
self._obs_stacks = list()
for _ in range(0, self._num_players):
self._obs_stacks.append(np.zeros(self._observation_size *
self._history_size))
def add_observation(self, observation, current_player):
"""Adds observation for the current player.
Args:
observation: observation vector for current player.
current_player: int, current player id.
"""
self._obs_stacks[current_player] = np.roll(self._obs_stacks[current_player],
-self._observation_size)
self._obs_stacks[current_player][(self._history_size - 1) *
self._observation_size:] = observation
def get_observation_stack(self, current_player):
"""Returns the stacked observation for current player.
Args:
current_player: int, current player id.
"""
return self._obs_stacks[current_player]
def reset_stack(self):
"""Resets the observation stacks to all zero."""
for i in range(0, self._num_players):
self._obs_stacks[i].fill(0.0)
@property
def history_size(self):
"""Returns number of steps to stack."""
return self._history_size
def observation_size(self):
"""Returns the size of the observation vector after history stacking."""
return self._observation_size * self._history_size
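# --- Illustrative usage (editor's sketch; the observation size below is a
# placeholder -- the real value comes from
# environment.vectorized_observation_shape()[0], see create_obs_stacker) ---
#
#   stacker = ObservationStacker(history_size=4, observation_size=658,
#                                num_players=2)
#   stacker.add_observation(observation_vector, current_player=0)
#   stacked = stacker.get_observation_stack(current_player=0)  # newest last
#   stacker.reset_stack()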
def load_gin_configs(gin_files, gin_bindings):
"""Loads gin configuration files.
Args:
gin_files: A list of paths to the gin configuration files for this
experiment.
gin_bindings: List of gin parameter bindings to override the values in the
config files.
"""
gin.parse_config_files_and_bindings(gin_files,
bindings=gin_bindings,
skip_unknown=False)
@gin.configurable
def create_environment(game_type='Hanabi-Full', num_players=2):
"""Creates the Hanabi environment.
Args:
game_type: Type of game to play. Currently the following are supported:
Hanabi-Full: Regular game.
Hanabi-Small: The small version of Hanabi, with 2 cards and 2 colours.
num_players: Int, number of players to play this game.
Returns:
A Hanabi environment.
"""
return rl_env.make(
environment_name=game_type, num_players=num_players, pyhanabi_path=None)
@gin.configurable
def create_obs_stacker(environment, history_size=4):
"""Creates an observation stacker.
Args:
environment: environment object.
history_size: int, number of steps to stack.
Returns:
An observation stacker object.
"""
return ObservationStacker(history_size,
environment.vectorized_observation_shape()[0],
environment.players)
@gin.configurable
def create_agent(environment, obs_stacker, agent_type='DQN'):
"""Creates the Hanabi agent.
Args:
environment: The environment.
obs_stacker: Observation stacker object.
agent_type: str, type of agent to construct.
Returns:
An agent for playing Hanabi.
Raises:
ValueError: if an unknown agent type is requested.
"""
if agent_type == 'DQN':
return dqn_agent.DQNAgent(observation_size=obs_stacker.observation_size(),
num_actions=environment.num_moves(),
num_players=environment.players)
elif agent_type == 'Rainbow':
return rainbow_agent.RainbowAgent(
observation_size=obs_stacker.observation_size(),
num_actions=environment.num_moves(),
num_players=environment.players)
else:
raise ValueError('Expected valid agent_type, got {}'.format(agent_type))
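# --- Illustrative wiring (editor's sketch of how the three factories above
# compose; gin bindings may override any of the defaults) ---
#
#   environment = create_environment(game_type='Hanabi-Full', num_players=2)
#   obs_stacker = create_obs_stacker(environment)
#   agent = create_agent(environment, obs_stacker, agent_type='Rainbow')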
def initialize_checkpointing(agent, experiment_logger, checkpoint_dir,
checkpoint_file_prefix='ckpt'):
"""Reloads the latest checkpoint if it exists.
The following steps will be taken:
- This method will first create a Checkpointer object, which will be used in
the method and then returned to the caller for later use.
- It will then call checkpointer.get_latest_checkpoint_number to determine
whether there is a valid checkpoint in checkpoint_dir, and what is the
largest file number.
- If a valid checkpoint file is found, it will load the bundled data from
this file and will pass it to the agent for it to reload its data.
- If the agent is able to successfully unbundle, this method will verify that
the unbundled data contains the keys 'logs' and 'current_iteration'. It
will then load the Logger's data from the bundle, and will return the
iteration number keyed by 'current_iteration' as one of the return values
(along with the Checkpointer object).
Args:
agent: The agent that will unbundle the checkpoint from checkpoint_dir.
experiment_logger: The Logger object that will be loaded from the
checkpoint.
checkpoint_dir: str, the directory containing the checkpoints.
checkpoint_file_prefix: str, the checkpoint file prefix.
Returns:
start_iteration: int, The iteration number to start the experiment from.
experiment_checkpointer: The experiment checkpointer.
"""
experiment_checkpointer = checkpointer.Checkpointer(
checkpoint_dir, checkpoint_file_prefix)
start_iteration = 0
# Check if checkpoint exists. Note that the existence of checkpoint 0 means
# that we have finished iteration 0 (so we will start from iteration 1).
latest_checkpoint_version = checkpointer.get_latest_checkpoint_number(
checkpoint_dir)
if latest_checkpoint_version >= 0:
dqn_dictionary = experiment_checkpointer.load_checkpoint(
latest_checkpoint_version)
if agent.unbundle(
checkpoint_dir, latest_checkpoint_version, dqn_dictionary):
assert 'logs' in dqn_dictionary
assert 'current_iteration' in dqn_dictionary
experiment_logger.data = dqn_dictionary['logs']
start_iteration = dqn_dictionary['current_iteration'] + 1
tf.logging.info('Reloaded checkpoint and will start from iteration %d',
start_iteration)
return start_iteration, experiment_checkpointer
def format_legal_moves(legal_moves, action_dim):
"""Returns formatted legal moves.
This function takes a list of actions and converts it into a fixed size vector
of size action_dim. If an action is legal, its position is set to 0 and -Inf
otherwise.
Ex: legal_moves = [0, 1, 3], action_dim = 5
returns [0, 0, -Inf, 0, -Inf]
Args:
legal_moves: list of legal actions.
action_dim: int, number of actions.
Returns:
a vector of size action_dim.
"""
new_legal_moves = np.full(action_dim, -float('inf'))
if legal_moves:
new_legal_moves[legal_moves] = 0
return new_legal_moves
def parse_observations(observations, num_actions, obs_stacker):
"""Deconstructs the rich observation data into relevant components.
Args:
observations: dict, containing full observations.
num_actions: int, The number of available actions.
obs_stacker: Observation stacker object.
Returns:
current_player: int, Whose turn it is.
legal_moves: `np.array` of floats, of length num_actions, whose elements
are -inf for indices corresponding to illegal moves and 0, for those
corresponding to legal moves.
observation_vector: Vectorized observation for the current player.
"""
current_player = observations['current_player']
current_player_observation = (
observations['player_observations'][current_player])
legal_moves = current_player_observation['legal_moves_as_int']
legal_moves = format_legal_moves(legal_moves, num_actions)
observation_vector = current_player_observation['vectorized']
obs_stacker.add_observation(observation_vector, current_player)
observation_vector = obs_stacker.get_observation_stack(current_player)
return current_player, legal_moves, observation_vector
def run_one_episode(agent, environment, obs_stacker):
"""Runs the agent on a single game of Hanabi in self-play mode.
Args:
agent: Agent playing Hanabi.
environment: The Hanabi environment.
obs_stacker: Observation stacker object.
Returns:
step_number: int, number of actions in this episode.
total_reward: float, undiscounted return for this episode.
"""
obs_stacker.reset_stack()
observations = environment.reset()
current_player, legal_moves, observation_vector = (
parse_observations(observations, environment.num_moves(), obs_stacker))
action = agent.begin_episode(current_player, legal_moves, observation_vector)
is_done = False
total_reward = 0
step_number = 0
has_played = {current_player}
# Keep track of per-player reward.
reward_since_last_action = np.zeros(environment.players)
while not is_done:
observations, reward, is_done, _ = environment.step(action.item())
modified_reward = max(reward, 0) if LENIENT_SCORE else reward
total_reward += modified_reward
reward_since_last_action += modified_reward
step_number += 1
if is_done:
break
current_player, legal_moves, observation_vector = (
parse_observations(observations, environment.num_moves(), obs_stacker))
if current_player in has_played:
action = agent.step(reward_since_last_action[current_player],
current_player, legal_moves, observation_vector)
else:
# Each player begins the episode on their first turn (which may not be
# the first move of the game).
action = agent.begin_episode(current_player, legal_moves,
observation_vector)
has_played.add(current_player)
# Reset this player's reward accumulator.
reward_since_last_action[current_player] = 0
agent.end_episode(reward_since_last_action)
tf.logging.info('EPISODE: %d %g', step_number, total_reward)
return step_number, total_reward
def run_one_phase(agent, environment, obs_stacker, min_steps, statistics,
run_mode_str):
"""Runs the agent/environment loop until a desired number of steps.
Args:
agent: Agent playing hanabi.
environment: environment object.
obs_stacker: Observation stacker object.
min_steps: int, minimum number of steps to generate in this phase.
statistics: `IterationStatistics` object which records the experimental
results.
run_mode_str: str, describes the run mode for this agent.
Returns:
The number of steps taken in this phase, the sum of returns, and the
number of episodes performed.
"""
step_count = 0
num_episodes = 0
sum_returns = 0.
while step_count < min_steps:
episode_length, episode_return = run_one_episode(agent, environment,
obs_stacker)
statistics.append({
'{}_episode_lengths'.format(run_mode_str): episode_length,
'{}_episode_returns'.format(run_mode_str): episode_return
})
step_count += episode_length
sum_returns += episode_return
num_episodes += 1
return step_count, sum_returns, num_episodes
@gin.configurable
def run_one_iteration(agent, environment, obs_stacker,
iteration, training_steps,
evaluate_every_n=100,
num_evaluation_games=100):
"""Runs one iteration of agent/environment interaction.
An iteration involves running several episodes until a certain number of
steps are obtained.
Args:
agent: Agent playing hanabi.
environment: The Hanabi environment.
obs_stacker: Observation stacker object.
iteration: int, current iteration number, used as a global_step.
training_steps: int, the number of training steps to perform.
evaluate_every_n: int, frequency of evaluation.
num_evaluation_games: int, number of games per evaluation.
Returns:
A dict containing summary statistics for this iteration.
"""
start_time = time.time()
statistics = iteration_statistics.IterationStatistics()
# First perform the training phase, during which the agent learns.
agent.eval_mode = False
number_steps, sum_returns, num_episodes = (
run_one_phase(agent, environment, obs_stacker, training_steps, statistics,
'train'))
time_delta = time.time() - start_time
tf.logging.info('Average training steps per second: %.2f',
number_steps / time_delta)
average_return = sum_returns / num_episodes
tf.logging.info('Average per episode return: %.2f', average_return)
statistics.append({'average_return': average_return})
# Also run an evaluation phase if desired.
if evaluate_every_n is not None and iteration % evaluate_every_n == 0:
episode_data = []
agent.eval_mode = True
# Collect episode data for all games.
for _ in range(num_evaluation_games):
episode_data.append(run_one_episode(agent, environment, obs_stacker))
eval_episode_length, eval_episode_return = map(np.mean, zip(*episode_data))
statistics.append({
'eval_episode_lengths': eval_episode_length,
'eval_episode_returns': eval_episode_return
})
tf.logging.info('Average eval. episode length: %.2f Return: %.2f',
eval_episode_length, eval_episode_return)
else:
statistics.append({
'eval_episode_lengths': -1,
'eval_episode_returns': -1
})
return statistics.data_lists
def log_experiment(experiment_logger, iteration, statistics,
logging_file_prefix='log', log_every_n=1):
"""Records the results of the current iteration.
Args:
experiment_logger: A `Logger` object.
iteration: int, iteration number.
statistics: Object containing statistics to log.
logging_file_prefix: str, prefix to use for the log files.
log_every_n: int, specifies logging frequency.
"""
if iteration % log_every_n == 0:
experiment_logger['iter{:d}'.format(iteration)] = statistics
experiment_logger.log_to_file(logging_file_prefix, iteration)
def checkpoint_experiment(experiment_checkpointer, agent, experiment_logger,
iteration, checkpoint_dir, checkpoint_every_n):
"""Checkpoint experiment data.
Args:
experiment_checkpointer: A `Checkpointer` object.
agent: An RL agent.
experiment_logger: a Logger object, to include its data in the checkpoint.
iteration: int, iteration number for checkpointing.
checkpoint_dir: str, the directory where to save checkpoints.
checkpoint_every_n: int, the frequency for writing checkpoints.
"""
if iteration % checkpoint_every_n == 0:
agent_dictionary = agent.bundle_and_checkpoint(checkpoint_dir, iteration)
if agent_dictionary:
agent_dictionary['current_iteration'] = iteration
agent_dictionary['logs'] = experiment_logger.data
experiment_checkpointer.save_checkpoint(iteration, agent_dictionary)
@gin.configurable
def run_experiment(agent,
environment,
start_iteration,
obs_stacker,
experiment_logger,
experiment_checkpointer,
checkpoint_dir,
num_iterations=200,
training_steps=5000,
logging_file_prefix='log',
log_every_n=1,
checkpoint_every_n=1):
"""Runs a full experiment, spread over multiple iterations."""
tf.logging.info('Beginning training...')
if num_iterations <= start_iteration:
tf.logging.warning('num_iterations (%d) <= start_iteration (%d)',
num_iterations, start_iteration)
return
for iteration in range(start_iteration, num_iterations):
start_time = time.time()
statistics = run_one_iteration(agent, environment, obs_stacker, iteration,
training_steps)
tf.logging.info('Iteration %d took %d seconds', iteration,
time.time() - start_time)
start_time = time.time()
log_experiment(experiment_logger, iteration, statistics,
logging_file_prefix, log_every_n)
tf.logging.info('Logging iteration %d took %d seconds', iteration,
time.time() - start_time)
start_time = time.time()
checkpoint_experiment(experiment_checkpointer, agent, experiment_logger,
iteration, checkpoint_dir, checkpoint_every_n)
tf.logging.info('Checkpointing iteration %d took %d seconds', iteration,
time.time() - start_time)
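# --- Illustrative end-to-end wiring (editor's sketch of a typical caller such
# as train.py; the Logger import path and the directories are assumptions, not
# part of this module) ---
#
#   from third_party.dopamine import logger               # assumed import path
#   environment = create_environment()
#   obs_stacker = create_obs_stacker(environment)
#   agent = create_agent(environment, obs_stacker)
#   checkpoint_dir = '/tmp/hanabi/checkpoints'             # hypothetical path
#   experiment_logger = logger.Logger('/tmp/hanabi/logs')  # hypothetical path
#   start_iteration, experiment_checkpointer = initialize_checkpointing(
#       agent, experiment_logger, checkpoint_dir)
#   run_experiment(agent, environment, start_iteration, obs_stacker,
#                  experiment_logger, experiment_checkpointer, checkpoint_dir)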
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/rainbow/run_experiment.py
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors and Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# This file is a fork of the original Dopamine code incorporating changes for
# the multiplayer setting and the Hanabi Learning Environment.
#
"""Implementation of a Rainbow agent adapted to the multiplayer setting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import dqn_agent
import gin.tf
import numpy as np
import prioritized_replay_memory
import tensorflow as tf
slim = tf.contrib.slim
@gin.configurable
def rainbow_template(state,
num_actions,
num_atoms=51,
layer_size=512,
num_layers=1):
r"""Builds a Rainbow Network mapping states to value distributions.
Args:
state: A `tf.placeholder` for the RL state.
num_actions: int, number of actions that the RL agent can take.
num_atoms: int, number of atoms to approximate the distribution with.
layer_size: int, number of hidden units per layer.
num_layers: int, number of hidden layers.
Returns:
net: A `tf.Tensor` holding the output of the Rainbow network:
`\theta : \mathcal{X}\rightarrow\mathbb{R}^{|\mathcal{A}| \times N}`,
where `N` is num_atoms.
"""
weights_initializer = slim.variance_scaling_initializer(
factor=1.0 / np.sqrt(3.0), mode='FAN_IN', uniform=True)
net = tf.cast(state, tf.float32)
net = tf.squeeze(net, axis=2)
for _ in range(num_layers):
net = slim.fully_connected(net, layer_size,
activation_fn=tf.nn.relu)
net = slim.fully_connected(net, num_actions * num_atoms, activation_fn=None,
weights_initializer=weights_initializer)
net = tf.reshape(net, [-1, num_actions, num_atoms])
return net
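# --- Note (editor's sketch): this template is consumed indirectly. RainbowAgent
# wraps it with functools.partial, and DQNAgent turns it into weight-sharing
# online/target networks via tf.make_template, roughly:
#
#   graph_template = functools.partial(rainbow_template, num_atoms=51)
#   online_convnet = tf.make_template('Online', graph_template)
#   logits = online_convnet(state=state_ph, num_actions=num_actions)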
@gin.configurable
class RainbowAgent(dqn_agent.DQNAgent):
"""A compact implementation of the multiplayer Rainbow agent."""
@gin.configurable
def __init__(self,
num_actions=None,
observation_size=None,
num_players=None,
num_atoms=51,
vmax=25.,
gamma=0.99,
update_horizon=1,
min_replay_history=500,
update_period=4,
target_update_period=500,
epsilon_train=0.0,
epsilon_eval=0.0,
epsilon_decay_period=1000,
learning_rate=0.000025,
optimizer_epsilon=0.00003125,
tf_device='/cpu:*'):
"""Initializes the agent and constructs its graph.
Args:
num_actions: int, number of actions the agent can take at any state.
observation_size: int, size of observation vector.
num_players: int, number of players playing this game.
num_atoms: Int, the number of buckets for the value function distribution.
vmax: float, maximum return predicted by a value distribution.
gamma: float, discount factor as commonly used in the RL literature.
update_horizon: int, horizon at which updates are performed, the 'n' in
n-step update.
min_replay_history: int, number of stored transitions before training.
update_period: int, period between DQN updates.
target_update_period: int, update period for the target network.
epsilon_train: float, final epsilon for training.
epsilon_eval: float, epsilon during evaluation.
epsilon_decay_period: int, number of steps for epsilon to decay.
learning_rate: float, learning rate for the optimizer.
optimizer_epsilon: float, epsilon for Adam optimizer.
tf_device: str, Tensorflow device on which to run computations.
"""
# We need this because some tools convert round floats into ints.
vmax = float(vmax)
self.num_atoms = num_atoms
# Using -vmax as the minimum return is wasteful, because all rewards are
# positive -- but does not unduly affect performance.
self.support = tf.linspace(-vmax, vmax, num_atoms)
self.learning_rate = learning_rate
self.optimizer_epsilon = optimizer_epsilon
graph_template = functools.partial(rainbow_template, num_atoms=num_atoms)
super(RainbowAgent, self).__init__(
num_actions=num_actions,
observation_size=observation_size,
num_players=num_players,
gamma=gamma,
update_horizon=update_horizon,
min_replay_history=min_replay_history,
update_period=update_period,
target_update_period=target_update_period,
epsilon_train=epsilon_train,
epsilon_eval=epsilon_eval,
epsilon_decay_period=epsilon_decay_period,
graph_template=graph_template,
tf_device=tf_device)
tf.logging.info('\t learning_rate: %f', learning_rate)
tf.logging.info('\t optimizer_epsilon: %f', optimizer_epsilon)
def _build_replay_memory(self, use_staging):
"""Creates the replay memory used by the agent.
Rainbow uses prioritized replay.
Args:
use_staging: bool, whether to use a staging area in the replay memory.
Returns:
A replay memory object.
"""
return prioritized_replay_memory.WrappedPrioritizedReplayMemory(
num_actions=self.num_actions,
observation_size=self.observation_size,
stack_size=1,
use_staging=use_staging,
update_horizon=self.update_horizon,
gamma=self.gamma)
def _reshape_networks(self):
# self._q is actually logits now, rename things.
# size of _logits: 1 x num_actions x num_atoms
self._logits = self._q
# size of _probabilities: 1 x num_actions x num_atoms
self._probabilities = tf.contrib.layers.softmax(self._q)
# size of _q: 1 x num_actions
self._q = tf.reduce_sum(self.support * self._probabilities, axis=2)
# Recompute argmax from q values. Ignore illegal actions.
self._q_argmax = tf.argmax(self._q + self.legal_actions_ph, axis=1)[0]
# size of _replay_logits: batch_size x num_actions x num_atoms
self._replay_logits = self._replay_qs
# size of _replay_next_logits: batch_size x num_actions x num_atoms
self._replay_next_logits = self._replay_next_qt
del self._replay_qs
del self._replay_next_qt
def _build_target_distribution(self):
self._reshape_networks()
batch_size = tf.shape(self._replay.rewards)[0]
# size of rewards: batch_size x 1
rewards = self._replay.rewards[:, None]
# size of tiled_support: batch_size x num_atoms
tiled_support = tf.tile(self.support, [batch_size])
tiled_support = tf.reshape(tiled_support, [batch_size, self.num_atoms])
# size of target_support: batch_size x num_atoms
is_terminal_multiplier = 1. - tf.cast(self._replay.terminals, tf.float32)
# Incorporate terminal state to discount factor.
# size of gamma_with_terminal: batch_size x 1
gamma_with_terminal = self.cumulative_gamma * is_terminal_multiplier
gamma_with_terminal = gamma_with_terminal[:, None]
target_support = rewards + gamma_with_terminal * tiled_support
# size of next_probabilities: batch_size x num_actions x num_atoms
next_probabilities = tf.contrib.layers.softmax(
self._replay_next_logits)
# size of next_qt: batch_size x num_actions
next_qt = tf.reduce_sum(self.support * next_probabilities, 2)
# size of next_qt_argmax: batch_size x 1
next_qt_argmax = tf.argmax(
next_qt + self._replay.next_legal_actions, axis=1)[:, None]
batch_indices = tf.range(tf.to_int64(batch_size))[:, None]
# size of next_qt_argmax: batch_size x 2
next_qt_argmax = tf.concat([batch_indices, next_qt_argmax], axis=1)
# size of next_probabilities: batch_size x num_atoms
next_probabilities = tf.gather_nd(next_probabilities, next_qt_argmax)
return project_distribution(target_support, next_probabilities,
self.support)
def _build_train_op(self):
"""Builds the training op for Rainbow.
Returns:
train_op: An op performing one step of training.
"""
target_distribution = tf.stop_gradient(self._build_target_distribution())
# size of indices: batch_size x 1.
indices = tf.range(tf.shape(self._replay_logits)[0])[:, None]
# size of reshaped_actions: batch_size x 2.
reshaped_actions = tf.concat([indices, self._replay.actions[:, None]], 1)
# For each element of the batch, fetch the logits for its selected action.
chosen_action_logits = tf.gather_nd(self._replay_logits, reshaped_actions)
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=target_distribution,
logits=chosen_action_logits)
optimizer = tf.train.AdamOptimizer(
learning_rate=self.learning_rate,
epsilon=self.optimizer_epsilon)
update_priorities_op = self._replay.tf_set_priority(
self._replay.indices, tf.sqrt(loss + 1e-10))
target_priorities = self._replay.tf_get_priority(self._replay.indices)
target_priorities = tf.math.add(target_priorities, 1e-10)
target_priorities = 1.0 / tf.sqrt(target_priorities)
target_priorities /= tf.reduce_max(target_priorities)
weighted_loss = target_priorities * loss
with tf.control_dependencies([update_priorities_op]):
return optimizer.minimize(tf.reduce_mean(weighted_loss)), weighted_loss
def project_distribution(supports, weights, target_support,
validate_args=False):
"""Projects a batch of (support, weights) onto target_support.
Based on equation (7) in (Bellemare et al., 2017):
https://arxiv.org/abs/1707.06887
In the rest of the comments we will refer to this equation simply as Eq7.
This code is not easy to digest, so we will use a running example to clarify
what is going on, with the following sample inputs:
* supports = [[0, 2, 4, 6, 8],
[1, 3, 4, 5, 6]]
* weights = [[0.1, 0.6, 0.1, 0.1, 0.1],
[0.1, 0.2, 0.5, 0.1, 0.1]]
* target_support = [4, 5, 6, 7, 8]
In the code below, comments preceded with 'Ex:' will be referencing the above
values.
Args:
supports: Tensor of shape (batch_size, num_dims) defining supports for the
distribution.
weights: Tensor of shape (batch_size, num_dims) defining weights on the
original support points. Although for the CategoricalDQN agent these
weights are probabilities, it is not required that they are.
target_support: Tensor of shape (num_dims) defining support of the projected
distribution. The values must be monotonically increasing. Vmin and Vmax
will be inferred from the first and last elements of this tensor,
respectively. The values in this tensor must be equally spaced.
validate_args: Whether we will verify the contents of the
target_support parameter.
Returns:
A Tensor of shape (batch_size, num_dims) with the projection of a batch of
(support, weights) onto target_support.
Raises:
ValueError: If target_support has no dimensions, or if shapes of supports,
weights, and target_support are incompatible.
"""
target_support_deltas = target_support[1:] - target_support[:-1]
# delta_z = `\Delta z` in Eq7.
delta_z = target_support_deltas[0]
validate_deps = []
supports.shape.assert_is_compatible_with(weights.shape)
supports[0].shape.assert_is_compatible_with(target_support.shape)
target_support.shape.assert_has_rank(1)
if validate_args:
# Assert that supports and weights have the same shapes.
validate_deps.append(
tf.Assert(
tf.reduce_all(tf.equal(tf.shape(supports), tf.shape(weights))),
[supports, weights]))
# Assert that elements of supports and target_support have the same shape.
validate_deps.append(
tf.Assert(
tf.reduce_all(
tf.equal(tf.shape(supports)[1], tf.shape(target_support))),
[supports, target_support]))
# Assert that target_support has a single dimension.
validate_deps.append(
tf.Assert(
tf.equal(tf.size(tf.shape(target_support)), 1), [target_support]))
# Assert that the target_support is monotonically increasing.
validate_deps.append(
tf.Assert(tf.reduce_all(target_support_deltas > 0), [target_support]))
# Assert that the values in target_support are equally spaced.
validate_deps.append(
tf.Assert(
tf.reduce_all(tf.equal(target_support_deltas, delta_z)),
[target_support]))
with tf.control_dependencies(validate_deps):
# Ex: `v_min, v_max = 4, 8`.
v_min, v_max = target_support[0], target_support[-1]
# Ex: `batch_size = 2`.
batch_size = tf.shape(supports)[0]
# `N` in Eq7.
# Ex: `num_dims = 5`.
num_dims = tf.shape(target_support)[0]
# clipped_support = `[\hat{T}_{z_j}]^{V_max}_{V_min}` in Eq7.
# Ex: `clipped_support = [[[ 4. 4. 4. 6. 8.]]
# [[ 4. 4. 4. 5. 6.]]]`.
clipped_support = tf.clip_by_value(supports, v_min, v_max)[:, None, :]
# Ex: `tiled_support = [[[[ 4. 4. 4. 6. 8.]
# [ 4. 4. 4. 6. 8.]
# [ 4. 4. 4. 6. 8.]
# [ 4. 4. 4. 6. 8.]
# [ 4. 4. 4. 6. 8.]]
# [[ 4. 4. 4. 5. 6.]
# [ 4. 4. 4. 5. 6.]
# [ 4. 4. 4. 5. 6.]
# [ 4. 4. 4. 5. 6.]
# [ 4. 4. 4. 5. 6.]]]]`.
tiled_support = tf.tile([clipped_support], [1, 1, num_dims, 1])
# Ex: `reshaped_target_support = [[[ 4.]
# [ 5.]
# [ 6.]
# [ 7.]
# [ 8.]]
# [[ 4.]
# [ 5.]
# [ 6.]
# [ 7.]
# [ 8.]]]`.
reshaped_target_support = tf.tile(target_support[:, None], [batch_size, 1])
reshaped_target_support = tf.reshape(reshaped_target_support,
[batch_size, num_dims, 1])
# numerator = `|clipped_support - z_i|` in Eq7.
# Ex: `numerator = [[[[ 0. 0. 0. 2. 4.]
# [ 1. 1. 1. 1. 3.]
# [ 2. 2. 2. 0. 2.]
# [ 3. 3. 3. 1. 1.]
# [ 4. 4. 4. 2. 0.]]
# [[ 0. 0. 0. 1. 2.]
# [ 1. 1. 1. 0. 1.]
# [ 2. 2. 2. 1. 0.]
# [ 3. 3. 3. 2. 1.]
# [ 4. 4. 4. 3. 2.]]]]`.
numerator = tf.abs(tiled_support - reshaped_target_support)
quotient = 1 - (numerator / delta_z)
# clipped_quotient = `[1 - numerator / (\Delta z)]_0^1` in Eq7.
# Ex: `clipped_quotient = [[[[ 1. 1. 1. 0. 0.]
# [ 0. 0. 0. 0. 0.]
# [ 0. 0. 0. 1. 0.]
# [ 0. 0. 0. 0. 0.]
# [ 0. 0. 0. 0. 1.]]
# [[ 1. 1. 1. 0. 0.]
# [ 0. 0. 0. 1. 0.]
# [ 0. 0. 0. 0. 1.]
# [ 0. 0. 0. 0. 0.]
# [ 0. 0. 0. 0. 0.]]]]`.
clipped_quotient = tf.clip_by_value(quotient, 0, 1)
# Ex: `weights = [[ 0.1 0.6 0.1 0.1 0.1]
# [ 0.1 0.2 0.5 0.1 0.1]]`.
weights = weights[:, None, :]
# inner_prod = `\sum_{j=0}^{N-1} clipped_quotient * p_j(x', \pi(x'))`
# in Eq7.
# Ex: `inner_prod = [[[[ 0.1 0.6 0.1 0. 0. ]
# [ 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0.1 0. ]
# [ 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0.1]]
# [[ 0.1 0.2 0.5 0. 0. ]
# [ 0. 0. 0. 0.1 0. ]
# [ 0. 0. 0. 0. 0.1]
# [ 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. ]]]]`.
inner_prod = clipped_quotient * weights
# Ex: `projection = [[ 0.8 0.0 0.1 0.0 0.1]
# [ 0.8 0.1 0.1 0.0 0.0]]`.
projection = tf.reduce_sum(inner_prod, 3)
projection = tf.reshape(projection, [batch_size, num_dims])
return projection
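# --- Illustrative check (editor's sketch): with the sample inputs from the
# docstring above, the projection should reproduce the 'Ex:' values:
#
#   supports = tf.constant([[0., 2., 4., 6., 8.], [1., 3., 4., 5., 6.]])
#   weights = tf.constant([[0.1, 0.6, 0.1, 0.1, 0.1],
#                          [0.1, 0.2, 0.5, 0.1, 0.1]])
#   target_support = tf.constant([4., 5., 6., 7., 8.])
#   projection = project_distribution(supports, weights, target_support)
#   # -> [[0.8, 0.0, 0.1, 0.0, 0.1],
#   #     [0.8, 0.1, 0.1, 0.0, 0.0]]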
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/rainbow/rainbow_agent.py
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/rainbow/third_party/__init__.py
|
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A checkpointing mechanism for Dopamine agents.
This Checkpointer expects a base directory where checkpoints for different
iterations are stored. Specifically, Checkpointer.save_checkpoint() takes in
as input a dictionary 'data' to be pickled to disk. At each iteration, we
write a file called 'ckpt.#', where # is the iteration number. The
Checkpointer also cleans up old files, maintaining up to the CHECKPOINT_DURATION
most recent iterations.
The Checkpointer writes a sentinel file to indicate that checkpointing was
globally successful. This means that all other checkpointing activities
(saving the Tensorflow graph, the replay buffer) should be performed *prior*
to calling Checkpointer.save_checkpoint(). This allows the Checkpointer to
detect incomplete checkpoints.
#### Example
After running 10 iterations (numbered 0...9) with base_directory='/checkpoint',
the following files will exist:
```
/checkpoint/ckpt.6
/checkpoint/ckpt.7
/checkpoint/ckpt.8
/checkpoint/ckpt.9
/checkpoint/sentinel_checkpoint_complete.6
/checkpoint/sentinel_checkpoint_complete.7
/checkpoint/sentinel_checkpoint_complete.8
/checkpoint/sentinel_checkpoint_complete.9
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import tensorflow as tf
CHECKPOINT_DURATION = 4
def get_latest_checkpoint_number(base_directory):
"""Returns the version number of the latest completed checkpoint.
Args:
base_directory: str, directory in which to look for checkpoint files.
Returns:
int, the iteration number of the latest checkpoint, or -1 if none was found.
"""
glob = os.path.join(base_directory, 'sentinel_checkpoint_complete.*')
def extract_iteration(x):
return int(x[x.rfind('.') + 1:])
try:
checkpoint_files = tf.gfile.Glob(glob)
except tf.errors.NotFoundError:
return -1
try:
latest_iteration = max(extract_iteration(x) for x in checkpoint_files)
return latest_iteration
except ValueError:
return -1
class Checkpointer(object):
"""Class for managing checkpoints for Dopamine agents.
"""
def __init__(self, base_directory, checkpoint_file_prefix='ckpt',
checkpoint_frequency=1):
"""Initializes Checkpointer.
Args:
base_directory: str, directory where all checkpoints are saved/loaded.
checkpoint_file_prefix: str, prefix to use for naming checkpoint files.
checkpoint_frequency: int, the frequency at which to checkpoint.
Raises:
ValueError: if base_directory is empty, or not creatable.
"""
if not base_directory:
raise ValueError('No path provided to Checkpointer.')
self._checkpoint_file_prefix = checkpoint_file_prefix
self._checkpoint_frequency = checkpoint_frequency
self._base_directory = base_directory
try:
tf.gfile.MakeDirs(base_directory)
except tf.errors.PermissionDeniedError:
# We catch the PermissionDeniedError and issue a more useful exception.
raise ValueError('Unable to create checkpoint path: {}.'.format(
base_directory))
def _generate_filename(self, file_prefix, iteration_number):
"""Returns a checkpoint filename from prefix and iteration number."""
filename = '{}.{}'.format(file_prefix, iteration_number)
return os.path.join(self._base_directory, filename)
def _save_data_to_file(self, data, filename):
"""Saves the given 'data' object to a file."""
with tf.gfile.GFile(filename, 'w') as fout:
pickle.dump(data, fout)
def save_checkpoint(self, iteration_number, data):
"""Saves a new checkpoint at the current iteration_number.
Args:
iteration_number: int, the current iteration number for this checkpoint.
data: Any (picklable) python object containing the data to store in the
checkpoint.
"""
if iteration_number % self._checkpoint_frequency != 0:
return
filename = self._generate_filename(self._checkpoint_file_prefix,
iteration_number)
self._save_data_to_file(data, filename)
filename = self._generate_filename('sentinel_checkpoint_complete',
iteration_number)
with tf.gfile.GFile(filename, 'wb') as fout:
fout.write('done')
self._clean_up_old_checkpoints(iteration_number)
def _clean_up_old_checkpoints(self, iteration_number):
"""Removes sufficiently old checkpoints."""
# After writing the checkpoint and sentinel file, we garbage collect files
# that are CHECKPOINT_DURATION * self._checkpoint_frequency versions old.
stale_iteration_number = iteration_number - (self._checkpoint_frequency *
CHECKPOINT_DURATION)
if stale_iteration_number >= 0:
stale_file = self._generate_filename(self._checkpoint_file_prefix,
stale_iteration_number)
stale_sentinel = self._generate_filename('sentinel_checkpoint_complete',
stale_iteration_number)
try:
tf.gfile.Remove(stale_file)
tf.gfile.Remove(stale_sentinel)
except tf.errors.NotFoundError:
# Ignore if file not found.
tf.logging.info('Unable to remove {} or {}.'.format(stale_file,
stale_sentinel))
def _load_data_from_file(self, filename):
if not tf.gfile.Exists(filename):
return None
with tf.gfile.GFile(filename, 'rb') as fin:
return pickle.load(fin)
def load_checkpoint(self, iteration_number):
"""Tries to reload a checkpoint at the selected iteration number.
Args:
iteration_number: The checkpoint iteration number to try to load.
Returns:
The unpickled data that was passed in as 'data' to save_checkpoint, if the
checkpoint file exists; returns None if it does not.
"""
checkpoint_file = self._generate_filename(self._checkpoint_file_prefix,
iteration_number)
return self._load_data_from_file(checkpoint_file)
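# --- Illustrative usage (editor's sketch; the directory is hypothetical) ---
#
#   ckpt = Checkpointer('/tmp/hanabi/checkpoints')
#   ckpt.save_checkpoint(iteration_number=0, data={'current_iteration': 0})
#   restored = ckpt.load_checkpoint(0)      # None if the file does not exist
#   latest = get_latest_checkpoint_number('/tmp/hanabi/checkpoints')  # -1 if none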
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/rainbow/third_party/dopamine/checkpointer.py
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/rainbow/third_party/dopamine/__init__.py
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A lightweight logging mechanism for dopamine agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import tensorflow as tf
CHECKPOINT_DURATION = 4
class Logger(object):
"""Class for maintaining a dictionary of data to log."""
def __init__(self, logging_dir):
"""Initializes Logger.
Args:
logging_dir: str, Directory to which logs are written.
"""
# Dict used by logger to store data.
self.data = {}
self._logging_enabled = True
if not logging_dir:
tf.logging.info('Logging directory not specified, will not log.')
self._logging_enabled = False
return
# Try to create logging directory.
try:
tf.gfile.MakeDirs(logging_dir)
except tf.errors.PermissionDeniedError:
# If it already exists, ignore exception.
pass
if not tf.gfile.Exists(logging_dir):
tf.logging.warning(
'Could not create directory %s, logging will be disabled.',
logging_dir)
self._logging_enabled = False
return
self._logging_dir = logging_dir
def __setitem__(self, key, value):
"""This method will set an entry at key with value in the dictionary.
It will effectively overwrite any previous data at the same key.
Args:
key: str, indicating key where to write the entry.
value: A python object to store.
"""
if self._logging_enabled:
self.data[key] = value
def _generate_filename(self, filename_prefix, iteration_number):
filename = '{}_{}'.format(filename_prefix, iteration_number)
return os.path.join(self._logging_dir, filename)
def log_to_file(self, filename_prefix, iteration_number):
"""Save the pickled dictionary to a file.
Args:
filename_prefix: str, name of the file to use (without iteration
number).
iteration_number: int, the iteration number, appended to the end of
filename_prefix.
"""
if not self._logging_enabled:
tf.logging.warning('Logging is disabled.')
return
log_file = self._generate_filename(filename_prefix, iteration_number)
with tf.gfile.GFile(log_file, 'w') as fout:
pickle.dump(self.data, fout, protocol=pickle.HIGHEST_PROTOCOL)
# After writing a checkpoint file, we garbage collect the log file
# that is CHECKPOINT_DURATION versions old.
stale_iteration_number = iteration_number - CHECKPOINT_DURATION
if stale_iteration_number >= 0:
stale_file = self._generate_filename(filename_prefix,
stale_iteration_number)
try:
tf.gfile.Remove(stale_file)
except tf.errors.NotFoundError:
# Ignore if file not found.
pass
def is_logging_enabled(self):
"""Return if logging is enabled."""
return self._logging_enabled
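# --- Illustrative usage (editor's sketch; the directory is hypothetical) ---
#
#   experiment_logger = Logger('/tmp/hanabi/logs')
#   experiment_logger['iter0'] = statistics      # overwrites any prior entry
#   experiment_logger.log_to_file('log', 0)      # writes .../logs/log_0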
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/rainbow/third_party/dopamine/logger.py
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class for storing iteration-specific metrics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class IterationStatistics(object):
"""A class for storing iteration-specific metrics.
The internal format is as follows: we maintain a mapping from keys to lists.
Each list contains all the values corresponding to the given key.
For example, self.data_lists['train_episode_returns'] might contain the
per-episode returns achieved during this iteration.
Attributes:
data_lists: dict mapping each metric_name (str) to a list of said metric
across episodes.
"""
def __init__(self):
self.data_lists = {}
def append(self, data_pairs):
"""Add the given values to their corresponding key-indexed lists.
Args:
data_pairs: A dictionary of key-value pairs to be recorded.
"""
for key, value in data_pairs.items():
if key not in self.data_lists:
self.data_lists[key] = []
self.data_lists[key].append(value)
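# --- Illustrative usage (editor's sketch) ---
#
#   statistics = IterationStatistics()
#   statistics.append({'train_episode_returns': 5.0,
#                      'train_episode_lengths': 42})
#   statistics.data_lists['train_episode_returns']   # -> [5.0]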
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/rainbow/third_party/dopamine/iteration_statistics.py
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A sum tree data structure.
Used for prioritized experience replay. See prioritized_replay_buffer.py
and Schaul et al. (2015).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
import numpy as np
class SumTree(object):
"""A sum tree data structure for storing replay priorities.
A sum tree is a complete binary tree whose leaves contain values called
priorities. Internal nodes maintain the sum of the priorities of all leaf
nodes in their subtree.
For capacity = 4, the tree may look like this:
+---+
|2.5|
+-+-+
|
+-------+--------+
| |
+-+-+ +-+-+
|1.5| |1.0|
+-+-+ +-+-+
| |
+----+----+ +----+----+
| | | |
+-+-+ +-+-+ +-+-+ +-+-+
|0.5| |1.0| |0.5| |0.5|
+---+ +---+ +---+ +---+
This is stored in a list of numpy arrays:
self.nodes = [ [2.5], [1.5, 1], [0.5, 1, 0.5, 0.5] ]
For conciseness, we allocate arrays as powers of two, and pad the excess
elements with zero values.
This is similar to the usual array-based representation of a complete binary
tree, but is a little more user-friendly.
"""
def __init__(self, capacity):
"""Creates the sum tree data structure for the given replay capacity.
Args:
capacity: int, the maximum number of elements that can be stored in this
data structure.
Raises:
ValueError: If requested capacity is not positive.
"""
assert isinstance(capacity, int)
if capacity <= 0:
raise ValueError('Sum tree capacity should be positive. Got: {}'.
format(capacity))
self.nodes = []
tree_depth = int(math.ceil(np.log2(capacity)))
level_size = 1
for _ in range(tree_depth + 1):
nodes_at_this_depth = np.zeros(level_size)
self.nodes.append(nodes_at_this_depth)
level_size *= 2
self.max_recorded_priority = 1.0
def _total_priority(self):
"""Returns the sum of all priorities stored in this sum tree.
Returns:
float, sum of priorities stored in this sum tree.
"""
return self.nodes[0][0]
def sample(self, query_value=None):
"""Samples an element from the sum tree.
Each element has probability p_i / sum_j p_j of being picked, where p_i is
the (positive) value associated with node i (possibly unnormalized).
Args:
query_value: float in [0, 1], used as the random value to select a
sample. If None, will select one randomly in [0, 1).
Returns:
int, a random element from the sum tree.
Raises:
Exception: If the sum tree is empty (i.e. its node values sum to 0), or if
the supplied query_value is larger than the total sum.
"""
if self._total_priority() == 0.0:
raise Exception('Cannot sample from an empty sum tree.')
if query_value and (query_value < 0. or query_value > 1.):
raise ValueError('query_value must be in [0, 1].')
# Sample a value in range [0, R), where R is the value stored at the root.
query_value = random.random() if query_value is None else query_value
query_value *= self._total_priority()
# Now traverse the sum tree.
node_index = 0
for nodes_at_this_depth in self.nodes[1:]:
# Compute children of previous depth's node.
left_child = node_index * 2
left_sum = nodes_at_this_depth[left_child]
# Each subtree describes a range [0, a), where a is its value.
if query_value < left_sum: # Recurse into left subtree.
node_index = left_child
else: # Recurse into right subtree.
node_index = left_child + 1
# Adjust query to be relative to right subtree.
query_value -= left_sum
return node_index
def stratified_sample(self, batch_size):
"""Performs stratified sampling using the sum tree.
Let R be the value at the root (total value of sum tree). This method will
divide [0, R) into batch_size segments, pick a random number from each of
those segments, and use that random number to sample from the sum_tree. This
is as specified in Schaul et al. (2015).
Args:
batch_size: int, the number of strata to use.
Returns:
list of batch_size elements sampled from the sum tree.
Raises:
Exception: If the sum tree is empty (i.e. its node values sum to 0).
"""
if self._total_priority() == 0.0:
raise Exception('Cannot sample from an empty sum tree.')
bounds = np.linspace(0., 1., batch_size + 1)
assert len(bounds) == batch_size + 1
segments = [(bounds[i], bounds[i+1]) for i in range(batch_size)]
query_values = [random.uniform(x[0], x[1]) for x in segments]
return [self.sample(query_value=x) for x in query_values]
def get(self, node_index):
"""Returns the value of the leaf node corresponding to the index.
Args:
node_index: The index of the leaf node.
Returns:
The value of the leaf node.
"""
return self.nodes[-1][node_index]
def set(self, node_index, value):
"""Sets the value of a leaf node and updates internal nodes accordingly.
This operation takes O(log(capacity)).
Args:
node_index: int, the index of the leaf node to be updated.
value: float, the value which we assign to the node. This value must be
nonnegative. Setting value = 0 will cause the element to never be
sampled.
Raises:
ValueError: If the given value is negative.
"""
if value < 0.0:
raise ValueError('Sum tree values should be nonnegative. Got {}'.
format(value))
self.max_recorded_priority = max(value, self.max_recorded_priority)
delta_value = value - self.nodes[-1][node_index]
# Now traverse back the tree, adjusting all sums along the way.
for nodes_at_this_depth in reversed(self.nodes):
# Note: Adding a delta leads to some tolerable numerical inaccuracies.
nodes_at_this_depth[node_index] += delta_value
node_index //= 2
assert node_index == 0, ('Sum tree traversal failed, final node index '
'is not 0.')
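# --- Illustrative usage sketch (not part of the upstream Dopamine file). It
# assumes the enclosing class is `SumTree` and that its constructor takes a
# `capacity` argument, as in the upstream Dopamine implementation.
if __name__ == '__main__':
  _tree = SumTree(capacity=4)
  _tree.set(0, 1.0)
  _tree.set(1, 3.0)  # Index 1 is now three times as likely to be drawn as 0.
  # Proportional sampling and stratified sampling over the stored priorities.
  print(_tree.sample())
  print(_tree.stratified_sample(batch_size=2))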
|
hanabi-learning-environment-master
|
hanabi_learning_environment/agents/rainbow/third_party/dopamine/sum_tree.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple episode runner using the RL environment."""
from __future__ import print_function
import sys
import getopt
from hanabi_learning_environment import rl_env
from hanabi_learning_environment.agents.random_agent import RandomAgent
from hanabi_learning_environment.agents.simple_agent import SimpleAgent
AGENT_CLASSES = {'SimpleAgent': SimpleAgent, 'RandomAgent': RandomAgent}
class Runner(object):
"""Runner class."""
def __init__(self, flags):
"""Initialize runner."""
self.flags = flags
self.agent_config = {'players': flags['players']}
self.environment = rl_env.make('Hanabi-Full', num_players=flags['players'])
self.agent_class = AGENT_CLASSES[flags['agent_class']]
def run(self):
"""Run episodes."""
rewards = []
    for episode in range(self.flags['num_episodes']):
observations = self.environment.reset()
agents = [self.agent_class(self.agent_config)
for _ in range(self.flags['players'])]
done = False
episode_reward = 0
while not done:
for agent_id, agent in enumerate(agents):
observation = observations['player_observations'][agent_id]
action = agent.act(observation)
if observation['current_player'] == agent_id:
assert action is not None
current_player_action = action
else:
assert action is None
# Make an environment step.
print('Agent: {} action: {}'.format(observation['current_player'],
current_player_action))
observations, reward, done, unused_info = self.environment.step(
current_player_action)
episode_reward += reward
rewards.append(episode_reward)
print('Running episode: %d' % episode)
print('Max Reward: %.3f' % max(rewards))
return rewards
if __name__ == "__main__":
flags = {'players': 2, 'num_episodes': 1, 'agent_class': 'SimpleAgent'}
options, arguments = getopt.getopt(sys.argv[1:], '',
['players=',
'num_episodes=',
'agent_class='])
if arguments:
sys.exit('usage: rl_env_example.py [options]\n'
'--players number of players in the game.\n'
'--num_episodes number of game episodes to run.\n'
'--agent_class {}'.format(' or '.join(AGENT_CLASSES.keys())))
for flag, value in options:
flag = flag[2:] # Strip leading --.
flags[flag] = type(flags[flag])(value)
runner = Runner(flags)
runner.run()
|
hanabi-learning-environment-master
|
examples/rl_env_example.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example code demonstrating the Python Hanabi interface."""
from __future__ import print_function
import numpy as np
from hanabi_learning_environment import pyhanabi
def run_game(game_parameters):
"""Play a game, selecting random actions."""
def print_state(state):
"""Print some basic information about the state."""
print("")
print("Current player: {}".format(state.cur_player()))
print(state)
    # Example of more queries to provide more information about this state.
    # For example, bots could use these methods to get information about the
    # state in order to act accordingly.
print("### Information about the state retrieved separately ###")
print("### Information tokens: {}".format(state.information_tokens()))
print("### Life tokens: {}".format(state.life_tokens()))
print("### Fireworks: {}".format(state.fireworks()))
print("### Deck size: {}".format(state.deck_size()))
print("### Discard pile: {}".format(str(state.discard_pile())))
print("### Player hands: {}".format(str(state.player_hands())))
print("")
def print_observation(observation):
"""Print some basic information about an agent observation."""
print("--- Observation ---")
print(observation)
print("### Information about the observation retrieved separately ###")
print("### Current player, relative to self: {}".format(
observation.cur_player_offset()))
print("### Observed hands: {}".format(observation.observed_hands()))
print("### Card knowledge: {}".format(observation.card_knowledge()))
print("### Discard pile: {}".format(observation.discard_pile()))
print("### Fireworks: {}".format(observation.fireworks()))
print("### Deck size: {}".format(observation.deck_size()))
move_string = "### Last moves:"
for move_tuple in observation.last_moves():
move_string += " {}".format(move_tuple)
print(move_string)
print("### Information tokens: {}".format(observation.information_tokens()))
print("### Life tokens: {}".format(observation.life_tokens()))
print("### Legal moves: {}".format(observation.legal_moves()))
print("--- EndObservation ---")
def print_encoded_observations(encoder, state, num_players):
print("--- EncodedObservations ---")
print("Observation encoding shape: {}".format(encoder.shape()))
print("Current actual player: {}".format(state.cur_player()))
for i in range(num_players):
print("Encoded observation for player {}: {}".format(
i, encoder.encode(state.observation(i))))
print("--- EndEncodedObservations ---")
game = pyhanabi.HanabiGame(game_parameters)
print(game.parameter_string(), end="")
obs_encoder = pyhanabi.ObservationEncoder(
game, enc_type=pyhanabi.ObservationEncoderType.CANONICAL)
state = game.new_initial_state()
while not state.is_terminal():
if state.cur_player() == pyhanabi.CHANCE_PLAYER_ID:
state.deal_random_card()
continue
print_state(state)
observation = state.observation(state.cur_player())
print_observation(observation)
print_encoded_observations(obs_encoder, state, game.num_players())
legal_moves = state.legal_moves()
print("")
print("Number of legal moves: {}".format(len(legal_moves)))
move = np.random.choice(legal_moves)
print("Chose random legal move: {}".format(move))
state.apply_move(move)
print("")
print("Game done. Terminal state:")
print("")
print(state)
print("")
print("score: {}".format(state.score()))
if __name__ == "__main__":
# Check that the cdef and library were loaded from the standard paths.
assert pyhanabi.cdef_loaded(), "cdef failed to load"
assert pyhanabi.lib_loaded(), "lib failed to load"
run_game({"players": 3, "random_start_player": True})
|
hanabi-learning-environment-master
|
examples/game_example.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup for pip package."""
from setuptools import find_namespace_packages
from setuptools import setup
def _get_version():
with open('jmp/__init__.py') as fp:
for line in fp:
if line.startswith('__version__'):
g = {}
exec(line, g) # pylint: disable=exec-used
return g['__version__']
raise ValueError('`__version__` not defined in `jmp/__init__.py`')
def _parse_requirements(requirements_txt_path):
with open(requirements_txt_path) as fp:
return fp.read().splitlines()
_VERSION = _get_version()
EXTRA_PACKAGES = {
'jax': _parse_requirements('requirements-jax.txt'),
}
setup(
name='jmp',
version=_VERSION,
url='https://github.com/deepmind/jmp',
license='Apache 2.0',
author='DeepMind',
description='JMP is a Mixed Precision library for JAX.',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_namespace_packages(exclude=['*_test.py']),
install_requires=_parse_requirements('requirements.txt'),
extras_require=EXTRA_PACKAGES,
tests_require=_parse_requirements('requirements-test.txt'),
    python_requires='>=3.8',
include_package_data=True,
zip_safe=False,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
)
|
jmp-main
|
setup.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JMP is a Mixed Precision library for JAX."""
from jmp._src.loss_scale import all_finite
from jmp._src.loss_scale import DynamicLossScale
from jmp._src.loss_scale import LossScale
from jmp._src.loss_scale import NoOpLossScale
from jmp._src.loss_scale import select_tree
from jmp._src.loss_scale import StaticLossScale
from jmp._src.policy import cast_to_full
from jmp._src.policy import cast_to_half
from jmp._src.policy import get_policy
from jmp._src.policy import half_dtype
from jmp._src.policy import Policy
__version__ = "0.0.5.dev"
__all__ = (
"all_finite",
"DynamicLossScale",
"LossScale",
"NoOpLossScale",
"select_tree",
"StaticLossScale",
"cast_to_full",
"cast_to_half",
"get_policy",
"half_dtype",
"Policy",
)
# _________________________________________
# / Please don't use symbols in `_src` they \
# \ are not part of the JMP public API. /
# -----------------------------------------
# \ ^__^
# \ (oo)\_______
# (__)\ )\/\
# ||----w |
# || ||
#
try:
del _src # pylint: disable=undefined-variable
except NameError:
pass
|
jmp-main
|
jmp/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for jmp._src.policy."""
import itertools as it
import unittest
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from jmp._src import policy as jmp
import numpy as np
HALF_DTYPES = (np.float16, jnp.float16, jnp.bfloat16)
FULL_DTYPES = (np.float32, jnp.float32)
DTYPES = HALF_DTYPES + FULL_DTYPES
NUMPYS = (np, jnp)
def get_dtype_name(dtype):
names = {
np.float16: "float16",
jnp.bfloat16: "bfloat16",
np.float32: "float32"
}
return names[dtype]
def current_platform():
return jax.local_devices()[0].platform
def skip_if_unsupported(dtype):
platform = current_platform()
if ((platform == "gpu" and dtype == jnp.bfloat16) or
(platform == "tpu" and dtype in (np.float16, jnp.float16))):
raise unittest.SkipTest(
f"{get_dtype_name(dtype)} not supported on {platform}")
class PolicyTest(parameterized.TestCase):
def assert_dtypes_equal(self, tree_a, tree_b):
jax.tree_util.tree_map(lambda a, b: self.assertEqual(a.dtype, b.dtype),
tree_a, tree_b)
@parameterized.parameters(*it.product(DTYPES, NUMPYS))
def test_policy_cast_to_param(self, dtype, np_):
skip_if_unsupported(dtype)
policy = jmp.Policy(dtype, dtype, dtype)
self.assertEqual(policy.param_dtype, dtype)
tree = {"a": np_.ones([])}
self.assert_dtypes_equal(policy.cast_to_param(tree),
{"a": np_.ones([], dtype)})
@parameterized.parameters(*it.product(DTYPES, NUMPYS))
def test_policy_cast_to_compute(self, dtype, np_):
skip_if_unsupported(dtype)
policy = jmp.Policy(dtype, dtype, dtype)
self.assertEqual(policy.compute_dtype, dtype)
tree = {"a": np_.ones([])}
self.assert_dtypes_equal(policy.cast_to_compute(tree),
{"a": np_.ones([], dtype)})
@parameterized.parameters(*it.product(DTYPES, NUMPYS))
def test_policy_cast_to_output(self, dtype, np_):
skip_if_unsupported(dtype)
policy = jmp.Policy(dtype, dtype, dtype)
self.assertEqual(policy.output_dtype, dtype)
tree = {"a": np_.ones([])}
self.assert_dtypes_equal(policy.cast_to_output(tree),
{"a": np_.ones([], dtype)})
@parameterized.parameters(*it.product(DTYPES, NUMPYS))
def test_policy_with_output_dtype(self, dtype, np_):
policy = jmp.Policy(np_.float32, np_.float32, np_.float32)
policy = policy.with_output_dtype(dtype)
self.assertEqual(policy.output_dtype, dtype)
@parameterized.parameters(("float16", np.float16),
("float32", np.float32),
("bfloat16", jnp.bfloat16))
def test_get_policy(self, dtype_name, dtype):
policy = jmp.get_policy(dtype_name)
self.assertEqual(policy.param_dtype, dtype)
self.assertEqual(policy.compute_dtype, dtype)
self.assertEqual(policy.output_dtype, dtype)
def test_get_policy_almost_dtype(self):
with self.assertRaisesRegex(ValueError, "Unknown dtype"):
jmp.get_policy("compute_float16")
@parameterized.parameters(*it.product(DTYPES, NUMPYS))
def test_get_policy_mixed(self, dtype, np_):
full = np_.float32
policy = jmp.get_policy(f"c={get_dtype_name(dtype)}")
self.assertEqual(policy.param_dtype, full)
self.assertEqual(policy.compute_dtype, dtype)
self.assertEqual(policy.output_dtype, dtype)
@parameterized.parameters(*it.product(DTYPES, NUMPYS))
def test_get_policy_compute(self, dtype, np_):
full = np_.float32
policy = jmp.get_policy(f"c={get_dtype_name(dtype)},o=full")
self.assertEqual(policy.param_dtype, full)
self.assertEqual(policy.compute_dtype, dtype)
self.assertEqual(policy.output_dtype, full)
def test_half_dtype(self):
if current_platform() == "tpu":
self.assertEqual(jmp.half_dtype(), jnp.bfloat16)
else:
self.assertEqual(jmp.half_dtype(), jnp.float16)
def test_cast_to_full(self):
half_tree = dict(o=object(),
h=jnp.ones([], dtype=jmp.half_dtype()),
f=jnp.ones([]),
i=jnp.ones([], dtype=jnp.int16))
full_tree = dict(o=half_tree["o"],
h=half_tree["h"].astype(jnp.float32),
f=half_tree["f"],
i=half_tree["i"])
self.assertEqual(jmp.cast_to_full(half_tree), full_tree)
def test_cast_to_half(self):
dtype = jmp.half_dtype()
half_tree = dict(o=object(),
h=jnp.ones([], dtype=dtype),
f=jnp.ones([]),
i=jnp.ones([], dtype=jnp.int16))
full_tree = dict(o=half_tree["o"],
h=half_tree["h"],
f=half_tree["f"].astype(dtype),
i=half_tree["i"])
self.assertEqual(jmp.cast_to_half(full_tree), half_tree)
@parameterized.parameters(*it.product(DTYPES))
def test_str(self, dtype):
policy = jmp.Policy(dtype, dtype, dtype)
policy_str = str(policy)
for str_piece in policy_str.split(","):
dtype_str = str_piece.split("=")[1]
self.assertEqual(dtype_str, jmp.dtype_to_names[dtype][0])
if __name__ == "__main__":
absltest.main()
|
jmp-main
|
jmp/_src/policy_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for mixed precision in JAX."""
import dataclasses
from typing import TypeVar
import jax
import jax.numpy as jnp
import numpy as np
T = TypeVar("T")
def _cast_floating_to(tree: T, dtype: jnp.dtype) -> T:
def conditional_cast(x):
if (isinstance(x, (np.ndarray, jnp.ndarray)) and
jnp.issubdtype(x.dtype, jnp.floating)):
x = x.astype(dtype)
return x
return jax.tree_util.tree_map(conditional_cast, tree)
@dataclasses.dataclass(frozen=True)
class Policy:
"""Encapsulates casting for inputs, outputs and parameters."""
param_dtype: jnp.dtype
compute_dtype: jnp.dtype
output_dtype: jnp.dtype
def cast_to_param(self, x):
"""Converts floating point values to the param dtype."""
return _cast_floating_to(x, self.param_dtype)
def cast_to_compute(self, x):
"""Converts floating point values to the compute dtype."""
return _cast_floating_to(x, self.compute_dtype)
def cast_to_output(self, x):
"""Converts floating point values to the output dtype."""
return _cast_floating_to(x, self.output_dtype)
def with_output_dtype(self, output_dtype: jnp.dtype) -> "Policy":
return dataclasses.replace(self, output_dtype=output_dtype)
def __str__(self):
return "p={},c={},o={}".format(dtype_to_names[self.param_dtype][0],
dtype_to_names[self.compute_dtype][0],
dtype_to_names[self.output_dtype][0])
def get_policy(policy_name: str) -> Policy:
"""Returns a mixed precision policy parsed from a string."""
# Loose grammar supporting:
# - "c=f16" (params full, compute+output in f16),
# - "p=f16,c=f16" (params, compute and output in f16).
# - "p=f16,c=bf16" (params in f16, compute in bf16, output in bf16)
  # For values that are not specified, params defaults to f32, compute follows
  # params and output follows compute (e.g. 'c=f16' -> 'p=f32,c=f16,o=f16').
param_dtype = jnp.float32
compute_dtype = output_dtype = None
if "=" in policy_name:
for part in policy_name.split(","):
      key, value = part.split("=", 1)
value = parse_dtype(value)
if key == "p" or key == "params":
param_dtype = value
elif key == "c" or key == "compute":
compute_dtype = value
elif key == "o" or key == "output":
output_dtype = value
else:
raise ValueError(f"Unknown key '{key}' in '{policy_name}' should be "
"'params', 'compute' or 'output'.")
if compute_dtype is None:
compute_dtype = param_dtype
if output_dtype is None:
output_dtype = compute_dtype
else:
# Assume policy name is a dtype (e.g. 'f32' or 'half') that all components
# of the policy should contain.
param_dtype = compute_dtype = output_dtype = parse_dtype(policy_name)
return Policy(param_dtype=param_dtype, compute_dtype=compute_dtype,
output_dtype=output_dtype)
def cast_to_full(tree: T) -> T:
"""Ensures floating point leaves of the given tree are f32."""
return _cast_floating_to(tree, jnp.float32)
def cast_to_half(tree: T) -> T:
"""Ensures floating point leaves of the given tree are half precision."""
return _cast_floating_to(tree, half_dtype())
def half_dtype() -> jnp.dtype:
"""Returns the half precision dtype for the current backend."""
device0 = jax.local_devices()[0]
on_tpu = device0.platform == "tpu"
return jnp.bfloat16 if on_tpu else jnp.float16
dtype_to_names = {
jnp.bfloat16: ("bf16", "bfloat16"),
jnp.float16: ("f16", "float16"),
jnp.float32: ("full", "f32", "float32"),
jnp.float64: ("f64", "float64"),
}
name_to_dtype = {name: dtype for dtype, names in dtype_to_names.items() # pylint: disable=g-complex-comprehension
for name in names}
def parse_dtype(value: str) -> jnp.dtype:
"""Parses a string representing a dtype into a dtype object."""
if value == "half":
return half_dtype()
try:
return name_to_dtype[value]
except KeyError as e:
raise ValueError(
f"Unknown dtype '{value}' must be full,half,float16,bfloat16 or a "
"contraction thereof (e.g. 'f' for 'full', 'bf16' for 'bfloat16')"
) from e
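# --- Illustrative usage sketch (appended; not part of the upstream jmp file).
# Demonstrates the policy-string grammar parsed by `get_policy` above and how
# casting only touches floating-point leaves of a tree.
if __name__ == "__main__":
  mixed = get_policy("p=f32,c=f16,o=f32")
  print(mixed)  # p=full,c=f16,o=full
  tree = {"w": np.ones([2], np.float32), "step": np.zeros([], np.int32)}
  # The float32 leaf is cast to float16; the int32 leaf passes through.
  print({k: v.dtype for k, v in mixed.cast_to_compute(tree).items()})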
|
jmp-main
|
jmp/_src/policy.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
jmp-main
|
jmp/_src/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for loss scaling."""
import dataclasses
import functools
from typing import Tuple, TypeVar, Union
import warnings
import jax
from jax import tree_util
import jax.numpy as jnp
import numpy as np
# from deepmind.internal import usage_logging
T = TypeVar("T")
def register_empty_pytree(cls):
tree_util.register_pytree_node(cls, lambda x: ((), x), lambda x, _: x)
@dataclasses.dataclass(frozen=True)
class NoOpLossScale:
"""No-op loss scale does nothing."""
@property
def loss_scale(self):
return 1
def scale(self, tree: T) -> T:
# usage_logging.log_event(usage_logging.Event.JMP, "NoOpLossScale")
return tree
def unscale(self, tree: T) -> T:
return tree
def adjust(self, grads_finite: jnp.ndarray):
del grads_finite
return self
@dataclasses.dataclass(frozen=True)
class StaticLossScale:
"""Scales and unscales by a fixed constant."""
loss_scale: jnp.ndarray
def scale(self, tree: T) -> T:
# usage_logging.log_event(usage_logging.Event.JMP, "StaticLossScale")
return jax.tree_util.tree_map(lambda x: x * self.loss_scale, tree)
def unscale(self, tree: T) -> T:
inv_loss_scale = 1 / self.loss_scale
return jax.tree_util.tree_map(lambda x: x * inv_loss_scale, tree)
def adjust(self, grads_finite: jnp.ndarray):
del grads_finite
return self
_Data = Tuple[jnp.ndarray, ...]
_Meta = Tuple[int, int]
@dataclasses.dataclass(frozen=True)
class DynamicLossScale:
"""Dynamic loss scale.
Dynamic loss scaling tries to determine the largest loss scale value that
will keep gradients finite. It does this by increasing the loss scale every
`period` steps by `factor` if the grads remain finite, otherwise it reduces
the loss scale by `1 / factor` and resets the counter.
loss_scale = 2 ** 15
counter = 0
period = 2000
factor = 2
for step in range(num_steps):
loss *= loss_scale
grads /= loss_scale
grads_finite = all_finite(grads)
if grads_finite:
counter += 1
if counter == period:
counter = 0
loss_scale = first_finite(loss_scale * factor, loss_scale)
else:
counter = 0
loss_scale = max(1, loss_scale / factor)
Typical usage of this class will be something like:
>>> loss_scale = jmp.DynamicLossScale(jnp.asarray(2. ** 15))
>>> for _ in range(num_steps):
... # compute loss
... loss = loss_scale.scale(loss)
... # compute grads
... grads = loss_scale.unscale(grads)
... grads_finite = jmp.all_finite(grads)
... loss_scale = loss_scale.adjust(grads_finite)
... # conditionally update params using grads
"""
loss_scale: jnp.ndarray
counter: jnp.ndarray = dataclasses.field(
default_factory=lambda: np.zeros([], np.int32))
period: int = 2000
factor: int = 2
min_loss_scale: jnp.ndarray = dataclasses.field(
default_factory=lambda: np.ones([], np.float32))
def __post_init__(self) -> None:
warn_if_not_floating(self.loss_scale, "loss_scale")
warn_if_not_floating(self.min_loss_scale, "min_loss_scale")
def scale(self, tree: T) -> T:
# usage_logging.log_event(usage_logging.Event.JMP, "DynamicLossScale")
return jax.tree_util.tree_map(lambda x: x * self.loss_scale, tree)
def unscale(self, tree: T) -> T:
inv_loss_scale = 1 / self.loss_scale
return jax.tree_util.tree_map(lambda x: x * inv_loss_scale, tree)
def tree_flatten(self) -> Tuple[_Data, _Meta]:
data = (self.loss_scale, self.counter)
meta = (self.period, self.factor)
return data, meta
@classmethod
def tree_unflatten(cls, meta: _Meta, data: _Data) -> "DynamicLossScale":
loss_scale, counter = data
period, factor = meta
return cls(loss_scale, counter, period, factor)
def adjust(self, grads_finite: jnp.ndarray) -> "DynamicLossScale":
"""Returns the next state dependent on whether grads are finite."""
assert grads_finite.ndim == 0, "Expected boolean scalar"
first_finite = lambda a, b: jax.lax.select(jnp.isfinite(a).all(), a, b)
loss_scale = jax.lax.select(
grads_finite,
# When grads are finite increase loss scale periodically.
jax.lax.select(
self.counter == (self.period - 1),
first_finite(self.loss_scale * self.factor,
self.loss_scale),
self.loss_scale),
# If grads are non finite reduce loss scale.
jnp.maximum(self.min_loss_scale, self.loss_scale / self.factor))
counter = ((self.counter + 1) % self.period) * grads_finite
return DynamicLossScale(
loss_scale=loss_scale,
counter=counter,
period=self.period,
factor=self.factor,
min_loss_scale=self.min_loss_scale)
register_empty_pytree(NoOpLossScale)
register_empty_pytree(StaticLossScale)
tree_util.register_pytree_node_class(DynamicLossScale)
LossScale = Union[NoOpLossScale, StaticLossScale, DynamicLossScale]
def all_finite(tree) -> jnp.ndarray:
"""Returns a scalar ndarray indicating whether the input arrays are finite."""
leaves = jax.tree_util.tree_leaves(tree)
if not leaves:
return jnp.array(True)
else:
leaves = map(jnp.isfinite, leaves)
leaves = map(jnp.all, leaves)
return jnp.stack(list(leaves)).all()
def select_tree(pred: jnp.ndarray, a: T, b: T) -> T:
"""Selects a pytree based on the given predicate."""
assert pred.ndim == 0 and pred.dtype == jnp.bool_, "expected boolean scalar"
return jax.tree_util.tree_map(functools.partial(jax.lax.select, pred), a, b)
def warn_if_not_floating(x: Union[jnp.ndarray, object], var_name: str) -> None:
"""Produces a warning if the given array does not have a floating type.
  This function handles an edge case where Jax passes in an `object()` to
determine the structure of user defined pytrees during compilation. They
recommend explicitly checking if the array in question has the type `object`.
From the Jax documentation: "The __init__ and __new__ methods of custom
PyTree classes should generally avoid doing any array conversion or other
input validation, or else anticipate and handle these special cases."
See:
https://jax.readthedocs.io/en/latest/pytrees.html#custom-pytrees-and-initialization
Args:
x: Any object.
var_name: A useful name to put in error messages.
"""
if type(x) is object: # pylint: disable=unidiomatic-typecheck
return
x_dtype = jax.eval_shape(lambda: x).dtype
if not jnp.issubdtype(x_dtype, jnp.floating):
warnings.warn(f"Expected floating type for {var_name}, got {x_dtype}")
|
jmp-main
|
jmp/_src/loss_scale.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for jmp._src.loss_scale."""
import warnings
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from jmp._src import loss_scale as jmp
import numpy as np
class LossScaleTest(parameterized.TestCase):
def test_no_op_loss_scale(self):
loss_scale = jmp.NoOpLossScale()
tree = {"a": jnp.ones([])}
self.assertIs(loss_scale.scale(tree), tree)
self.assertIs(loss_scale.unscale(tree), tree)
@parameterized.named_parameters(
("StaticLossScale(2)", jmp.StaticLossScale, 2),
("StaticLossScale(3)", jmp.StaticLossScale, 3),
("StaticLossScale(4)", jmp.StaticLossScale, 4),
("DynamicLossScale(2)", jmp.DynamicLossScale, 2.),
("DynamicLossScale(3)", jmp.DynamicLossScale, 3.),
("DynamicLossScale(4)", jmp.DynamicLossScale, 4.),
)
def test_static_loss_scale(self, cls, scale):
loss_scale = cls(scale)
tree = {"a": jnp.array(1.)}
scaled_tree = {"a": jnp.array(1. * scale)}
self.assertEqual(loss_scale.scale(tree), scaled_tree)
self.assertEqual(loss_scale.unscale(scaled_tree), tree)
@parameterized.named_parameters(
("NoOpLossScale", jmp.NoOpLossScale),
("StaticLossScale", lambda: jmp.StaticLossScale(0)), # pytype: disable=wrong-arg-types # jax-ndarray
)
def test_static_empty_trees(self, create):
loss_scale = create()
self.assertEmpty(jax.tree_util.tree_leaves(loss_scale))
def test_dynamic_loss_scale_no_warnings(self):
with warnings.catch_warnings(record=True) as logged_warnings:
jmp.DynamicLossScale(2. ** 15) # pytype: disable=wrong-arg-types # jax-ndarray
self.assertEmpty(logged_warnings)
def test_dynamic_loss_scale_tree(self):
scale = jnp.ones([])
counter = jnp.zeros([], jnp.int32)
period = 2000
factor = 2
loss_scale = jmp.DynamicLossScale(scale, counter, period, factor)
self.assertEqual(jax.tree_util.tree_leaves(loss_scale), [scale, counter])
self.assertEqual(jax.tree_util.tree_map(lambda x: x, loss_scale),
loss_scale)
@parameterized.parameters((20, 2), (30, 3))
def test_dynamic_loss_scale_adjust_increases_on_finite(self, period, factor):
grads_finite = jnp.bool_(True)
loss_scale = jmp.DynamicLossScale(jnp.float32(10), jnp.int32(0),
period, factor)
for i in range(1, period):
loss_scale = loss_scale.adjust(grads_finite)
self.assertEqual(loss_scale.loss_scale, 10)
self.assertEqual(loss_scale.counter, i)
self.assertEqual(loss_scale.period, period)
self.assertEqual(loss_scale.factor, factor)
# Loss scale should wrap.
loss_scale = loss_scale.adjust(grads_finite)
self.assertEqual(loss_scale.loss_scale, 10 * factor)
self.assertEqual(loss_scale.counter, 0)
self.assertEqual(loss_scale.period, period)
self.assertEqual(loss_scale.factor, factor)
@parameterized.parameters((20, 2), (30, 3))
def test_dynamic_loss_scale_adjust_reduce_on_non_finite(self, period, factor):
grads_finite = jnp.bool_(False)
init = np.float32(10)
loss_scale = jmp.DynamicLossScale(jnp.asarray(init), jnp.int32(0), period,
factor)
self.assertLess(init / (factor ** 100), 1, msg="should cover max(1, S)")
for i in range(100):
loss_scale = loss_scale.adjust(grads_finite)
np.testing.assert_allclose(loss_scale.loss_scale,
max(1, init / (factor ** (i + 1))),
rtol=1e-5)
self.assertEqual(loss_scale.counter, 0)
self.assertEqual(loss_scale.period, period)
self.assertEqual(loss_scale.factor, factor)
@parameterized.parameters((20, 2, .3125), (30, 3, .37), (5., 2., 0.))
def test_dynamic_loss_scale_explicit_min_loss_scale(self, period, factor,
min_loss_scale):
grads_finite = jnp.bool_(False)
init = np.float32(10)
loss_scale = jmp.DynamicLossScale(
jnp.asarray(init), jnp.int32(0), period, factor,
jnp.asarray(min_loss_scale))
self.assertLess(init / (factor**100), 1, msg="should cover max(1, S)")
for i in range(100):
loss_scale = loss_scale.adjust(grads_finite)
np.testing.assert_allclose(
loss_scale.loss_scale,
max(min_loss_scale, init / (factor**(i + 1))),
rtol=1e-5)
self.assertEqual(loss_scale.counter, 0)
self.assertEqual(loss_scale.period, period)
self.assertEqual(loss_scale.factor, factor)
  def test_dynamic_loss_scale_adjust_requires_scalar_input(self):
    loss_scale = jmp.DynamicLossScale(jnp.float32(2 ** 15))
    with self.assertRaises(AssertionError):
      loss_scale.adjust(jnp.ones([2], jnp.bool_))
def test_dynamic_loss_scale_raises_type_error_on_int_loss_scale(self):
expected_message = "Expected floating type for loss_scale"
with self.assertWarnsRegex(Warning, expected_message):
jmp.DynamicLossScale(jnp.asarray(1, dtype=jnp.int32))
def test_dynamic_loss_scale_raises_type_error_on_int_min_loss_scale(self):
expected_message = "Expected floating type for min_loss_scale"
with self.assertWarnsRegex(Warning, expected_message):
jmp.DynamicLossScale(jnp.asarray(1, dtype=jnp.float32),
min_loss_scale=jnp.asarray(1, dtype=jnp.int32))
@parameterized.parameters(jnp.inf, jnp.nan)
def test_all_finite(self, non_finite):
self.assertTrue(jmp.all_finite(None))
self.assertTrue(jmp.all_finite({}))
self.assertFalse(jmp.all_finite({"a": jnp.array(non_finite)}))
self.assertFalse(jmp.all_finite({"a": jnp.ones([]),
"b": jnp.array(non_finite)}))
self.assertFalse(jmp.all_finite({"a": jnp.array(non_finite),
"b": jnp.ones([])}))
self.assertTrue(jmp.all_finite({"a": jnp.ones([]), "b": jnp.ones([])}))
def test_select_tree(self):
a = {"a": jnp.ones([]), "b": jnp.zeros([])}
b = {"a": jnp.zeros([]), "b": jnp.ones([])}
self.assertIsNone(jmp.select_tree(jnp.bool_(True), None, None))
self.assertIsNone(jmp.select_tree(jnp.bool_(False), None, None))
self.assertEqual(jmp.select_tree(jnp.bool_(True), a, b), a)
self.assertEqual(jmp.select_tree(jnp.bool_(False), a, b), b)
def test_select_tree_rejects_non_scalar(self):
with self.assertRaisesRegex(AssertionError, "expected boolean scalar"):
jmp.select_tree(jnp.ones([1]), None, None)
if __name__ == "__main__":
absltest.main()
|
jmp-main
|
jmp/_src/loss_scale_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pseudocode description of the AlphaDev algorithm."""
###########################
########## Content ########
# 1. Environment
# 2. Networks
# 2.1 Network helpers
# 2.2 Representation network
# 2.3 Prediction network (correctness and latency values and policy)
# 3. Helpers
# 4. Part 1: Self-Play
# 5. Part 2: Training
###########################
import collections
import functools
import math
from typing import Any, Callable, Dict, NamedTuple, Optional, Sequence
import chex
import haiku as hk
import jax
import jax.lax
import jax.numpy as jnp
import ml_collections
import numpy
import optax
############################
###### 1. Environment ######
class TaskSpec(NamedTuple):
max_program_size: int
num_inputs: int
num_funcs: int
num_locations: int
num_actions: int
correct_reward: float
correctness_reward_weight: float
latency_reward_weight: float
latency_quantile: float
class AssemblyGame(object):
"""The environment AlphaDev is interacting with."""
class AssemblyInstruction(object):
pass
class AssemblySimulator(object):
# pylint: disable-next=unused-argument
def apply(self, instruction):
return {}
def measure_latency(self, program) -> float:
pass
def __init__(self, task_spec):
self.task_spec = task_spec
self.program = []
self.simulator = self.AssemblySimulator(task_spec)
self.previous_correct_items = 0
def step(self, action):
instruction = self.AssemblyInstruction(action)
self.program.append(instruction)
self.execution_state = self.simulator.apply(instruction)
return self.observation(), self.correctness_reward()
def observation(self):
return {
'program': self.program,
'program_length': len(self.program),
'memory': self.execution_state.memory,
'registers': self.execution_state.registers,
}
def correctness_reward(self) -> float:
"""Computes a reward based on the correctness of the output."""
make_expected_outputs = lambda: []
expected_outputs = make_expected_outputs()
state = self.execution_state
# Weighted sum of correctly placed items
correct_items = 0
for output, expected in zip(state.memory, expected_outputs):
correct_items += output.weight * sum(
output[i] == expected[i] for i in range(len(output))
)
reward = self.task_spec.correctness_reward_weight * (
correct_items - self.previous_correct_items
)
self.previous_correct_items = correct_items
# Bonus for fully correct programs
all_correct = all(
output == expected
for output, expected in zip(state.memory, expected_outputs)
)
reward += self.task_spec.correct_reward * all_correct
return reward
def latency_reward(self) -> float:
latency_samples = [
self.simulator.measure_latency(self.program)
for _ in range(self.task_spec.num_latency_simulation)
]
return (
numpy.quantile(latency_samples, self.task_spec.latency_quantile)
* self.task_spec.latency_reward_weight
)
def clone(self):
pass
######## End Environment ########
#################################
#####################################
############ 2. Networks ############
######## 2.1 Network helpers ########
class Action(object):
"""Action representation."""
def __init__(self, index: int):
self.index = index
def __hash__(self):
return self.index
def __eq__(self, other):
return self.index == other.index
def __gt__(self, other):
return self.index > other.index
class NetworkOutput(NamedTuple):
value: float
correctness_value_logits: jnp.ndarray
latency_value_logits: jnp.ndarray
policy_logits: Dict[Action, float]
class Network(object):
"""Wrapper around Representation and Prediction networks."""
def __init__(self, hparams: ml_collections.ConfigDict, task_spec: TaskSpec):
self.representation = hk.transform(RepresentationNet(
hparams, task_spec, hparams.embedding_dim
))
self.prediction = hk.transform(PredictionNet(
task_spec=task_spec,
value_max=hparams.value.max,
value_num_bins=hparams.value.num_bins,
embedding_dim=hparams.embedding_dim,
))
    rep_key, pred_key = jax.random.split(jax.random.PRNGKey(42))
self.params = {
'representation': self.representation.init(rep_key),
'prediction': self.prediction.init(pred_key),
}
def inference(self, params: Any, observation: jnp.array) -> NetworkOutput:
# representation + prediction function
embedding = self.representation.apply(params['representation'], observation)
return self.prediction.apply(params['prediction'], embedding)
def get_params(self):
# Returns the weights of this network.
return self.params
def update_params(self, updates: Any) -> None:
# Update network weights internally.
self.params = jax.tree_map(lambda p, u: p + u, self.params, updates)
def training_steps(self) -> int:
# How many steps / batches the network has been trained for.
return 0
class UniformNetwork(object):
"""Network representation that returns uniform output."""
# pylint: disable-next=unused-argument
def inference(self, observation) -> NetworkOutput:
# representation + prediction function
return NetworkOutput(0, 0, 0, {})
def get_params(self):
# Returns the weights of this network.
return self.params
def update_params(self, updates: Any) -> None:
# Update network weights internally.
self.params = jax.tree_map(lambda p, u: p + u, self.params, updates)
def training_steps(self) -> int:
# How many steps / batches the network has been trained for.
return 0
######## 2.2 Representation Network ########
class MultiQueryAttentionBlock:
"""Attention with multiple query heads and a single shared key and value head.
Implementation of "Fast Transformer Decoding: One Write-Head is All You Need",
see https://arxiv.org/abs/1911.02150.
"""
class ResBlockV2:
"""Layer-normed variant of the block from https://arxiv.org/abs/1603.05027."""
def int2bin(integers_array: jnp.array) -> jnp.array:
"""Converts an array of integers to an array of its 32bit representation bits.
Conversion goes from array of shape (S1, S2, ..., SN) to (S1, S2, ..., SN*32),
i.e. all binary arrays are concatenated. Also note that the single 32-long
binary sequences are reversed, i.e. the number 1 will be converted to the
binary 1000000... . This is irrelevant for ML problems.
Args:
integers_array: array of integers to convert.
Returns:
array of bits (on or off) in boolean type.
"""
flat_arr = integers_array.astype(jnp.int32).reshape(-1, 1)
bin_mask = jnp.tile(2 ** jnp.arange(32), (flat_arr.shape[0], 1))
return ((flat_arr & bin_mask) != 0).reshape(
*integers_array.shape[:-1], integers_array.shape[-1] * 32
)
def bin2int(binary_array: jnp.array) -> jnp.array:
"""Reverses operation of int2bin."""
u_binary_array = binary_array.reshape(
*binary_array.shape[:-1], binary_array.shape[-1] // 32, 32
)
exp = jnp.tile(2 ** jnp.arange(32), u_binary_array.shape[:-1] + (1,))
return jnp.sum(exp * u_binary_array, axis=-1)
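def _int2bin_roundtrip_example():
  # Illustrative sanity check (not part of the original pseudocode): for small
  # non-negative integers, bin2int inverts int2bin, and the bit dimension is
  # folded into the trailing axis (3 ints -> 96 bits here).
  x = jnp.array([[1, 2, 3]], dtype=jnp.int32)
  bits = int2bin(x)
  assert bits.shape == (1, 96)
  assert (bin2int(bits) == x).all()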
class RepresentationNet(hk.Module):
"""Representation network."""
def __init__(
self,
hparams: ml_collections.ConfigDict,
task_spec: TaskSpec,
embedding_dim: int,
name: str = 'representation',
):
super().__init__(name=name)
self._hparams = hparams
self._task_spec = task_spec
self._embedding_dim = embedding_dim
def __call__(self, inputs):
batch_size = inputs['program'].shape[0]
program_encoding = None
if self._hparams.representation.use_program:
program_encoding = self._encode_program(inputs, batch_size)
if (
self._hparams.representation.use_locations
and self._hparams.representation.use_locations_binary
):
raise ValueError(
'only one of `use_locations` and `use_locations_binary` may be used.'
)
locations_encoding = None
if self._hparams.representation.use_locations:
locations_encoding = self._make_locations_encoding_onehot(
inputs, batch_size
)
elif self._hparams.representation.use_locations_binary:
locations_encoding = self._make_locations_encoding_binary(
inputs, batch_size
)
permutation_embedding = None
if self._hparams.representation.use_permutation_embedding:
permutation_embedding = self.make_permutation_embedding(batch_size)
return self.aggregate_locations_program(
locations_encoding, permutation_embedding, program_encoding, batch_size
)
def _encode_program(self, inputs, batch_size):
program = inputs['program']
max_program_size = inputs['program'].shape[1]
program_length = inputs['program_length'].astype(jnp.int32)
program_onehot = self.make_program_onehot(
program, batch_size, max_program_size
)
program_encoding = self.apply_program_mlp_embedder(program_onehot)
program_encoding = self.apply_program_attention_embedder(program_encoding)
return self.pad_program_encoding(
program_encoding, batch_size, program_length, max_program_size
)
def aggregate_locations_program(
self,
locations_encoding,
unused_permutation_embedding,
program_encoding,
batch_size,
):
locations_embedder = hk.Sequential(
[
hk.Linear(self._embedding_dim),
hk.LayerNorm(axis=-1),
jax.nn.relu,
hk.Linear(self._embedding_dim),
],
name='per_locations_embedder',
)
# locations_encoding.shape == [B, P, D] so map embedder across locations to
# share weights
locations_embedding = hk.vmap(
locations_embedder, in_axes=1, out_axes=1, split_rng=False
)(locations_encoding)
program_encoded_repeat = self.repeat_program_encoding(
program_encoding, batch_size
)
grouped_representation = jnp.concatenate(
[locations_embedding, program_encoded_repeat], axis=-1
)
return self.apply_joint_embedder(grouped_representation, batch_size)
def repeat_program_encoding(self, program_encoding, batch_size):
return jnp.broadcast_to(
program_encoding,
[batch_size, self._task_spec.num_inputs, program_encoding.shape[-1]],
)
def apply_joint_embedder(self, grouped_representation, batch_size):
all_locations_net = hk.Sequential(
[
hk.Linear(self._embedding_dim),
hk.LayerNorm(axis=-1),
jax.nn.relu,
hk.Linear(self._embedding_dim),
],
name='per_element_embedder',
)
joint_locations_net = hk.Sequential(
[
hk.Linear(self._embedding_dim),
hk.LayerNorm(axis=-1),
jax.nn.relu,
hk.Linear(self._embedding_dim),
],
name='joint_embedder',
)
joint_resnet = [
ResBlockV2(self._embedding_dim, name=f'joint_resblock_{i}')
for i in range(self._hparams.representation.repr_net_res_blocks)
]
chex.assert_shape(
grouped_representation, (batch_size, self._task_spec.num_inputs, None)
)
permutations_encoded = all_locations_net(grouped_representation)
# Combine all permutations into a single vector.
joint_encoding = joint_locations_net(jnp.mean(permutations_encoded, axis=1))
for net in joint_resnet:
joint_encoding = net(joint_encoding)
return joint_encoding
def make_program_onehot(self, program, batch_size, max_program_size):
func = program[:, :, 0]
arg1 = program[:, :, 1]
arg2 = program[:, :, 2]
func_onehot = jax.nn.one_hot(func, self._task_spec.num_funcs)
arg1_onehot = jax.nn.one_hot(arg1, self._task_spec.num_locations)
arg2_onehot = jax.nn.one_hot(arg2, self._task_spec.num_locations)
program_onehot = jnp.concatenate(
[func_onehot, arg1_onehot, arg2_onehot], axis=-1
)
chex.assert_shape(program_onehot, (batch_size, max_program_size, None))
return program_onehot
def pad_program_encoding(
self, program_encoding, batch_size, program_length, max_program_size
):
"""Pads the program encoding to account for state-action stagger."""
chex.assert_shape(program_encoding, (batch_size, max_program_size, None))
empty_program_output = jnp.zeros(
[batch_size, program_encoding.shape[-1]],
)
program_encoding = jnp.concatenate(
[empty_program_output[:, None, :], program_encoding], axis=1
)
program_length_onehot = jax.nn.one_hot(program_length, max_program_size + 1)
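    # The einsum below uses the one-hot program length as a selection matrix:
    # for each batch element it gathers the encoding at index `program_length`
    # (a gather expressed as a one-hot contraction).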
program_encoding = jnp.einsum(
'bnd,bNn->bNd', program_encoding, program_length_onehot
)
return program_encoding
def apply_program_mlp_embedder(self, program_encoding):
program_embedder = hk.Sequential(
[
hk.Linear(self._embedding_dim),
hk.LayerNorm(axis=-1),
jax.nn.relu,
hk.Linear(self._embedding_dim),
],
name='per_instruction_program_embedder',
)
program_encoding = program_embedder(program_encoding)
return program_encoding
def apply_program_attention_embedder(self, program_encoding):
attention_params = self._hparams.representation.attention
make_attention_block = functools.partial(
MultiQueryAttentionBlock, attention_params, causal_mask=False
)
attention_encoders = [
make_attention_block(name=f'attention_program_sequencer_{i}')
for i in range(self._hparams.representation.attention_num_layers)
]
*_, seq_size, feat_size = program_encoding.shape
position_encodings = jnp.broadcast_to(
MultiQueryAttentionBlock.sinusoid_position_encoding(
seq_size, feat_size
),
program_encoding.shape,
)
program_encoding += position_encodings
for e in attention_encoders:
program_encoding, _ = e(program_encoding, encoded_state=None)
return program_encoding
def _make_locations_encoding_onehot(self, inputs, batch_size):
"""Creates location encoding using onehot representation."""
memory = inputs['memory']
registers = inputs['registers']
locations = jnp.concatenate([memory, registers], axis=-1) # [B, H, P, D]
locations = jnp.transpose(locations, [0, 2, 1, 3]) # [B, P, H, D]
# One-hot encode the values in the memory and average everything across
# permutations.
locations_onehot = jax.nn.one_hot(
locations, self._task_spec.num_location_values, dtype=jnp.int32
)
locations_onehot = locations_onehot.reshape(
[batch_size, self._task_spec.num_inputs, -1]
)
return locations_onehot
def _make_locations_encoding_binary(self, inputs, batch_size):
"""Creates location encoding using binary representation."""
memory_binary = int2bin(inputs['memory']).astype(jnp.float32)
registers_binary = int2bin(inputs['registers']).astype(jnp.float32)
# Note the extra I dimension for the length of the binary integer (32)
locations = jnp.concatenate(
[memory_binary, registers_binary], axis=-1
) # [B, H, P, D*I]
locations = jnp.transpose(locations, [0, 2, 1, 3]) # [B, P, H, D*I]
locations = locations.reshape([batch_size, self._task_spec.num_inputs, -1])
return locations
######## 2.3 Prediction Network ########
def make_head_network(
embedding_dim: int,
output_size: int,
num_hidden_layers: int = 2,
name: Optional[str] = None,
) -> Callable[[jnp.ndarray,], jnp.ndarray]:
return hk.Sequential(
[ResBlockV2(embedding_dim) for _ in range(num_hidden_layers)]
+ [hk.Linear(output_size)],
name=name,
)
class DistributionSupport(object):
def __init__(self, value_max: float, num_bins: int):
self.value_max = value_max
self.num_bins = num_bins
def mean(self, logits: jnp.ndarray) -> float:
pass
def scalar_to_two_hot(self, scalar: float) -> jnp.ndarray:
pass
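# A minimal sketch (not part of the original pseudocode) of the two-hot
# transform that DistributionSupport.scalar_to_two_hot leaves unspecified,
# assuming `num_bins` evenly spaced support points on [0, value_max].
def _scalar_to_two_hot_sketch(
    scalar: jnp.ndarray, value_max: float, num_bins: int
) -> jnp.ndarray:
  support = jnp.linspace(0.0, value_max, num_bins)
  scalar = jnp.clip(scalar, 0.0, value_max)
  upper_idx = jnp.clip(jnp.searchsorted(support, scalar), 1, num_bins - 1)
  lower_idx = upper_idx - 1
  # Split the probability mass between the two neighbouring bins in proportion
  # to how close the scalar is to each of them.
  upper_w = (scalar - support[lower_idx]) / (support[upper_idx] - support[lower_idx])
  two_hot = jnp.zeros(num_bins).at[lower_idx].set(1.0 - upper_w)
  return two_hot.at[upper_idx].set(upper_w)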
class CategoricalHead(hk.Module):
"""A head that represents continuous values by a categorical distribution."""
def __init__(
self,
embedding_dim: int,
support: DistributionSupport,
name: str = 'CategoricalHead',
):
super().__init__(name=name)
self._value_support = support
self._embedding_dim = embedding_dim
self._head = make_head_network(
embedding_dim, output_size=self._value_support.num_bins
)
def __call__(self, x: jnp.ndarray):
# For training returns the logits, for inference the mean.
logits = self._head(x)
probs = jax.nn.softmax(logits)
mean = jax.vmap(self._value_support.mean)(probs)
return dict(logits=logits, mean=mean)
class PredictionNet(hk.Module):
"""MuZero prediction network."""
def __init__(
self,
task_spec: TaskSpec,
value_max: float,
value_num_bins: int,
embedding_dim: int,
name: str = 'prediction',
):
super().__init__(name=name)
self.task_spec = task_spec
    self.support = DistributionSupport(value_max, value_num_bins)
self.embedding_dim = embedding_dim
def __call__(self, embedding: jnp.ndarray):
policy_head = make_head_network(
self.embedding_dim, self.task_spec.num_actions
)
value_head = CategoricalHead(self.embedding_dim, self.support)
latency_value_head = CategoricalHead(self.embedding_dim, self.support)
correctness_value = value_head(embedding)
latency_value = latency_value_head(embedding)
    return NetworkOutput(
        value=correctness_value['mean'] + latency_value['mean'],
        correctness_value_logits=correctness_value['logits'],
        latency_value_logits=latency_value['logits'],
        policy_logits=policy_head(embedding),
    )
####### End Networks ########
#############################
#############################
####### 3. Helpers ##########
MAXIMUM_FLOAT_VALUE = float('inf')
KnownBounds = collections.namedtuple('KnownBounds', ['min', 'max'])
class AlphaDevConfig(object):
"""AlphaDev configuration."""
def __init__(
self,
):
### Self-Play
self.num_actors = 128 # TPU actors
# pylint: disable-next=g-long-lambda
self.visit_softmax_temperature_fn = lambda steps: (
1.0 if steps < 500e3 else 0.5 if steps < 750e3 else 0.25
)
self.max_moves = jnp.inf
self.num_simulations = 800
self.discount = 1.0
# Root prior exploration noise.
self.root_dirichlet_alpha = 0.03
self.root_exploration_fraction = 0.25
# UCB formula
self.pb_c_base = 19652
self.pb_c_init = 1.25
self.known_bounds = KnownBounds(-6.0, 6.0)
# Environment: spec of the Variable Sort 3 task
self.task_spec = TaskSpec(
max_program_size=100,
num_inputs=17,
num_funcs=14,
num_locations=19,
num_actions=271,
correct_reward=1.0,
correctness_reward_weight=2.0,
latency_reward_weight=0.5,
latency_quantile=0,
)
### Network architecture
self.hparams = ml_collections.ConfigDict()
self.hparams.embedding_dim = 512
self.hparams.representation = ml_collections.ConfigDict()
self.hparams.representation.use_program = True
self.hparams.representation.use_locations = True
self.hparams.representation.use_locations_binary = False
self.hparams.representation.use_permutation_embedding = False
self.hparams.representation.repr_net_res_blocks = 8
self.hparams.representation.attention = ml_collections.ConfigDict()
self.hparams.representation.attention.head_depth = 128
self.hparams.representation.attention.num_heads = 4
self.hparams.representation.attention.attention_dropout = False
self.hparams.representation.attention.position_encoding = 'absolute'
self.hparams.representation.attention_num_layers = 6
self.hparams.value = ml_collections.ConfigDict()
self.hparams.value.max = 3.0 # These two parameters are task / reward-
self.hparams.value.num_bins = 301 # dependent and need to be adjusted.
### Training
self.training_steps = int(1000e3)
self.checkpoint_interval = 500
self.target_network_interval = 100
self.window_size = int(1e6)
self.batch_size = 512
self.td_steps = 5
self.lr_init = 2e-4
self.momentum = 0.9
def new_game(self):
return Game(self.task_spec.num_actions, self.discount, self.task_spec)
class MinMaxStats(object):
"""A class that holds the min-max values of the tree."""
def __init__(self, known_bounds: Optional[KnownBounds]):
self.maximum = known_bounds.max if known_bounds else -MAXIMUM_FLOAT_VALUE
self.minimum = known_bounds.min if known_bounds else MAXIMUM_FLOAT_VALUE
def update(self, value: float):
self.maximum = max(self.maximum, value)
self.minimum = min(self.minimum, value)
def normalize(self, value: float) -> float:
if self.maximum > self.minimum:
# We normalize only when we have set the maximum and minimum values.
return (value - self.minimum) / (self.maximum - self.minimum)
return value
class Player(object):
pass
class Node(object):
"""MCTS node."""
def __init__(self, prior: float):
self.visit_count = 0
self.to_play = -1
self.prior = prior
self.value_sum = 0
self.children = {}
self.hidden_state = None
self.reward = 0
def expanded(self) -> bool:
return bool(self.children)
def value(self) -> float:
if self.visit_count == 0:
return 0
return self.value_sum / self.visit_count
class ActionHistory(object):
"""Simple history container used inside the search.
Only used to keep track of the actions executed.
"""
def __init__(self, history: Sequence[Action], action_space_size: int):
self.history = list(history)
self.action_space_size = action_space_size
def clone(self):
return ActionHistory(self.history, self.action_space_size)
def add_action(self, action: Action):
self.history.append(action)
def last_action(self) -> Action:
return self.history[-1]
def action_space(self) -> Sequence[Action]:
return [Action(i) for i in range(self.action_space_size)]
def to_play(self) -> Player:
return Player()
class Target(NamedTuple):
correctness_value: float
latency_value: float
policy: Sequence[int]
bootstrap_discount: float
class Sample(NamedTuple):
observation: Dict[str, jnp.ndarray]
bootstrap_observation: Dict[str, jnp.ndarray]
target: Target
class Game(object):
"""A single episode of interaction with the environment."""
def __init__(
self, action_space_size: int, discount: float, task_spec: TaskSpec
):
self.task_spec = task_spec
self.environment = AssemblyGame(task_spec)
self.history = []
self.rewards = []
self.latency_reward = 0
self.child_visits = []
self.root_values = []
self.action_space_size = action_space_size
self.discount = discount
def terminal(self) -> bool:
# Game specific termination rules.
# For sorting, a game is terminal if we sort all sequences correctly or
# we reached the end of the buffer.
pass
def is_correct(self) -> bool:
# Whether the current algorithm solves the game.
pass
def legal_actions(self) -> Sequence[Action]:
# Game specific calculation of legal actions.
return []
def apply(self, action: Action):
_, reward = self.environment.step(action)
self.rewards.append(reward)
self.history.append(action)
if self.terminal() and self.is_correct():
self.latency_reward = self.environment.latency_reward()
def store_search_statistics(self, root: Node):
sum_visits = sum(child.visit_count for child in root.children.values())
action_space = (Action(index) for index in range(self.action_space_size))
self.child_visits.append(
[
root.children[a].visit_count / sum_visits
if a in root.children
else 0
for a in action_space
]
)
self.root_values.append(root.value())
def make_observation(self, state_index: int):
if state_index == -1:
return self.environment.observation()
env = AssemblyGame(self.task_spec)
for action in self.history[:state_index]:
observation, _ = env.step(action)
return observation
def make_target(
# pylint: disable-next=unused-argument
self, state_index: int, td_steps: int, to_play: Player
) -> Target:
"""Creates the value target for training."""
# The value target is the discounted sum of all rewards until N steps
    # into the future, to which we will add the discounted bootstrapped future
# value.
    bootstrap_index = state_index + td_steps
    value = 0
    for i, reward in enumerate(self.rewards[state_index:bootstrap_index]):
      value += reward * self.discount**i  # pytype: disable=unsupported-operands
if bootstrap_index < len(self.root_values):
bootstrap_discount = self.discount**td_steps
else:
bootstrap_discount = 0
return Target(
value,
self.latency_reward,
self.child_visits[state_index],
bootstrap_discount,
)
def to_play(self) -> Player:
return Player()
def action_history(self) -> ActionHistory:
return ActionHistory(self.history, self.action_space_size)
class ReplayBuffer(object):
"""Replay buffer object storing games for training."""
def __init__(self, config: AlphaDevConfig):
self.window_size = config.window_size
self.batch_size = config.batch_size
self.buffer = []
def save_game(self, game):
if len(self.buffer) > self.window_size:
self.buffer.pop(0)
self.buffer.append(game)
def sample_batch(self, td_steps: int) -> Sequence[Sample]:
games = [self.sample_game() for _ in range(self.batch_size)]
game_pos = [(g, self.sample_position(g)) for g in games]
# pylint: disable=g-complex-comprehension
return [
Sample(
observation=g.make_observation(i),
bootstrap_observation=g.make_observation(i + td_steps),
target=g.make_target(i, td_steps, g.to_play()),
)
for (g, i) in game_pos
]
# pylint: enable=g-complex-comprehension
def sample_game(self) -> Game:
# Sample game from buffer either uniformly or according to some priority.
return self.buffer[0]
# pylint: disable-next=unused-argument
def sample_position(self, game) -> int:
# Sample position from game either uniformly or according to some priority.
return -1
class SharedStorage(object):
"""Controls which network is used at inference."""
def __init__(self):
self._networks = {}
def latest_network(self) -> Network:
if self._networks:
return self._networks[max(self._networks.keys())]
else:
# policy -> uniform, value -> 0, reward -> 0
return make_uniform_network()
def save_network(self, step: int, network: Network):
self._networks[step] = network
##### End Helpers ########
##########################
# AlphaDev training is split into two independent parts: Network training and
# self-play data generation.
# These two parts only communicate by transferring the latest network checkpoint
# from the training to the self-play, and the finished games from the self-play
# to the training.
def alphadev(config: AlphaDevConfig):
storage = SharedStorage()
replay_buffer = ReplayBuffer(config)
for _ in range(config.num_actors):
launch_job(run_selfplay, config, storage, replay_buffer)
train_network(config, storage, replay_buffer)
return storage.latest_network()
#####################################
####### 4. Part 1: Self-Play ########
# Each self-play job is independent of all others; it takes the latest network
# snapshot, produces a game and makes it available to the training job by
# writing it to a shared replay buffer.
def run_selfplay(
config: AlphaDevConfig, storage: SharedStorage, replay_buffer: ReplayBuffer
):
while True:
network = storage.latest_network()
game = play_game(config, network)
replay_buffer.save_game(game)
def play_game(config: AlphaDevConfig, network: Network) -> Game:
"""Plays an AlphaDev game.
Each game is produced by starting at the initial empty program, then
repeatedly executing a Monte Carlo Tree Search to generate moves until the end
of the game is reached.
Args:
config: An instance of the AlphaDev configuration.
network: Networks used for inference.
Returns:
The played game.
"""
game = config.new_game()
while not game.terminal() and len(game.history) < config.max_moves:
min_max_stats = MinMaxStats(config.known_bounds)
# Initialisation of the root node and addition of exploration noise
root = Node(0)
current_observation = game.make_observation(-1)
network_output = network.inference(current_observation)
_expand_node(
root, game.to_play(), game.legal_actions(), network_output, reward=0
)
_backpropagate(
[root],
network_output.value,
game.to_play(),
config.discount,
min_max_stats,
)
_add_exploration_noise(config, root)
# We then run a Monte Carlo Tree Search using the environment.
run_mcts(
config,
root,
game.action_history(),
network,
min_max_stats,
game.environment,
)
action = _select_action(config, len(game.history), root, network)
game.apply(action)
game.store_search_statistics(root)
return game
def run_mcts(
config: AlphaDevConfig,
root: Node,
action_history: ActionHistory,
network: Network,
min_max_stats: MinMaxStats,
env: AssemblyGame,
):
"""Runs the Monte Carlo Tree Search algorithm.
To decide on an action, we run N simulations, always starting at the root of
the search tree and traversing the tree according to the UCB formula until we
reach a leaf node.
Args:
config: AlphaDev configuration
root: The root node of the MCTS tree from which we start the algorithm
action_history: history of the actions taken so far.
network: instances of the networks that will be used.
min_max_stats: min-max statistics for the tree.
env: an instance of the AssemblyGame.
"""
for _ in range(config.num_simulations):
history = action_history.clone()
node = root
search_path = [node]
sim_env = env.clone()
    while node.expanded():
      action, node = _select_child(config, node, min_max_stats)
      # Inside the search tree we use the environment to obtain the next
      # observation and reward given the selected action.
      observation, reward = sim_env.step(action)
      history.add_action(action)
      search_path.append(node)
network_output = network.inference(observation)
_expand_node(
node, history.to_play(), history.action_space(), network_output, reward
)
_backpropagate(
search_path,
network_output.value,
history.to_play(),
config.discount,
min_max_stats,
)
def _select_action(
# pylint: disable-next=unused-argument
config: AlphaDevConfig, num_moves: int, node: Node, network: Network
):
visit_counts = [
(child.visit_count, action) for action, child in node.children.items()
]
t = config.visit_softmax_temperature_fn(
training_steps=network.training_steps()
)
_, action = softmax_sample(visit_counts, t)
return action
def _select_child(
config: AlphaDevConfig, node: Node, min_max_stats: MinMaxStats
):
"""Selects the child with the highest UCB score."""
_, action, child = max(
(_ucb_score(config, node, child, min_max_stats), action, child)
for action, child in node.children.items()
)
return action, child
def _ucb_score(
config: AlphaDevConfig,
parent: Node,
child: Node,
min_max_stats: MinMaxStats,
) -> float:
"""Computes the UCB score based on its value + exploration based on prior."""
pb_c = (
math.log((parent.visit_count + config.pb_c_base + 1) / config.pb_c_base)
+ config.pb_c_init
)
pb_c *= math.sqrt(parent.visit_count) / (child.visit_count + 1)
prior_score = pb_c * child.prior
if child.visit_count > 0:
value_score = min_max_stats.normalize(
child.reward + config.discount * child.value()
)
else:
value_score = 0
return prior_score + value_score
def _expand_node(
node: Node,
to_play: Player,
actions: Sequence[Action],
network_output: NetworkOutput,
reward: float,
):
"""Expands the node using value, reward and policy predictions from the NN."""
node.to_play = to_play
node.hidden_state = network_output.hidden_state
node.reward = reward
policy = {a: math.exp(network_output.policy_logits[a]) for a in actions}
policy_sum = sum(policy.values())
for action, p in policy.items():
node.children[action] = Node(p / policy_sum)
def _backpropagate(
search_path: Sequence[Node],
value: float,
to_play: Player,
discount: float,
min_max_stats: MinMaxStats,
):
"""Propagates the evaluation all the way up the tree to the root."""
for node in reversed(search_path):
node.value_sum += value if node.to_play == to_play else -value
node.visit_count += 1
min_max_stats.update(node.value())
value = node.reward + discount * value
def _add_exploration_noise(config: AlphaDevConfig, node: Node):
"""Adds dirichlet noise to the prior of the root to encourage exploration."""
actions = list(node.children.keys())
noise = numpy.random.dirichlet([config.root_dirichlet_alpha] * len(actions))
frac = config.root_exploration_fraction
for a, n in zip(actions, noise):
node.children[a].prior = node.children[a].prior * (1 - frac) + n * frac
########### End Self-Play ###########
#####################################
#####################################
####### 5. Part 2: Training #########
def train_network(
config: AlphaDevConfig, storage: SharedStorage, replay_buffer: ReplayBuffer
):
"""Trains the network on data stored in the replay buffer."""
network = Network(config.hparams, config.task_spec)
target_network = Network(config.hparams, config.task_spec)
optimizer = optax.sgd(config.lr_init, config.momentum)
optimizer_state = optimizer.init(network.get_params())
for i in range(config.training_steps):
if i % config.checkpoint_interval == 0:
storage.save_network(i, network)
if i % config.target_network_interval == 0:
target_network = network.copy()
    batch = replay_buffer.sample_batch(config.td_steps)
optimizer_state = _update_weights(
optimizer, optimizer_state, network, target_network, batch)
storage.save_network(config.training_steps, network)
def scale_gradient(tensor: Any, scale):
"""Scales the gradient for the backward pass."""
return tensor * scale + jax.lax.stop_gradient(tensor) * (1 - scale)
def _loss_fn(
network_params: jnp.array,
target_network_params: jnp.array,
network: Network,
target_network: Network,
batch: Sequence[Sample]
) -> float:
"""Computes loss."""
loss = 0
for observation, bootstrap_obs, target in batch:
predictions = network.inference(network_params, observation)
bootstrap_predictions = target_network.inference(
target_network_params, bootstrap_obs)
target_correctness, target_latency, target_policy, bootstrap_discount = (
target
)
target_correctness += (
bootstrap_discount * bootstrap_predictions.correctness_value_logits
)
l = optax.softmax_cross_entropy(predictions.policy_logits, target_policy)
l += scalar_loss(
predictions.correctness_value_logits, target_correctness, network
)
l += scalar_loss(predictions.latency_value_logits, target_latency, network)
loss += l
loss /= len(batch)
return loss
_loss_grad = jax.grad(_loss_fn, argnums=0)
def _update_weights(
optimizer: optax.GradientTransformation,
optimizer_state: Any,
network: Network,
target_network: Network,
batch: Sequence[Sample],
) -> Any:
"""Updates the weight of the network."""
updates = _loss_grad(
network.get_params(),
target_network.get_params(),
network,
target_network,
batch)
optim_updates, new_optim_state = optimizer.update(updates, optimizer_state)
network.update_params(optim_updates)
return new_optim_state
def scalar_loss(prediction, target, network) -> float:
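  # The scalar target is projected onto the network's categorical support with a
  # two-hot encoding and trained with a cross-entropy loss.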
support = network.prediction.support
return optax.softmax_cross_entropy(
prediction, support.scalar_to_two_hot(target)
)
######### End Training ###########
##################################
################################################################################
############################# End of pseudocode ################################
################################################################################
# Stubs to make the typechecker happy.
# pylint: disable-next=unused-argument
def softmax_sample(distribution, temperature: float):
return 0, 0
def launch_job(f, *args):
f(*args)
def make_uniform_network():
return UniformNetwork()
|
alphadev-main
|
alphadev.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Install script for setuptools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
from setuptools import find_packages
from setuptools import setup
setup(
name='dm-hard-eight',
version=imp.load_source('_version',
'dm_hard_eight/_version.py').__version__,
description=('DeepMind Hard Eight Tasks, a set of Unity-based machine-'
'learning research tasks.'),
author='DeepMind',
license='Apache License, Version 2.0',
keywords='reinforcement-learning python machine learning',
packages=find_packages(exclude=['examples']),
install_requires=[
'absl-py',
'dm-env',
'dm-env-rpc',
'docker',
'grpcio',
'numpy',
'portpicker',
],
tests_require=['nose'],
python_requires='>=3.6.1',
extras_require={'examples': ['pygame']},
test_suite='nose.collector',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
|
dm_hard_eight-master
|
setup.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example random agent for interacting with DeepMind Hard Eight Tasks."""
from absl import app
from absl import flags
from absl import logging
from dm_env import specs
import dm_hard_eight
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_string(
'docker_image_name', None,
'Name of the Docker image that contains the Hard Eight Tasks. '
'If None, uses the default dm_hard_eight image name')
flags.DEFINE_integer('seed', 123, 'Environment seed.')
flags.DEFINE_string('level_name', 'ball_room_navigation_cubes',
'Name of Hard Eight task to run.')
class RandomAgent(object):
"""Basic random agent for DeepMind Hard Eight Tasks."""
def __init__(self, action_spec):
self.action_spec = action_spec
def act(self):
action = {}
for name, spec in self.action_spec.items():
# Uniformly sample BoundedArray actions.
if isinstance(spec, specs.BoundedArray):
action[name] = np.random.uniform(spec.minimum, spec.maximum, spec.shape)
else:
action[name] = spec.generate_value()
return action
def main(_):
env_settings = dm_hard_eight.EnvironmentSettings(
seed=FLAGS.seed, level_name=FLAGS.level_name)
with dm_hard_eight.load_from_docker(
name=FLAGS.docker_image_name, settings=env_settings) as env:
agent = RandomAgent(env.action_spec())
timestep = env.reset()
score = 0
while not timestep.last():
action = agent.act()
timestep = env.step(action)
if timestep.reward:
score += timestep.reward
logging.info('Total score: %1.1f, reward: %1.1f', score,
timestep.reward)
if __name__ == '__main__':
app.run(main)
|
dm_hard_eight-master
|
examples/random_agent.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example human agent for interacting with DeepMind Hard Eight Tasks."""
from absl import app
from absl import flags
from absl import logging
import dm_hard_eight
import numpy as np
import pygame
FLAGS = flags.FLAGS
flags.DEFINE_list(
'screen_size', [640, 480],
'Screen width/height in pixels. Scales the environment RGB observations to '
'fit the screen size.')
flags.DEFINE_string(
'docker_image_name', None,
'Name of the Docker image that contains the Hard Eight Tasks. '
'If None, uses the default dm_hard_eight image name')
flags.DEFINE_integer('seed', 123, 'Environment seed.')
flags.DEFINE_string('level_name', 'ball_room_navigation_cubes',
'Name of Hard Eight task to run.')
_FRAMES_PER_SECOND = 30
_MOUSE_SENSITIVITY = 0.1
_CURSOR_COLOR = (255, 0, 0)
_CURSOR_SIZE = 2
_LEFT_BUTTON = 1
_KEYS_TO_ACTION = {
pygame.K_w: {'MOVE_BACK_FORWARD': 1},
pygame.K_s: {'MOVE_BACK_FORWARD': -1},
pygame.K_a: {'STRAFE_LEFT_RIGHT': -1},
pygame.K_d: {'STRAFE_LEFT_RIGHT': 1},
pygame.K_UP: {'LOOK_DOWN_UP': -1},
pygame.K_DOWN: {'LOOK_DOWN_UP': 1},
pygame.K_LEFT: {'LOOK_LEFT_RIGHT': -1},
pygame.K_RIGHT: {'LOOK_LEFT_RIGHT': 1},
pygame.K_i: {'LOOK_DOWN_UP': -1},
pygame.K_k: {'LOOK_DOWN_UP': 1},
pygame.K_j: {'LOOK_LEFT_RIGHT': -1},
pygame.K_l: {'LOOK_LEFT_RIGHT': 1},
pygame.K_y: {'HAND_ROTATE_AROUND_FORWARD': 1},
pygame.K_r: {'HAND_ROTATE_AROUND_FORWARD': -1},
pygame.K_t: {'HAND_ROTATE_AROUND_RIGHT': -1},
pygame.K_g: {'HAND_ROTATE_AROUND_RIGHT': 1},
pygame.K_f: {'HAND_ROTATE_AROUND_UP': -1},
pygame.K_h: {'HAND_ROTATE_AROUND_UP': 1},
pygame.K_b: {'HAND_PUSH_PULL': 10},
pygame.K_v: {'HAND_PUSH_PULL': -10},
pygame.K_SPACE: {'HAND_GRIP': 1},
} # pyformat: disable
_NO_ACTION = {
'MOVE_BACK_FORWARD': 0,
'STRAFE_LEFT_RIGHT': 0,
'LOOK_LEFT_RIGHT': 0,
'LOOK_DOWN_UP': 0,
'HAND_ROTATE_AROUND_FORWARD': 0,
'HAND_ROTATE_AROUND_RIGHT': 0,
'HAND_ROTATE_AROUND_UP': 0,
'HAND_PUSH_PULL': 0,
'HAND_GRIP': 0
}
def _grab_mouse(grab=True):
pygame.event.set_grab(grab)
pygame.mouse.set_visible(not grab)
def main(_):
pygame.init()
try:
pygame.mixer.quit()
except NotImplementedError:
pass
pygame.display.set_caption('Hard Eight Human Agent')
env_settings = dm_hard_eight.EnvironmentSettings(
seed=FLAGS.seed, level_name=FLAGS.level_name)
with dm_hard_eight.load_from_docker(name=FLAGS.docker_image_name,
settings=env_settings) as env:
screen = pygame.display.set_mode(
(int(FLAGS.screen_size[0]), int(FLAGS.screen_size[1])))
rgb_spec = env.observation_spec()['RGB_INTERLEAVED']
surface = pygame.Surface((rgb_spec.shape[1], rgb_spec.shape[0]))
score = 0
clock = pygame.time.Clock()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
return
if event.key == pygame.K_ESCAPE:
_grab_mouse(not pygame.event.get_grab())
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == _LEFT_BUTTON:
_grab_mouse()
actions = _NO_ACTION.copy()
keys = pygame.key.get_pressed()
for key, key_actions in _KEYS_TO_ACTION.items():
if not keys[key]:
continue
for name, action in key_actions.items():
actions[name] = action
if pygame.event.get_grab():
left_button_pressed, _, _ = pygame.mouse.get_pressed()
if left_button_pressed:
actions['HAND_GRIP'] = 1
x, y = pygame.mouse.get_rel()
actions['LOOK_LEFT_RIGHT'] = _MOUSE_SENSITIVITY * x
actions['LOOK_DOWN_UP'] = _MOUSE_SENSITIVITY * y
timestep = env.step(actions)
frame = np.swapaxes(timestep.observation['RGB_INTERLEAVED'], 0, 1)
pygame.surfarray.blit_array(surface, frame)
pygame.transform.smoothscale(surface, screen.get_size(), screen)
info = pygame.display.Info()
rect_x = (info.current_w - _CURSOR_SIZE) // 2
rect_y = (info.current_h - _CURSOR_SIZE) // 2
fill_rectangle = 0
pygame.draw.rect(screen, _CURSOR_COLOR,
(rect_x, rect_y, _CURSOR_SIZE, _CURSOR_SIZE),
fill_rectangle)
pygame.display.update()
if timestep.reward:
score += timestep.reward
logging.info('Total score: %1.1f, reward: %1.1f', score,
timestep.reward)
clock.tick(_FRAMES_PER_SECOND)
if __name__ == '__main__':
app.run(main)
|
dm_hard_eight-master
|
examples/human_agent.py
|
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Python utility functions for loading DeepMind Hard Eight Tasks."""
import codecs
import collections
import json
import os
import re
import subprocess
import time
import typing
from absl import logging
import dm_env
import docker
import grpc
import numpy as np
import portpicker
from dm_env_rpc.v1 import connection as dm_env_rpc_connection
from dm_env_rpc.v1 import dm_env_adaptor
from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import error
from dm_env_rpc.v1 import tensor_utils
# Maximum number of times to attempt gRPC connection.
_MAX_CONNECTION_ATTEMPTS = 10
# Port to expect the docker environment to internally listen on.
_DOCKER_INTERNAL_GRPC_PORT = 10000
_DEFAULT_DOCKER_IMAGE_NAME = 'gcr.io/deepmind-environments/dm_hard_eight:v1.0.1'
_HARD_EIGHT_TASK_OBSERVATIONS = ('RGB_INTERLEAVED', 'ACCELERATION',
'HAND_FORCE', 'HAND_IS_HOLDING',
'HAND_DISTANCE', 'Score')
HARD_EIGHT_TASK_LEVEL_NAMES = frozenset((
'ball_room_navigation_cubes',
'drawbridge_max3fork_safe_randomwall',
'reach_wall',
'reach_wall_require_stack',
'sensorhidden_color_novary_greywall',
'slot_colored_block',
'throw_across',
'use_object_knock_down',
))
_ConnectionDetails = collections.namedtuple('_ConnectionDetails',
['channel', 'connection', 'specs'])
class _HardEightTasksEnv(dm_env_adaptor.DmEnvAdaptor):
"""An implementation of dm_env_rpc.DmEnvAdaptor for hard eight tasks."""
def __init__(self, connection_details, requested_observations,
num_action_repeats):
super(_HardEightTasksEnv,
self).__init__(connection_details.connection,
connection_details.specs, requested_observations)
self._channel = connection_details.channel
self._num_action_repeats = num_action_repeats
def close(self):
super(_HardEightTasksEnv, self).close()
self._channel.close()
def step(self, action):
"""Implementation of dm_env.step that supports repeated actions."""
timestep = None
discount = None
reward = None
for _ in range(self._num_action_repeats):
next_timestep = super(_HardEightTasksEnv, self).step(action)
# Accumulate reward per timestep.
if next_timestep.reward is not None:
reward = (reward or 0.) + next_timestep.reward
# Calculate the product for discount.
if next_timestep.discount is not None:
discount = discount if discount else []
discount.append(next_timestep.discount)
timestep = dm_env.TimeStep(next_timestep.step_type, reward,
# Note: np.product(None) returns None.
np.product(discount),
next_timestep.observation)
if timestep.last():
return timestep
return timestep
class _HardEightTasksContainerEnv(_HardEightTasksEnv):
"""An implementation of _HardEightTasksEnv.
Ensures that the provided Docker container is closed on exit.
"""
def __init__(self, connection_details, requested_observations,
num_action_repeats, container):
super(_HardEightTasksContainerEnv,
self).__init__(connection_details, requested_observations,
num_action_repeats)
self._container = container
def close(self):
super(_HardEightTasksContainerEnv, self).close()
try:
self._container.kill()
except docker.errors.NotFound:
pass # Ignore, container has already been closed.
class _HardEightTasksProcessEnv(_HardEightTasksEnv):
"""An implementation of _HardEightTasksEnv.
  Ensures that the provided running process is closed on exit.
"""
def __init__(self, connection_details, requested_observations,
num_action_repeats, process):
super(_HardEightTasksProcessEnv,
self).__init__(connection_details, requested_observations,
num_action_repeats)
self._process = process
def close(self):
super(_HardEightTasksProcessEnv, self).close()
self._process.terminate()
self._process.wait()
def _check_grpc_channel_ready(channel):
"""Helper function to check the gRPC channel is ready N times."""
for _ in range(_MAX_CONNECTION_ATTEMPTS - 1):
try:
return grpc.channel_ready_future(channel).result(timeout=1)
except grpc.FutureTimeoutError:
pass
return grpc.channel_ready_future(channel).result(timeout=1)
def _can_send_message(connection):
"""Returns if `connection` is healthy and able to process requests."""
try:
# This should return a response with an error unless the server isn't yet
# receiving requests.
connection.send(dm_env_rpc_pb2.StepRequest())
except error.DmEnvRpcError:
return True
except grpc.RpcError:
return False
def _create_channel_and_connection(port):
"""Returns a tuple of `(channel, connection)`."""
for _ in range(_MAX_CONNECTION_ATTEMPTS):
channel = grpc.secure_channel('localhost:{}'.format(port),
grpc.local_channel_credentials())
_check_grpc_channel_ready(channel)
connection = dm_env_rpc_connection.Connection(channel)
if _can_send_message(connection):
break
else:
# A gRPC server running within Docker sometimes reports that the channel
# is ready but transitively returns an error (status code 14) on first
      # use. Giving the server some time to breathe and retrying often fixes the
# problem.
connection.close()
channel.close()
time.sleep(1.0)
return channel, connection
def _parse_exception_message(message):
"""Returns a human-readable version of a dm_env_rpc json error message."""
try:
match = re.match(r'^message\:\ \"(.*)\"$', message)
json_data = codecs.decode(match.group(1), 'unicode-escape')
parsed_json_data = json.loads(json_data)
return ValueError(json.dumps(parsed_json_data, indent=4))
except: # pylint: disable=bare-except
return message
def _wrap_send(send):
"""Wraps `send` in order to reformat exceptions."""
try:
return send()
except ValueError as e:
e.args = [_parse_exception_message(e.args[0])]
raise
def _connect_to_environment(port, settings):
"""Helper function for connecting to a running dm_hard_eight environment."""
if settings.level_name not in HARD_EIGHT_TASK_LEVEL_NAMES:
raise ValueError(
'Level named "{}" is not supported for dm_hard_eight'.format(
settings.level_name))
channel, connection = _create_channel_and_connection(port)
original_send = connection.send
connection.send = lambda request: _wrap_send(lambda: original_send(request))
world_name = connection.send(
dm_env_rpc_pb2.CreateWorldRequest(
settings={
'seed': tensor_utils.pack_tensor(settings.seed),
'episodeId': tensor_utils.pack_tensor(0),
'levelName': tensor_utils.pack_tensor(settings.level_name),
})).world_name
join_world_settings = {
'width':
tensor_utils.pack_tensor(settings.width),
'height':
tensor_utils.pack_tensor(settings.height),
}
specs = connection.send(
dm_env_rpc_pb2.JoinWorldRequest(
world_name=world_name, settings=join_world_settings)).specs
return _ConnectionDetails(channel=channel, connection=connection, specs=specs)
class EnvironmentSettings(typing.NamedTuple):
"""Collection of settings used to start a specific Hard Eight task.
Required attributes:
seed: Seed to initialize the environment's RNG.
level_name: Name of the level to load.
Optional attributes:
width: Width (in pixels) of the desired RGB observation; defaults to 96.
height: Height (in pixels) of the desired RGB observation; defaults to 72.
num_action_repeats: Number of times to step the environment with the
provided action in calls to `step()`.
"""
seed: int
level_name: str
width: int = 96
height: int = 72
num_action_repeats: int = 1
def _validate_environment_settings(settings):
"""Helper function to validate the provided environment settings."""
if settings.num_action_repeats <= 0:
raise ValueError('num_action_repeats must have a positive value.')
if settings.width <= 0 or settings.height <= 0:
raise ValueError('width and height must have a positive value.')
def load_from_disk(path, settings):
"""Load Hard Eight Tasks from disk.
Args:
path: Directory containing dm_hard_eight environment.
settings: EnvironmentSettings required to start the environment.
Returns:
An implementation of dm_env.Environment.
Raises:
RuntimeError: If unable to start environment process.
"""
_validate_environment_settings(settings)
executable_path = os.path.join(path, 'Linux64Player')
libosmesa_path = os.path.join(path, 'external_libosmesa_llvmpipe.so')
if not os.path.exists(executable_path) or not os.path.exists(libosmesa_path):
raise RuntimeError(
'Cannot find dm_hard_eight executable or dependent files at path: {}'
.format(path))
port = portpicker.pick_unused_port()
process_flags = [
executable_path,
# Unity command-line flags.
'-logfile',
'-batchmode',
'-noaudio',
# Other command-line flags.
'--logtostderr',
'--server_type=DM_ENV_RPC',
'--uri_address=[::]:{}'.format(port),
]
os.environ.update({
'UNITY_RENDERER': 'software',
'UNITY_OSMESA_PATH': libosmesa_path,
})
process = subprocess.Popen(
process_flags, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
if process.poll() is not None:
raise RuntimeError('Failed to start dm_hard_eight process correctly.')
return _HardEightTasksProcessEnv(
_connect_to_environment(port, settings), _HARD_EIGHT_TASK_OBSERVATIONS,
settings.num_action_repeats, process)
def load_from_docker(settings, name=None):
"""Load Hard Eight Tasks from docker container.
Args:
settings: EnvironmentSettings required to start the environment.
name: Optional name of Docker image that contains the dm_hard_eight
environment. If left unset, uses the dm_hard_eight default name.
Returns:
An implementation of dm_env.Environment
"""
_validate_environment_settings(settings)
name = name or _DEFAULT_DOCKER_IMAGE_NAME
client = docker.from_env()
port = portpicker.pick_unused_port()
try:
client.images.get(name)
except docker.errors.ImageNotFound:
logging.info('Downloading docker image "%s"...', name)
client.images.pull(name)
logging.info('Download finished.')
container = client.containers.run(
name,
auto_remove=True,
detach=True,
ports={_DOCKER_INTERNAL_GRPC_PORT: port})
return _HardEightTasksContainerEnv(
_connect_to_environment(port, settings), _HARD_EIGHT_TASK_OBSERVATIONS,
settings.num_action_repeats, container)
|
dm_hard_eight-master
|
dm_hard_eight/_load_environment.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Package version for dm_hard_eight.
Kept in separate file so it can be used during installation.
"""
__version__ = '1.0.0b1' # https://www.python.org/dev/peps/pep-0440/
|
dm_hard_eight-master
|
dm_hard_eight/_version.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_hard_eight.load_from_disk."""
from absl import flags
from absl.testing import absltest
from dm_env import test_utils
import dm_hard_eight
FLAGS = flags.FLAGS
flags.DEFINE_string('path', '',
'Directory that contains dm_hard_eight environment.')
class LoadFromDiskTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self, level_name='ball_room_navigation_cubes'):
return dm_hard_eight.load_from_disk(
FLAGS.path,
settings=dm_hard_eight.EnvironmentSettings(
seed=123, level_name=level_name))
def test_action_spec(self):
action_spec = set(self.environment.action_spec().keys())
expected_actions = {
'STRAFE_LEFT_RIGHT', 'MOVE_BACK_FORWARD', 'LOOK_LEFT_RIGHT',
'LOOK_DOWN_UP', 'HAND_ROTATE_AROUND_RIGHT', 'HAND_ROTATE_AROUND_UP',
'HAND_ROTATE_AROUND_FORWARD', 'HAND_PUSH_PULL', 'HAND_GRIP'
}
self.assertSetEqual(expected_actions, action_spec)
if __name__ == '__main__':
absltest.main()
|
dm_hard_eight-master
|
dm_hard_eight/load_from_disk_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Python utilities for running dm_hard_eight."""
from dm_hard_eight import _load_environment
from dm_hard_eight._version import __version__
EnvironmentSettings = _load_environment.EnvironmentSettings
LEVEL_NAMES = _load_environment.HARD_EIGHT_TASK_LEVEL_NAMES
load_from_disk = _load_environment.load_from_disk
load_from_docker = _load_environment.load_from_docker
|
dm_hard_eight-master
|
dm_hard_eight/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_hard_eight.load_from_docker."""
from absl import flags
from absl.testing import absltest
from dm_env import test_utils
import dm_hard_eight
FLAGS = flags.FLAGS
flags.DEFINE_string(
'docker_image_name', None,
'Name of the Docker image that contains the Hard Eight Tasks. '
'If None, uses the default dm_hard_eight name')
class LoadFromDockerTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self, level_name='ball_room_navigation_cubes'):
return dm_hard_eight.load_from_docker(
name=FLAGS.docker_image_name,
settings=dm_hard_eight.EnvironmentSettings(
seed=123, level_name=level_name))
def test_action_spec(self):
action_spec = set(self.environment.action_spec().keys())
expected_actions = {
'STRAFE_LEFT_RIGHT', 'MOVE_BACK_FORWARD', 'LOOK_LEFT_RIGHT',
'LOOK_DOWN_UP', 'HAND_ROTATE_AROUND_RIGHT', 'HAND_ROTATE_AROUND_UP',
'HAND_ROTATE_AROUND_FORWARD', 'HAND_PUSH_PULL', 'HAND_GRIP'
}
self.assertSetEqual(expected_actions, action_spec)
if __name__ == '__main__':
absltest.main()
|
dm_hard_eight-master
|
dm_hard_eight/load_from_docker_test.py
|
"""Arnheim 3 - Collage Creator
Piotr Mirowski, Dylan Banarse, Mateusz Malinowski, Yotam Doron, Oriol Vinyals,
Simon Osindero, Chrisantha Fernando
DeepMind, 2021-2022
Copyright 2021 DeepMind Technologies Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import configargparse
from datetime import datetime
import glob
import os
import pathlib
import subprocess
import sys
import yaml
import numpy as np
import torch
import clip
import src.collage as collage
import src.video_utils as video_utils
# Specify (and override) the config.
ap = configargparse.ArgumentParser(default_config_files=["configs/config.yaml"])
ap.add_argument("-c", "--config", required=True, is_config_file=True,
help="Config file")
# Use CUDA?
ap.add_argument("--cuda", dest="cuda", action="store_true")
ap.add_argument("--no-cuda", dest="cuda", action="store_false")
ap.set_defaults(cuda=True)
ap.add_argument("--torch_device", type=str, default="cuda",
help="Alternative way of specifying the device: cuda or cpu?")
# Output directory.
ap.add_argument("--init_checkpoint", type=str, default="",
help="Path to checkpoint")
# Output directory.
ap.add_argument("--output_dir", type=str, default="",
help="Output directory")
# Clean-up?
ap.add_argument("--clean_up", dest='clean_up', help="Remove all working files",
action='store_true')
ap.add_argument("--no-clean_up", dest='clean_up',
help="Remove all working files", action='store_false')
ap.set_defaults(clean_up=False)
# GUI?
ap.add_argument('--gui', dest='gui', action='store_true')
ap.add_argument('--no-gui', dest='gui', action='store_false')
ap.set_defaults(gui=False)
# Video and tracing.
ap.add_argument("--video_steps", type=int, default=0,
help="Number of steps between two video frames")
ap.add_argument("--trace_every", type=int, default=50,
help="Number of steps between two logging traces")
ap.add_argument('--population_video', dest='population_video',
action='store_true', help='Write the video of population?')
ap.add_argument('--no-population_video', dest='population_video',
action='store_false', help='Write the video of population?')
ap.set_defaults(population_video=False)
# Canvas size.
ap.add_argument("--canvas_width", type=int, default=224,
help="Image width for CLIP optimization")
ap.add_argument("--canvas_height", type=int, default=224,
help="Image height for CLIP optimization")
ap.add_argument("--max_block_size_high_res", type=int, default=2000,
help="Max block size for high-res image")
# Render methods.
ap.add_argument("--render_method", type=str, default="transparency",
help="opacity patches overlay each other using combinations of "
"alpha and depth, transparency _adds_ patch RGB values (black "
"therefore appearing transparent), masked_transparency_clipped "
"and masked_transparency_normed blend patches using the alpha "
"channel")
ap.add_argument("--num_patches", type=int, default=100,
help="Number of patches")
ap.add_argument("--colour_transformations", type=str, default="RGB space",
help="Can be none, RGB space or HHSV space")
ap.add_argument("--invert_colours", dest="invert_colours", action='store_true',
help="Invert image colours to have a white background?")
ap.add_argument("--no-invert_colours", dest="invert_colours",
action='store_false',
help="Invert image colours to have a white background?")
ap.set_defaults(invert_colours=False)
ap.add_argument("--high_res_multiplier", type=int, default=4,
help="Ratio between large canvas and CLIP-optimized canvas")
ap.add_argument('--save_all_arrays', dest='save_all_arrays',
action='store_true',
help='Save the optimised patch arrays as an npy file?')
ap.add_argument('--no-save_all_arrays', dest='save_all_arrays',
action='store_false',
help='Save the optimised patch arrays as an npy file?')
ap.set_defaults(save_all_arrays=False)
# Affine transform settings.
ap.add_argument("--min_trans", type=float, default=-1.,
help="Translation min for X and Y")
ap.add_argument("--max_trans", type=float, default=1.,
help="Translation max for X and Y")
ap.add_argument("--min_trans_init", type=float, default=-1.,
help="Initial translation min for X and Y")
ap.add_argument("--max_trans_init", type=float, default=1.,
help="Initial translation max for X and Y")
ap.add_argument("--min_scale", type=float, default=1.,
help="Scale min (> 1 means zoom out and < 1 means zoom in)")
ap.add_argument("--max_scale", type=float, default=2.,
help="Scale max (> 1 means zoom out and < 1 means zoom in)")
ap.add_argument("--min_squeeze", type=float, default=0.5,
help="Min ratio between X and Y scale")
ap.add_argument("--max_squeeze", type=float, default=2.,
help="Max ratio between X and Y scale")
ap.add_argument("--min_shear", type=float, default=-0.2,
help="Min shear deformation")
ap.add_argument("--max_shear", type=float, default=0.2,
help="Max shear deformation")
ap.add_argument("--min_rot_deg", type=float, default=-180, help="Min rotation")
ap.add_argument("--max_rot_deg", type=float, default=180, help="Max rotation")
# Colour transform settings.
ap.add_argument("--min_rgb", type=float, default=-0.2,
help="Min RGB between -1 and 1")
ap.add_argument("--max_rgb", type=float, default=1.0,
help="Max RGB between -1 and 1")
ap.add_argument("--initial_min_rgb", type=float, default=0.5,
help="Initial min RGB between -1 and 1")
ap.add_argument("--initial_max_rgb", type=float, default=1.,
help="Initial max RGB between -1 and 1")
ap.add_argument("--min_hue_deg", type=float, default=0.,
help="Min hue between 0 and 360")
ap.add_argument("--max_hue_deg", type=float, default=360,
help="Max hue (in degrees) between 0 and 360")
ap.add_argument("--min_sat", type=float, default=0,
help="Min saturation between 0 and 1")
ap.add_argument("--max_sat", type=float, default=1,
help="Max saturation between 0 and 1")
ap.add_argument("--min_val", type=float, default=0,
help="Min value between 0 and 1")
ap.add_argument("--max_val", type=float, default=1,
help="Max value between 0 and 1")
# Training settings.
ap.add_argument("--clip_model", type=str, default="ViT-B/32", help="CLIP model")
ap.add_argument("--optim_steps", type=int, default=10000,
help="Number of training steps (between 0 and 20000)")
ap.add_argument("--learning_rate", type=float, default=0.1,
help="Learning rate, typically between 0.05 and 0.3")
ap.add_argument("--use_image_augmentations", dest="use_image_augmentations",
action='store_true',
help="User image augmentations for CLIP evaluation?")
ap.add_argument("--no-use_image_augmentations", dest="use_image_augmentations",
action='store_false',
help="User image augmentations for CLIP evaluation?")
ap.set_defaults(use_image_augmentations=True)
ap.add_argument("--num_augs", type=int, default=4,
help="Number of image augmentations to use in CLIP evaluation")
ap.add_argument("--use_normalized_clip", dest="use_normalized_clip",
action='store_true',
help="Normalize colours for CLIP, generally leave this as True")
ap.add_argument("--no-use_normalized_clip", dest="use_normalized_clip",
action='store_false',
help="Normalize colours for CLIP, generally leave this as True")
ap.set_defaults(use_normalized_clip=False)
ap.add_argument("--gradient_clipping", type=float, default=10.0,
help="Gradient clipping during optimisation")
ap.add_argument("--initial_search_size", type=int, default=1,
help="Initial random search size (1 means no search)")
ap.add_argument("--initial_search_num_steps", type=int, default=1,
help="Number of gradient steps in initial random search size "
"(1 means only random search, more means gradient descent)")
# Evolution settings.
ap.add_argument("--pop_size", type=int, default=2,
help="For evolution set this to greater than 1")
ap.add_argument("--evolution_frequency", type=int, default= 100,
help="Number of gradient steps between two evolution mutations")
ap.add_argument("--ga_method", type=str, default="Microbial",
help="Microbial: loser of randomly selected pair is replaced "
"by mutated winner. A low selection pressure. Evolutionary "
"Strategies: mutantions of the best individual replace the "
"rest of the population. Much higher selection pressure than "
"Microbial GA")
# Mutation levels.
ap.add_argument("--pos_and_rot_mutation_scale", type=float, default=0.02,
help="Probability of position and rotation mutations")
ap.add_argument("--scale_mutation_scale", type=float, default=0.02,
help="Probability of scale mutations")
ap.add_argument("--distort_mutation_scale", type=float, default=0.02,
help="Probability of distortion mutations")
ap.add_argument("--colour_mutation_scale", type=float, default=0.02,
help="Probability of colour mutations")
ap.add_argument("--patch_mutation_probability", type=float, default=1,
help="Probability of patch mutations")
# Visualisation.
ap.add_argument("--max_multiple_visualizations", type=int, default=5,
help="Limit the number of individuals shown during training")
# Load segmented patches.
ap.add_argument("--multiple_patch_set", default=None,
action='append', dest="multiple_patch_set")
ap.add_argument("--multiple_fixed_scale_patches", default=None,
action='append', dest="multiple_fixed_scale_patches")
ap.add_argument("--multiple_patch_max_proportion", default=None,
action='append', dest="multiple_patch_max_proportion")
ap.add_argument("--multiple_fixed_scale_coeff", default=None,
action='append', dest="multiple_fixed_scale_coeff")
ap.add_argument("--patch_set", type=str, default="animals.npy",
help="Name of Numpy file with patches")
ap.add_argument("--patch_repo_root", type=str,
default=
"https://storage.googleapis.com/dm_arnheim_3_assets/collage_patches",
help="URL to patches")
ap.add_argument("--url_to_patch_file", type=str, default="",
help="URL to a patch file")
# Resize image patches to low- and high-res.
ap.add_argument("--fixed_scale_patches", dest="fixed_scale_patches",
action='store_true', help="Use fixed scale patches?")
ap.add_argument("--no-fixed_scale_patches", dest="fixed_scale_patches",
action='store_false', help="Use fixed scale patches?")
ap.set_defaults(fixed_scale_patches=True)
ap.add_argument("--fixed_scale_coeff", type=float, default=0.7,
help="Scale coeff for fixed scale patches")
ap.add_argument("--normalize_patch_brightness",
dest="normalize_patch_brightness", action='store_true',
help="Normalize the brightness of patches?")
ap.add_argument("--no-normalize_patch_brightness",
dest="normalize_patch_brightness", action='store_false',
help="Normalize the brightness of patches?")
ap.set_defaults(normalize_patch_brightness=False)
ap.add_argument("--patch_max_proportion", type=int, default= 5,
help="Max proportion of patches, between 2 and 8")
ap.add_argument("--patch_width_min", type=int, default=16,
help="Min width of patches")
ap.add_argument("--patch_height_min", type=int, default=16,
help="Min height of patches")
# Configure a background, e.g. uploaded picture or solid colour.
ap.add_argument("--background_use", type=str, default="Global",
help="Global: use image across whole image, "
"or Local: reuse same image for every tile")
ap.add_argument("--background_url", type=str, default="",
help="URL for background image")
ap.add_argument("--background_red", type=int, default=0,
help="Red solid colour background (0 to 255)")
ap.add_argument("--background_green", type=int, default=0,
help="Green solid colour background (0 to 255)")
ap.add_argument("--background_blue", type=int, default=0,
help="Blue solid colour background (0 to 255)")
# Configure image prompt and content.
ap.add_argument("--global_prompt", type=str,
default="Roman mosaic of an unswept floor",
help="Global description of the image")
# Tile prompts and tiling settings.
ap.add_argument("--tile_images", action='store_true', dest="tile_images",
help="Tile images?")
ap.add_argument("--no-tile_images", action='store_false', dest="tile_images",
help="Tile images?")
ap.set_defaults(tile_images=False)
ap.add_argument("--tiles_wide", type=int, default=1,
help="Number of width tiles")
ap.add_argument("--tiles_high", type=int, default=1,
help="Number of height tiles")
ap.add_argument("--global_tile_prompt", dest="global_tile_prompt",
action='store_true',
help="Global tile prompt uses global_prompt (previous cell) "
"for *all* tiles (e.g. Roman mosaic of an unswept floor)")
ap.add_argument("--no-global_tile_prompt", dest="global_tile_prompt",
action='store_false',
help="Global tile prompt uses global_prompt (previous cell) "
"for *all* tiles (e.g. Roman mosaic of an unswept floor)")
ap.set_defaults(global_tile_prompt=False)
ap.add_argument("--tile_prompt_string", type=str, default="",
help="Otherwise, specify multiple tile prompts with columns "
"separated by | and / to delineate new row. E.g. multiple "
"prompts for a 3x2 'landscape' image: "
"'sun | clouds | sky / fields | fields | trees'")
# Composition prompts.
ap.add_argument("--compositional_image", dest="compositional_image",
action="store_true",
help="Use additional prompts for different regions")
ap.add_argument("--no-compositional_image", dest="compositional_image",
action="store_false",
help="Do not use additional prompts for different regions")
ap.set_defaults(compositional_image=False)
# Single image (i.e. no tiling) composition prompts:
# specify 3x3 prompts for each composition region.
ap.add_argument("--prompt_x0_y0", type=str,
default="a photorealistic sky with sun", help="Top left prompt")
ap.add_argument("--prompt_x1_y0", type=str,
default="a photorealistic sky", help="Top centre prompt")
ap.add_argument("--prompt_x2_y0", type=str,
default="a photorealistic sky with moon", help="Top right prompt")
ap.add_argument("--prompt_x0_y1", type=str,
default="a photorealistic tree", help="Middle left prompt")
ap.add_argument("--prompt_x1_y1", type=str,
default="a photorealistic tree", help="Centre prompt")
ap.add_argument("--prompt_x2_y1", type=str,
default="a photorealistic tree", help="Middle right prompt")
ap.add_argument("--prompt_x0_y2", type=str,
default="a photorealistic field", help="Bottom left prompt")
ap.add_argument("--prompt_x1_y2", type=str,
default="a photorealistic field", help="Bottom centre prompt")
ap.add_argument("--prompt_x2_y2", type=str,
default="a photorealistic chicken", help="Bottom right prompt")
# Tile composition prompts.
ap.add_argument("--tile_prompt_formating", type=str, default="close-up of {}",
help="This string is formated to autogenerate region prompts "
"from tile prompt. e.g. close-up of {}")
# Get the config.
config = vars(ap.parse_args())
print(config)
# Adjust config for compositional image.
if config["compositional_image"] == True:
print("Generating compositional image")
config['canvas_width'] *= 2
config['canvas_height'] *= 2
config['high_res_multiplier'] = int(config['high_res_multiplier'] / 2)
print("Using one image augmentations for compositional image creation.")
config["use_image_augmentations"] = True
config["num_augs"] = 1
# Turn off tiling if either boolean is set or width/height set to 1.
if (not config["tile_images"] or
(config["tiles_wide"] == 1 and config["tiles_high"] == 1)):
print("No tiling.")
config["tiles_wide"] = 1
config["tiles_high"] = 1
config["tile_images"] = False
# Default output dir.
if len(config["output_dir"]) == 0:
config["output_dir"] = "output_"
config["output_dir"] += datetime.strftime(datetime.now(), '%Y%m%d_%H%M%S')
config["output_dir"] += '/'
# Print the config.
print("\n")
yaml.dump(config, sys.stdout, default_flow_style=False, allow_unicode=True)
print("\n\n")
# Configure CUDA.
print("Torch version:", torch.__version__)
if not config["cuda"] or config["torch_device"] == "cpu":
config["torch_device"] = "cpu"
config["cuda"] = False
device = torch.device(config["torch_device"])
# Configure ffmpeg.
os.environ["FFMPEG_BINARY"] = "ffmpeg"
# Initialise and load CLIP model.
print(f"Downloading CLIP model {config['clip_model']}...")
clip_model, _ = clip.load(config["clip_model"], device, jit=False)
# Make output dir.
output_dir = config["output_dir"]
print(f"Storing results in {output_dir}\n")
pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
# Save the config.
config_filename = config["output_dir"] + '/' + "config.yaml"
with open(config_filename, "w") as f:
yaml.dump(config, f, default_flow_style=False, allow_unicode=True)
# Tiling.
if not config["tile_images"] or config["global_tile_prompt"]:
tile_prompts = (
[config["global_prompt"]] * config["tiles_high"] * config["tiles_wide"])
else:
tile_prompts = []
count_y = 0
count_x = 0
for row in config["tile_prompt_string"].split("/"):
for prompt in row.split("|"):
prompt = prompt.strip()
tile_prompts.append(prompt)
count_x += 1
if count_x != config["tiles_wide"]:
w = config["tiles_wide"]
raise ValueError(
f"Insufficient prompts for row {count_y}; expected {w}, got {count_x}")
count_x = 0
count_y += 1
if count_y != config["tiles_high"]:
h = config["tiles_high"]
raise ValueError(f"Insufficient prompt rows; expected {h}, got {count_y}")
print("Tile prompts: ", tile_prompts)
# Prepare duplicates of config data if required for tiles.
tile_count = 0
all_prompts = []
for y in range(config["tiles_high"]):
for x in range(config["tiles_wide"]):
list_tile_prompts = []
if config["compositional_image"]:
if config["tile_images"]:
list_tile_prompts = [
config["tile_prompt_formating"].format(tile_prompts[tile_count])
] * 9
else:
list_tile_prompts = [
config["prompt_x0_y0"], config["prompt_x1_y0"],
config["prompt_x2_y0"],
config["prompt_x0_y1"], config["prompt_x1_y1"],
config["prompt_x2_y1"],
config["prompt_x0_y2"], config["prompt_x1_y2"],
config["prompt_x2_y2"]]
list_tile_prompts.append(tile_prompts[tile_count])
tile_count += 1
all_prompts.append(list_tile_prompts)
print(f"All prompts: {all_prompts}")
# Background.
background_image = None
background_url = config["background_url"]
if len(background_url) > 0:
# Load background image from URL.
if background_url.startswith("http"):
background_image = video_utils.cached_url_download(background_url,
format="image_as_np")
else:
background_image = video_utils.load_image(background_url,
show=config["gui"])
else:
background_image = np.ones((10, 10, 3), dtype=np.float32)
background_image[:, :, 0] = config["background_red"] / 255.
background_image[:, :, 1] = config["background_green"] / 255.
background_image[:, :, 2] = config["background_blue"] / 255.
print('Defined background colour ({}, {}, {})'.format(
config["background_red"], config["background_green"],
config["background_blue"]))
# Initialise the collage.
ct = collage.CollageTiler(
prompts=all_prompts,
fixed_background_image=background_image,
clip_model=clip_model,
device=device,
config=config)
ct.initialise()
# Collage optimisation loop.
output = ct.loop()
# Render high res image and finish up.
ct.assemble_tiles()
# Clean-up temporary files.
if config["clean_up"]:
for file_match in ["*.npy", "tile_*.png"]:
output_dir = config["output_dir"]
files = glob.glob(f"{output_dir}/{file_match}")
for f in files:
os.remove(f)
|
arnheim-main
|
arnheim_3/main.py
|
"""Collage network definition.
Arnheim 3 - Collage
Piotr Mirowski, Dylan Banarse, Mateusz Malinowski, Yotam Doron, Oriol Vinyals,
Simon Osindero, Chrisantha Fernando
DeepMind, 2021-2022
Copyright 2021 DeepMind Technologies Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
from . import rendering
from . import transformations
import numpy as np
import torch
class PopulationCollage(torch.nn.Module):
"""Population-based segmentation collage network.
Image structure in this class is SCHW.
"""
def __init__(self,
config,
device,
pop_size=1,
is_high_res=False,
segmented_data=None,
background_image=None):
"""Constructor, relying on global parameters."""
super(PopulationCollage, self).__init__()
# Config, device, number of patches and population size.
self.config = config
self.device = device
self._canvas_height = config["canvas_height"]
self._canvas_width = config["canvas_width"]
self._high_res_multiplier = config["high_res_multiplier"]
self._num_patches = self.config["num_patches"]
self._pop_size = pop_size
requires_grad = not is_high_res
# Create the spatial transformer and colour transformer for patches.
self.spatial_transformer = transformations.PopulationAffineTransforms(
config, device, num_patches=self._num_patches, pop_size=pop_size,
requires_grad=requires_grad, is_high_res=is_high_res)
if self.config["colour_transformations"] == "HSV space":
self.colour_transformer = transformations.PopulationColourHSVTransforms(
config, device, num_patches=self._num_patches, pop_size=pop_size,
requires_grad=requires_grad)
elif self.config["colour_transformations"] == "RGB space":
self.colour_transformer = transformations.PopulationColourRGBTransforms(
config, device, num_patches=self._num_patches, pop_size=pop_size,
requires_grad=requires_grad)
else:
self.colour_transformer = transformations.PopulationOrderOnlyTransforms(
config, device, num_patches=self._num_patches, pop_size=pop_size,
requires_grad=requires_grad)
if config["torch_device"] == "cuda":
self.spatial_transformer = self.spatial_transformer.cuda()
self.colour_transformer = self.colour_transformer.cuda()
self.coloured_patches = None
# Optimisation is run in low-res, final rendering is in high-res.
self._high_res = is_high_res
# Store the background image (low- and high-res).
self.background_image = background_image
if self.background_image is not None:
print(f"Background image of size {self.background_image.shape}")
# Store the dataset (low- and high-res).
self._dataset = segmented_data
# print(f"There are {len(self._dataset)} image patches in the dataset")
# Initial set of indices pointing to self._num_patches first dataset images.
self.patch_indices = [np.arange(self._num_patches) % len(self._dataset)
for _ in range(pop_size)]
# Patches in low and high-res, will be initialised on demand.
self.patches = None
def store_patches(self, population_idx=None):
"""Store the image patches for each population element."""
if self._high_res:
for _ in range(20):
print("NOT STORING HIGH-RES PATCHES")
return
if population_idx is not None and self.patches is not None:
list_indices_population = [population_idx]
self.patches[population_idx, :, :4, :, :] = 0
else:
list_indices_population = np.arange(self._pop_size)
self.patches = torch.zeros(
self._pop_size, self._num_patches, 5, self._canvas_height,
self._canvas_width).to(self.device)
# Put the segmented data into the patches.
for i in list_indices_population:
for j in range(self._num_patches):
patch_i_j = self._fetch_patch(i, j, self._high_res)
self.patches[i, j, ...] = patch_i_j
def _fetch_patch(self, idx_population, idx_patch, is_high_res):
"""Helper function to fetch a patch and store on the whole canvas."""
k = self.patch_indices[idx_population][idx_patch]
patch_j = torch.tensor(
self._dataset[k].swapaxes(0, 2) / 255.0).to(self.device)
width_j = patch_j.shape[1]
height_j = patch_j.shape[2]
if is_high_res:
w0 = int((self._canvas_width * self._high_res_multiplier - width_j)
/ 2.0)
h0 = int((self._canvas_height * self._high_res_multiplier - height_j)
/ 2.0)
mapped_patch = torch.zeros(
5,
self._canvas_height * self._high_res_multiplier,
self._canvas_width * self._high_res_multiplier
).to("cpu")
else:
w0 = int((self._canvas_width - width_j) / 2.0)
h0 = int((self._canvas_height - height_j) / 2.0)
mapped_patch = torch.zeros(
5, self._canvas_height, self._canvas_width).to(self.device)
mapped_patch[4, :, :] = 1.0
mapped_patch[:4, w0:(w0 + width_j), h0:(h0 + height_j)] = patch_j
return mapped_patch
def copy_and_mutate_s(self, parent, child):
with torch.no_grad():
# Copy the patches indices from the parent to the child.
self.patch_indices[child] = copy.deepcopy(self.patch_indices[parent])
# Mutate the child patches with a single swap from the original dataset.
if self.config["patch_mutation_probability"] > np.random.uniform():
idx_dataset = np.random.randint(len(self._dataset))
idx_patch = np.random.randint(self._num_patches)
self.patch_indices[child][idx_patch] = idx_dataset
# Update all the patches for the child.
self.store_patches(child)
self.spatial_transformer.copy_and_mutate_s(parent, child)
self.colour_transformer.copy_and_mutate_s(parent, child)
def copy_from(self, other, idx_to, idx_from):
"""Copy parameters from other collage generator, for selected indices."""
assert idx_to < self._pop_size
with torch.no_grad():
self.patch_indices[idx_to] = copy.deepcopy(other.patch_indices[idx_from])
self.spatial_transformer.copy_from(
other.spatial_transformer, idx_to, idx_from)
self.colour_transformer.copy_from(
other.colour_transformer, idx_to, idx_from)
if not self._high_res:
self.store_patches(idx_to)
def forward(self, params=None):
"""Input-less forward function."""
assert not self._high_res
if self.patches is None:
self.store_patches()
shifted_patches = self.spatial_transformer(self.patches)
background_image = self.background_image
if params is not None and "no_background" in params:
print("Not using background_image")
background_image = None
self.coloured_patches = self.colour_transformer(shifted_patches)
if self.config["render_method"] == "transparency":
img = rendering.population_render_transparency(
self.coloured_patches,
invert_colours=self.config["invert_colours"], b=background_image)
elif self.config["render_method"] == "masked_transparency_clipped":
img = rendering.population_render_masked_transparency(
self.coloured_patches, mode="clipped",
invert_colours=self.config["invert_colours"], b=background_image)
elif self.config["render_method"] == "masked_transparency_normed":
img = rendering.population_render_masked_transparency(
self.coloured_patches, mode="normed",
invert_colours=self.config["invert_colours"], b=background_image)
elif self.config["render_method"] == "opacity":
img = rendering.population_render_overlap(
self.coloured_patches,
invert_colours=self.config["invert_colours"], b=background_image)
    else:
      raise ValueError(
          f"Unhandled render method {self.config['render_method']}")
if params is not None and "no_background" in params:
print("Setting alpha to zero outside of patches")
mask = self.coloured_patches[:, :, 3:4, :, :].sum(1) > 0
mask = mask.permute(0, 2, 3, 1)
img = torch.concat([img, mask], axis=-1)
return img
def forward_high_res(self, params=None):
"""Input-less forward function."""
assert self._high_res
    max_render_size = 1000
    if params is not None and params.get("max_block_size_high_res"):
      max_render_size = params["max_block_size_high_res"]
w = self._canvas_width * self._high_res_multiplier
h = self._canvas_height * self._high_res_multiplier
if (self._high_res_multiplier % 8 == 0 and
self._canvas_width * 8 < max_render_size and
self._canvas_height * 8 < max_render_size):
num_w = int(self._high_res_multiplier / 8)
num_h = int(self._high_res_multiplier / 8)
delta_w = self._canvas_width * 8
delta_h = self._canvas_height * 8
elif (self._high_res_multiplier % 4 == 0 and
self._canvas_width * 4 < max_render_size and
self._canvas_height * 4 < max_render_size):
num_w = int(self._high_res_multiplier / 4)
num_h = int(self._high_res_multiplier / 4)
delta_w = self._canvas_width * 4
delta_h = self._canvas_height * 4
elif (self._high_res_multiplier % 2 == 0 and
self._canvas_width * 2 < max_render_size and
self._canvas_height * 2 < max_render_size):
num_w = int(self._high_res_multiplier / 2)
num_h = int(self._high_res_multiplier / 2)
delta_w = self._canvas_width * 2
delta_h = self._canvas_height * 2
else:
num_w = self._high_res_multiplier
num_h = self._high_res_multiplier
delta_w = self._canvas_width
delta_h = self._canvas_height
img = torch.zeros((1, h, w, 4))
img[..., 3] = 1.0
background_image = self.background_image
if params is not None and "no_background" in params:
print("Not using background_image")
background_image = None
for u in range(num_w):
for v in range(num_h):
x0 = u * delta_w
x1 = (u + 1) * delta_w
y0 = v * delta_h
y1 = (v + 1) * delta_h
print(f"[{u}, {v}] idx [{x0}:{x1}], [{y0}:{y1}]")
# Extract full patches, apply spatial transform individually and crop.
shifted_patches_uv = []
for idx_patch in range(self._num_patches):
patch = self._fetch_patch(0, idx_patch, True).unsqueeze(0)
patch_uv = self.spatial_transformer(patch, idx_patch)
patch_uv = patch_uv[:, :, :, y0:y1, x0:x1]
shifted_patches_uv.append(patch_uv)
shifted_patches_uv = torch.cat(shifted_patches_uv, 1)
# Crop background?
if background_image is not None:
background_image_uv = background_image[:, y0:y1, x0:x1]
else:
background_image_uv = None
        # Apply colour transform and render.
coloured_patches_uv = self.colour_transformer(shifted_patches_uv)
if self.config["render_method"] == "transparency":
img_uv = rendering.population_render_transparency(
coloured_patches_uv,
invert_colours=self.config["invert_colours"],
b=background_image_uv)
elif self.config["render_method"] == "masked_transparency_clipped":
img_uv = rendering.population_render_masked_transparency(
coloured_patches_uv, mode="clipped",
invert_colours=self.config["invert_colours"],
b=background_image_uv)
elif self.config["render_method"] == "masked_transparency_normed":
img_uv = rendering.population_render_masked_transparency(
coloured_patches_uv, mode="normed",
invert_colours=self.config["invert_colours"],
b=background_image_uv)
elif self.config["render_method"] == "opacity":
img_uv = rendering.population_render_overlap(
coloured_patches_uv,
invert_colours=self.config["invert_colours"],
b=background_image_uv)
        else:
          raise ValueError(
              f"Unhandled render method {self.config['render_method']}")
if params is not None and "no_background" in params:
print("Setting alpha to zero outside of patches")
mask_uv = coloured_patches_uv[:, :, 3:4, :, :].sum(1) > 0
mask_uv = mask_uv.permute(0, 2, 3, 1)
img_uv = torch.concat([img_uv, mask_uv], axis=-1)
img[0, y0:y1, x0:x1, :4] = img_uv
else:
img[0, y0:y1, x0:x1, :3] = img_uv
print(f"Finished [{u}, {v}] idx [{x0}:{x1}], [{y0}:{y1}]")
print(img.size())
return img
def tensors_to(self, device):
self.spatial_transformer.tensor_to(device)
self.colour_transformer.tensor_to(device)
if self.patches is not None:
self.patches = self.patches.to(device)
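# For reference, patches in PopulationCollage use a 5-channel convention along
# dim 2: channels 0-2 are RGB, channel 3 is the alpha mask used for rendering
# and channel 4 is the ordering value used by the "opacity" render method. The
# sketch below is illustrative only (shapes and values are made up; it is not
# called anywhere) and builds one dummy patch tensor in that layout to make the
# indexing in store_patches/_fetch_patch concrete.
def _example_patch_layout(height=64, width=64):
  """Illustrative only: one population member, one patch, [S, B, 5, H, W]."""
  patch = torch.zeros(1, 1, 5, height, width)
  patch[:, :, :3, :, :] = 0.5  # RGB content.
  patch[:, :, 3, :, :] = 1.0   # Alpha mask: fully opaque.
  patch[:, :, 4, :, :] = 0.0   # Order channel, only used for overlap rendering.
  return patch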
|
arnheim-main
|
arnheim_3/src/collage_generator.py
|
"""Collage-making class definitions.
Arnheim 3 - Collage
Piotr Mirowski, Dylan Banarse, Mateusz Malinowski, Yotam Doron, Oriol Vinyals,
Simon Osindero, Chrisantha Fernando
DeepMind, 2021-2022
Copyright 2021 DeepMind Technologies Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import pathlib
from . import training
from . import video_utils
from .collage_generator import PopulationCollage
import cv2
import numpy as np
from .patches import get_segmented_data
import torch
import yaml
class CollageMaker():
"""Makes a single collage image.
  A collage image (aka tile) may involve 3x3 parallel CLIP evaluations when the
  compositional image option is enabled.
"""
def __init__(
self,
prompts,
segmented_data,
background_image,
clip_model,
file_basename,
device,
config):
"""Create a single square collage image.
Args:
prompts: list of prompts. Optional compositional prompts plus a global one
segmented_data: patches for the collage
background_image: background image for the collage
clip_model: CLIP model
file_basename: string, name to use for the saved files
device: CUDA device
config: dictionary with the following fields.
Config fields:
compositional_image: bool, whether to use 3x3 CLIPs
output_dir: string, directory to save working and final images
video_steps: int, how many steps between video frames; 0 is never
population_video: bool, create a video with members of the population
use_normalized_clip: bool, colour-correct images for CLIP evaluation
use_image_augmentations: bool, use image augmentations in evaluation
optim_steps: int, training steps for the collage
pop_size: int, size of population being evolved
      evolution_frequency: int, how many steps between evolution evaluations
initial_search_size: int, initial random search size (1 means no search)
"""
self._prompts = prompts
self._segmented_data = segmented_data
self._background_image = background_image
self._clip_model = clip_model
self._file_basename = file_basename
self._device = device
self._config = config
self._compositional_image = self._config["compositional_image"]
self._output_dir = self._config["output_dir"]
self._use_normalized_clip = self._config["use_normalized_clip"]
self._use_image_augmentations = self._config["use_image_augmentations"]
self._optim_steps = self._config["optim_steps"]
self._pop_size = self._config["pop_size"]
self._population_video = self._config["population_video"]
self._use_evolution = self._config["pop_size"] > 1
self._evolution_frequency = self._config["evolution_frequency"]
self._initial_search_size = self._config["initial_search_size"]
self._video_steps = self._config["video_steps"]
self._video_writer = None
self._population_video_writer = None
if self._video_steps:
self._video_writer = video_utils.VideoWriter(
filename=f"{self._output_dir}/{self._file_basename}.mp4")
if self._population_video:
self._population_video_writer = video_utils.VideoWriter(
filename=f"{self._output_dir}/{self._file_basename}_pop_sample.mp4")
if self._compositional_image:
if len(self._prompts) != 10:
        raise ValueError(
            f"Missing compositional image prompts; found {len(self._prompts)}")
print("Global prompt is", self._prompts[-1])
print("Composition prompts", self._prompts)
else:
if len(self._prompts) != 1:
        raise ValueError(
            f"Expected a single prompt; found {len(self._prompts)}")
print("CLIP prompt", self._prompts[0])
# Prompt to CLIP features.
self._prompt_features = training.compute_text_features(
self._prompts, self._clip_model, self._device)
self._augmentations = training.augmentation_transforms(
224,
use_normalized_clip=self._use_normalized_clip,
use_augmentation=self._use_image_augmentations)
# Create population of collage generators.
self._generator = PopulationCollage(
config=self._config,
device=self._device,
is_high_res=False,
pop_size=self._pop_size,
segmented_data=self._segmented_data,
background_image=self._background_image)
self._optimizer = training.make_optimizer(self._generator,
self._config["learning_rate"])
self._step = 0
self._losses_history = []
self._losses_separated_history = []
@property
def generator(self):
return self._generator
@property
def step(self):
return self._step
def initialise(self):
"""Initialise the collage from checkpoint or search over hyper-parameters."""
# If we use a checkpoint.
if len(self._config["init_checkpoint"]) > 0:
self.load(self._config["init_checkpoint"])
return
# If we do an initial random search.
if self._initial_search_size > 1:
print("\nInitial random search over "
f"{self._initial_search_size} individuals")
for j in range(self._pop_size):
generator_search = PopulationCollage(
config=self._config,
device=self._device,
pop_size=self._initial_search_size,
is_high_res=False,
segmented_data=self._segmented_data,
background_image=self._background_image)
self._optimizer = training.make_optimizer(generator_search,
self._config["learning_rate"])
num_steps_search = self._config["initial_search_num_steps"]
if num_steps_search > 1:
# Run several steps of gradient descent?
for step_search in range(num_steps_search):
losses, _, _ = self._train(
step=step_search, last_step=False,
generator=generator_search)
else:
        # Or simply evaluate the randomly initialised parameters.
_, _, losses, _ = training.evaluation(
t=0,
clip_enc=self._clip_model,
generator=generator_search,
augment_trans=self._augmentations,
text_features=self._prompt_features,
prompts=self._prompts,
config=self._config,
device=self._device)
print(f"Search {losses}")
idx_best = np.argmin(losses)
print(f"Choose {idx_best} with loss {losses[idx_best]}")
self._generator.copy_from(generator_search, j, idx_best)
del generator_search
print("Initial random search done\n")
self._optimizer = training.make_optimizer(self._generator,
self._config["learning_rate"])
def load(self, path_checkpoint):
"""Load an existing generator from state_dict stored in `path`."""
print(f"\nLoading spatial and colour transforms from {path_checkpoint}...")
state_dict = torch.load(path_checkpoint, map_location=self._device.type)
this_state_dict = self._generator.state_dict()
    if state_dict.keys() != this_state_dict.keys():
      print("Current and loaded state_dict keys do not match; "
            "aborting load from checkpoint.")
      return
    for key in this_state_dict:
      this_shape = this_state_dict[key].shape
      shape = state_dict[key].shape
      if this_shape != shape:
        print(f"state_dict[{key}] shapes do not match: {this_shape} vs. {shape}")
        print("Aborting load from checkpoint.")
        return
print(f"Checkpoint {path_checkpoint} restored.")
self._generator.load_state_dict(state_dict)
def _train(self, step, last_step, generator):
losses, losses_separated, img_batch = training.step_optimization(
t=step,
clip_enc=self._clip_model,
lr_scheduler=self._optimizer,
generator=generator,
augment_trans=self._augmentations,
text_features=self._prompt_features,
prompts=self._prompts,
config=self._config,
device=self._device,
final_step=last_step)
return losses, losses_separated, img_batch
def loop(self):
"""Main optimisation/image generation loop. Can be interrupted."""
if self._step == 0:
print("\nStarting optimization of collage.")
else:
print(f"\nContinuing optimization of collage at step {self._step}.")
if self._video_steps:
print("Aborting video creation (does not work when interrupted).")
self._video_steps = 0
self._video_writer = None
self._population_video_writer = None
while self._step < self._optim_steps:
last_step = self._step == (self._optim_steps - 1)
losses, losses_separated, img_batch = self._train(
step=self._step, last_step=last_step, generator=self._generator)
self._add_video_frames(img_batch, losses)
self._losses_history.append(losses)
self._losses_separated_history.append(losses_separated)
if (self._use_evolution and self._step
and self._step % self._evolution_frequency == 0):
training.population_evolution_step(
self._generator, self._config, losses)
self._step += 1
def high_res_render(self,
segmented_data_high_res,
background_image_high_res,
gamma=1.0,
show=True,
save=True,
no_background=False):
"""Save and/or show a high res render using high-res patches."""
generator_cpu = PopulationCollage(
config=self._config,
device="cpu",
is_high_res=True,
pop_size=1,
segmented_data=segmented_data_high_res,
background_image=background_image_high_res)
idx_best = np.argmin(self._losses_history[-1])
lowest_loss = self._losses_history[-1][idx_best]
print(f"Lowest loss: {lowest_loss} @ index {idx_best}: ")
generator_cpu.copy_from(self._generator, 0, idx_best)
generator_cpu = generator_cpu.to("cpu")
generator_cpu.tensors_to("cpu")
params = {"gamma": gamma,
"max_block_size_high_res": self._config.get(
"max_block_size_high_res")}
if no_background:
params["no_background"] = True
with torch.no_grad():
img_high_res = generator_cpu.forward_high_res(params)
img = img_high_res.detach().cpu().numpy()[0]
img = np.clip(img, 0.0, 1.0)
if save or show:
# Swap Red with Blue
if img.shape[2] == 4:
print("Image has alpha channel")
img = img[..., [2, 1, 0, 3]]
else:
img = img[..., [2, 1, 0]]
img = np.clip(img, 0.0, 1.0) * 255
if save:
if no_background:
image_filename = f"{self._output_dir}/{self._file_basename}_no_bkgd.png"
else:
image_filename = f"{self._output_dir}/{self._file_basename}.png"
cv2.imwrite(image_filename, img)
if show:
video_utils.cv2_imshow(img)
img = img[:, :, :3]
return img
def finish(self):
"""Finish video writing and save all other data."""
if self._losses_history:
losses_filename = f"{self._output_dir}/{self._file_basename}_losses"
training.plot_and_save_losses(self._losses_history,
title=f"{self._file_basename} Losses",
filename=losses_filename,
show=self._config["gui"])
if self._video_steps:
self._video_writer.close()
if self._population_video_writer:
self._population_video_writer.close()
metadata_filename = f"{self._output_dir}/{self._file_basename}.yaml"
with open(metadata_filename, "w") as f:
yaml.dump(self._config, f, default_flow_style=False, allow_unicode=True)
last_step = self._step
last_loss = float(np.amin(self._losses_history[-1]))
return (last_step, last_loss)
def _add_video_frames(self, img_batch, losses):
"""Add images from numpy image batch to video writers.
Args:
img_batch: numpy array, batch of images (S,H,W,C)
losses: numpy array, losses for each generator (S,N)
"""
if self._video_steps and self._step % self._video_steps == 0:
# Write image to video.
best_img = img_batch[np.argmin(losses)]
self._video_writer.add(cv2.resize(
best_img, (best_img.shape[1] * 3, best_img.shape[0] * 3)))
if self._population_video_writer:
laid_out = video_utils.layout_img_batch(img_batch)
self._population_video_writer.add(cv2.resize(
laid_out, (laid_out.shape[1] * 2, laid_out.shape[0] * 2)))
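# Quick illustration (hypothetical numbers, not called by the class above) of
# how the population losses are used in CollageMaker: the individual with the
# lowest loss is selected for the high-res render, and an evolution step fires
# every `evolution_frequency` optimisation steps.
def _example_selection_and_evolution(losses=(0.71, 0.64, 0.69), step=40,
                                     evolution_frequency=20):
  idx_best = int(np.argmin(losses))
  do_evolution = step > 0 and step % evolution_frequency == 0
  return idx_best, do_evolution  # -> (1, True)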
class CollageTiler():
"""Creates a large collage by producing multiple overlapping collages."""
def __init__(self,
prompts,
fixed_background_image,
clip_model,
device,
config):
"""Create CollageTiler.
Args:
prompts: list of prompts for the collage maker
fixed_background_image: highest res background image
clip_model: CLIP model
device: CUDA device
config: dictionary with the following fields below:
Config fields used:
      tiles_wide: number of tiles wide
      tiles_high: number of tiles high
background_use: how to use the background, e.g. per tile or whole image
compositional_image: bool, compositional for multi-CLIP collage tiles
high_res_multiplier: int, how much bigger is the final high-res image
output_dir: directory for generated files
torch_device: string, either cpu or cuda
"""
self._prompts = prompts
self._fixed_background_image = fixed_background_image
self._clip_model = clip_model
self._device = device
self._config = config
self._tiles_wide = config["tiles_wide"]
self._tiles_high = config["tiles_high"]
self._background_use = config["background_use"]
self._compositional_image = config["compositional_image"]
self._high_res_multiplier = config["high_res_multiplier"]
self._output_dir = config["output_dir"]
self._torch_device = config["torch_device"]
pathlib.Path(self._output_dir).mkdir(parents=True, exist_ok=True)
self._tile_basename = "tile_y{}_x{}{}"
self._tile_width = 448 if self._compositional_image else 224
self._tile_height = 448 if self._compositional_image else 224
self._overlap = 1. / 3.
# Size of bigger image
self._width = int(((2 * self._tiles_wide + 1) * self._tile_width) / 3.)
self._height = int(((2 * self._tiles_high + 1) * self._tile_height) / 3.)
self._high_res_tile_width = self._tile_width * self._high_res_multiplier
self._high_res_tile_height = self._tile_height * self._high_res_multiplier
self._high_res_width = self._high_res_tile_width * self._tiles_wide
self._high_res_height = self._high_res_tile_height * self._tiles_high
self._print_info()
self._x = 0
self._y = 0
self._collage_maker = None
self._fixed_background = self._scale_fixed_background(high_res=True)
def _print_info(self):
"""Print some debugging information."""
print(f"Tiling {self._tiles_wide}x{self._tiles_high} collages")
print("Optimisation:")
print(f"Tile size: {self._tile_width}x{self._tile_height}")
print(f"Global size: {self._width}x{self._height} (WxH)")
print("High res:")
print(
f"Tile size: {self._high_res_tile_width}x{self._high_res_tile_height}")
print(f"Global size: {self._high_res_width}x{self._high_res_height} (WxH)")
for i, tile_prompts in enumerate(self._prompts):
print(f"Tile {i} prompts: {tile_prompts}")
def initialise(self):
"""Initialise the collage maker, optionally from a checkpoint or initial search."""
if not self._collage_maker:
# Create new collage maker with its unique background.
print(f"\nNew collage creator for y{self._y}, x{self._x} with bg")
tile_bg, self._tile_high_res_bg = self._get_tile_background()
video_utils.show_and_save(tile_bg, self._config,
img_format="SCHW", stitch=False,
show=self._config["gui"])
prompts_x_y = self._prompts[self._y * self._tiles_wide + self._x]
segmented_data, self._segmented_data_high_res = (
get_segmented_data(
self._config, self._x + self._y * self._tiles_wide))
self._collage_maker = CollageMaker(
prompts=prompts_x_y,
segmented_data=segmented_data,
background_image=tile_bg,
clip_model=self._clip_model,
file_basename=self._tile_basename.format(self._y, self._x, ""),
device=self._device,
config=self._config)
self._collage_maker.initialise()
def load(self, path):
"""Load an existing CollageMaker generator from state_dict stored in `path`."""
self._collage_maker.load(path)
def loop(self):
"""Re-entrable loop to optmise collage."""
res_training = {}
while self._y < self._tiles_high:
while self._x < self._tiles_wide:
if not self._collage_maker:
self.initialise()
self._collage_maker.loop()
collage_img = self._collage_maker.high_res_render(
self._segmented_data_high_res,
self._tile_high_res_bg,
gamma=1.0,
show=self._config["gui"],
save=True)
self._collage_maker.high_res_render(
self._segmented_data_high_res,
self._tile_high_res_bg,
gamma=1.0,
show=False,
save=True,
no_background=True)
self._save_tile(collage_img / 255)
(last_step, last_loss) = self._collage_maker.finish()
res_training[f"tile_{self._y}_{self._x}_loss"] = last_loss
res_training[f"tile_{self._y}_{self._x}_step"] = last_step
del self._collage_maker
self._collage_maker = None
self._x += 1
self._y += 1
self._x = 0
# Save results of all optimisations.
res_filename = f"{self._output_dir}/results_training.yaml"
with open(res_filename, "w") as f:
yaml.dump(res_training, f, default_flow_style=False, allow_unicode=True)
return collage_img # SHWC
def _save_tile(self, img):
background_image_np = np.asarray(img)
background_image_np = background_image_np[..., ::-1].copy()
filename = self._tile_basename.format(self._y, self._x, ".npy")
np.save(f"{self._output_dir}/{filename}", background_image_np)
def _save_tile_arrays(self, all_arrays):
filename = self._tile_basename.format(self._y, self._x, "_arrays.npy")
np.save(f"{self._output_dir}/{filename}", all_arrays)
def _scale_fixed_background(self, high_res=True):
"""Get correctly sized background image."""
if self._fixed_background_image is None:
return None
multiplier = self._high_res_multiplier if high_res else 1
if self._background_use == "Local":
height = self._tile_height * multiplier
width = self._tile_width * multiplier
elif self._background_use == "Global":
height = self._height * multiplier
width = self._width * multiplier
return cv2.resize(self._fixed_background_image.astype(float),
(width, height))
def _get_tile_background(self):
"""Get the background for a particular tile.
This involves getting bordering imagery from left, top left, above and top
right, where appropriate.
i.e. tile (1,1) shares overlap with (0,1), (0,2) and (1,0)
(0,0), (0,1), (0,2), (0,3)
(1,0), (1,1), (1,2), (1,3)
(2,0), (2,1), (2,2), (2,3)
Note that (0,0) is not needed as its contribution is already in (0,1)
Returns:
background_image: small background for optimisation
background_image_high_res: high resolution background
"""
if self._fixed_background is None:
tile_border_bg = np.zeros((self._high_res_tile_height,
self._high_res_tile_width, 3))
else:
if self._background_use == "Local":
tile_border_bg = self._fixed_background.copy()
else: # Crop out section for this tile.
        origin_y = self._y * (self._high_res_tile_height
                              - math.ceil(self._tile_height * self._overlap)
                              * self._high_res_multiplier)
        origin_x = self._x * (self._high_res_tile_width
                              - math.ceil(self._tile_width * self._overlap)
                              * self._high_res_multiplier)
        tile_border_bg = self._fixed_background[
            origin_y : origin_y + self._high_res_tile_height,
            origin_x : origin_x + self._high_res_tile_width, :]
tile_idx = dict()
if self._x > 0:
tile_idx["left"] = (self._y, self._x - 1)
if self._y > 0:
tile_idx["above"] = (self._y - 1, self._x)
    if self._y > 0 and self._x < self._tiles_wide - 1:  # Not the last tile in the row.
      tile_idx["above_right"] = (self._y - 1, self._x + 1)
    # Get and insert bordering tile content in this order.
if "above" in tile_idx:
self._copy_overlap(tile_border_bg, "above", tile_idx["above"])
if "above_right" in tile_idx:
self._copy_overlap(tile_border_bg, "above_right", tile_idx["above_right"])
if "left" in tile_idx:
self._copy_overlap(tile_border_bg, "left", tile_idx["left"])
background_image = self._resize_image_for_torch(
tile_border_bg, self._tile_height, self._tile_width)
background_image_high_res = self._resize_image_for_torch(
tile_border_bg,
self._high_res_tile_height,
self._high_res_tile_width).to("cpu")
return background_image, background_image_high_res
def _resize_image_for_torch(self, img, height, width):
# Resize and permute to format used by Collage class (SCHW).
img = torch.tensor(cv2.resize(img.astype(float), (width, height)))
if self._torch_device == "cuda":
img = img.cuda()
return img.permute(2, 0, 1).to(torch.float32)
def _copy_overlap(self, target, location, tile_idx):
"""Copy area from tile adjacent to target tile to target tile."""
big_height = self._high_res_tile_height
big_width = self._high_res_tile_width
pixel_overlap = int(big_width * self._overlap)
filename = self._tile_basename.format(tile_idx[0], tile_idx[1], ".npy")
# print(f"Loading tile {filename})
source = np.load(f"{self._output_dir}/{filename}")
if location == "above":
target[0 : pixel_overlap, 0 : big_width, :] = source[
big_height - pixel_overlap : big_height, 0 : big_width, :]
if location == "left":
target[:, 0 : pixel_overlap, :] = source[
:, big_width - pixel_overlap : big_width, :]
elif location == "above_right":
target[
0 : pixel_overlap, big_width - pixel_overlap : big_width, :] = source[
big_height - pixel_overlap : big_height, 0 : pixel_overlap, :]
def assemble_tiles(self):
"""Stitch together the whole image from saved tiles."""
big_height = self._high_res_tile_height
big_width = self._high_res_tile_width
full_height = int((big_height + 2 * big_height * self._tiles_high) / 3)
full_width = int((big_width + 2 * big_width * self._tiles_wide) / 3)
full_image = np.zeros((full_height, full_width, 3)).astype("float32")
for y in range(self._tiles_high):
for x in range(self._tiles_wide):
filename = self._tile_basename.format(y, x, ".npy")
tile = np.load(f"{self._output_dir}/{filename}")
y_offset = int(big_height * y * 2 / 3)
x_offset = int(big_width * x * 2 / 3)
full_image[y_offset : y_offset + big_height,
x_offset : x_offset + big_width, :] = tile[:, :, :]
filename = "final_tiled_image"
print(f"Saving assembled tiles to {filename}")
video_utils.show_and_save(
full_image, self._config, img_format="SHWC", stitch=False,
filename=filename, show=self._config["gui"])
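# The tiling arithmetic above assumes a 1/3 overlap between neighbouring tiles:
# tile x starts at int(tile_size * x * 2 / 3) and the assembled width works out
# to int((2 * tiles_wide + 1) * tile_size / 3). The check below is illustrative
# only (numbers are arbitrary, nothing in the class calls it).
def _example_tiling_dimensions(tiles_wide=3, tile_size=448):
  last_offset = int(tile_size * (tiles_wide - 1) * 2 / 3)
  full_width_from_offsets = last_offset + tile_size
  full_width_from_formula = int(((2 * tiles_wide + 1) * tile_size) / 3)
  assert full_width_from_offsets == full_width_from_formula
  return full_width_from_formula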
|
arnheim-main
|
arnheim_3/src/collage.py
|
"""Video utility functions, image rendering and display.
Arnheim 3 - Collage
Piotr Mirowski, Dylan Banarse, Mateusz Malinowski, Yotam Doron, Oriol Vinyals,
Simon Osindero, Chrisantha Fernando
DeepMind, 2021-2022
Copyright 2021 DeepMind Technologies Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import io
import os
import pathlib
import cv2
import numpy as np
import requests
import torch
try:
from google.colab.patches import cv2_imshow # pylint: disable=g-import-not-at-top
except: # pylint: disable=bare-except
def cv2_imshow(img, name="CollageGenerator"):
if img.dtype == np.float32 and img.max() > 1.:
img = img.astype(np.uint8)
cv2.imshow(name, img)
cv2.waitKey(1)
def load_image(filename, as_cv2_image=False, show=False):
"""Load an image as [0,1] RGB numpy array or cv2 image format."""
img = cv2.imread(filename)
if show:
cv2_imshow(img)
if as_cv2_image:
return img # With colour format BGR
img = np.asarray(img)
return img[..., ::-1] / 255. # Reverse colour dim to convert BGR to RGB
def cached_url_download(url, file_format="np_array"):
"""Download file from URL and cache locally."""
cache_filename = os.path.basename(url)
cache = pathlib.Path(cache_filename)
if not cache.is_file():
print(f"Downloading {cache_filename} from {url}")
r = requests.get(url)
bytesio_object = io.BytesIO(r.content)
with open(cache_filename, "wb") as f:
f.write(bytesio_object.getbuffer())
else:
print("Using cached version of " + cache_filename)
if file_format == "np_array":
return np.load(cache, allow_pickle=True)
elif file_format == "cv2_image":
return load_image(cache.name, as_cv2_image=True, show=False)
elif file_format == "image_as_np":
return load_image(cache.name, as_cv2_image=False, show=False)
def layout_img_batch(img_batch, max_display=None):
img_np = img_batch.transpose(0, 2, 1, 3).clip(0.0, 1.0) # S, W, H, C
if max_display:
img_np = img_np[:max_display, ...]
sp = img_np.shape
img_np[:, 0, :, :] = 1.0 # White line separator
img_stitch = np.reshape(img_np, (sp[1] * sp[0], sp[2], sp[3]))
img_r = img_stitch.transpose(1, 0, 2) # H, W, C
return img_r
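# Illustrative only (arbitrary random data, never called by this module):
# stitch a batch of four 32x32 RGB images in SHWC format into one wide image
# separated by white lines.
def _example_layout_img_batch():
  batch = np.random.rand(4, 32, 32, 3).astype(np.float32)
  laid_out = layout_img_batch(batch)
  return laid_out.shape  # (32, 128, 3)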
def show_stitched_batch(img_batch, max_display=1, show=True):
"""Display stitched image batch.
Args:
    img_batch: image batch to display
max_display: max number of images to display from population
show: whether to display the image
Returns:
stitched image
"""
img_np = img_batch.detach().cpu().numpy()
img_np = np.clip(img_np, 0.0, 1.0)
num_images = img_np.shape[0]
img_np = img_np.transpose((0, 2, 3, 1))
laid_out = layout_img_batch(img_np, max_display)
if show:
cv2_imshow(cv2.cvtColor(laid_out, cv2.COLOR_BGR2RGB) * 255)
return laid_out
def show_and_save(img_batch, config, t=None,
max_display=1, stitch=True,
img_format="SCHW", show=True, filename=None):
"""Save and display images.
Args:
img_batch: batch of images to display
config: dictionary of all config settings
t: time step
max_display: max number of images to display from population
stitch: append images side-by-side
img_format: SHWC or SCHW (the latter used by CLIP)
show: whether to display the image
filename: save image using filename, if provided
Returns:
stitched image or None
"""
if isinstance(img_batch, torch.Tensor):
img_np = img_batch.detach().cpu().numpy()
else:
img_np = img_batch
if len(img_np.shape) == 3:
# if not a batch make it one.
img_np = np.expand_dims(img_np, axis=0)
if not stitch:
print(f"image (not stitch) min {img_np.min()}, max {img_np.max()}")
for i in range(min(max_display, img_np.shape[0])):
img = img_np[i]
if img_format == "SCHW": # Convert to SHWC
img = np.transpose(img, (1, 2, 0))
img = np.clip(img, 0.0, 1.0)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) * 255
if filename is not None:
if img.shape[1] > config["canvas_width"]:
filename = "highres_" + filename
output_dir = config["output_dir"]
filename = f"{output_dir}/{filename}_{str(i)}"
if t is not None:
filename += "_t_" + str(t)
filename += ".png"
print(f"Saving image {filename} (shape={img.shape})")
cv2.imwrite(filename, img)
if show:
cv2_imshow(img)
return None
else:
print(f"image (stitch) min {img_np.min()}, max {img_np.max()}")
img_np = np.clip(img_np, 0.0, 1.0)
if img_format == "SCHW": # Convert to SHWC
img_np = img_np.transpose((0, 2, 3, 1))
laid_out = layout_img_batch(img_np, max_display)
if filename is not None:
filename += ".png"
print(f"Saving temporary image {filename} (shape={laid_out.shape})")
cv2.imwrite(filename, cv2.cvtColor(laid_out, cv2.COLOR_BGR2RGB) * 255)
if show:
cv2_imshow(cv2.cvtColor(laid_out, cv2.COLOR_BGR2RGB) * 255)
return laid_out
class VideoWriter:
"""Create a video from image frames."""
def __init__(self, filename="_autoplay.mp4", fps=20.0, show=False, **kw):
"""Video creator.
    Creates and displays a video made from frames. The default
filename causes the video to be displayed on exit.
Args:
filename: name of video file
fps: frames per second for video
show: display video on close
**kw: args to be passed to FFMPEG_VideoWriter
Returns:
VideoWriter instance.
"""
self.writer = None
self.params = dict(filename=filename, fps=fps, **kw)
self._show = show
print("No video writing implemented")
def add(self, img):
"""Add image to video.
    Add a new frame to the video file, creating the VideoWriter if required.
Args:
img: array-like frame, shape [X, Y, 3] or [X, Y]
Returns:
None
"""
pass
# img = np.asarray(img)
# if self.writer is None:
# h, w = img.shape[:2]
# self.writer = FFMPEG_VideoWriter(size=(w, h), **self.params)
# if img.dtype in [np.float32, np.float64]:
# img = np.uint8(img.clip(0, 1)*255)
# if len(img.shape) == 2:
# img = np.repeat(img[..., None], 3, -1)
# self.writer.write_frame(img)
def close(self):
if self.writer:
self.writer.close()
def __enter__(self):
return self
def __exit__(self, *kw):
self.close()
if self.params["filename"] == "_autoplay.mp4":
self.show()
def show(self, **kw):
"""Display video.
Args:
**kw: args to be passed to mvp.ipython_display
Returns:
None
"""
self.close()
fn = self.params["filename"]
if self._show:
display(mvp.ipython_display(fn, **kw)) # pylint: disable=undefined-variable
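# Minimal illustrative use of show_and_save (the random batch and config below
# are stand-ins, not values from the real pipeline; the helper is never called
# here): a batch of two 64x64 RGB images in SCHW format is stitched side by
# side without saving or displaying anything.
def _example_show_and_save():
  dummy_batch = torch.rand(2, 3, 64, 64)  # S, C, H, W with values in [0, 1].
  dummy_config = {"gui": False}           # Unused on this code path.
  laid_out = show_and_save(dummy_batch, dummy_config, max_display=2,
                           stitch=True, img_format="SCHW", show=False)
  return laid_out.shape  # (64, 128, 3)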
|
arnheim-main
|
arnheim_3/src/video_utils.py
|
"""RGB image rendering from patch data.
Arnheim 3 - Collage
Piotr Mirowski, Dylan Banarse, Mateusz Malinowski, Yotam Doron, Oriol Vinyals,
Simon Osindero, Chrisantha Fernando
DeepMind, 2021-2022
Copyright 2021 DeepMind Technologies Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
import torch.nn.functional as F
RENDER_EPSILON = 1e-8
RENDER_OVERLAP_TEMPERATURE = 0.1
RENDER_OVERLAP_ZERO_OFFSET = -5
RENDER_OVERLAP_MASK_THRESHOLD = 0.5
def population_render_transparency(x, invert_colours=False, b=None):
"""Render image from patches with transparancy.
Renders patches with transparency using black as the transparent colour.
Args:
x: tensor of transformed RGB image patches of shape [S, B, 5, H, W].
invert_colours: Invert all RGB values.
    b: optional tensor of background RGB image of shape [3, H, W].
  Returns:
    Tensor of rendered RGB images of shape [S, H, W, 3].
"""
# Sum the RGB patches [S, B, 3, H, W] as [S, 3, H, W].
x = x[:, :, :3, :, :] * x[:, :, 3:4, :, :]
y = x[:, :, :3, :, :].sum(1)
if invert_colours:
y[:, :3, :, :] = 1.0 - y[:, :3, :, :]
# Add backgrounds [S, 3, H, W].
if b is not None:
b = b.cuda() if x.is_cuda else b.cpu()
y = (y + b).clamp(0., 1.)
return y.clamp(0., 1.).permute(0, 2, 3, 1)
def population_render_masked_transparency(
x, mode, invert_colours=False, b=None):
"""Render image from patches using alpha channel for patch transparency.
Args:
x: tensor of transformed RGB image patches of shape [S, B, 5, H, W].
mode: ["clipped" | "normed"], methods of handling alpha with background.
invert_colours: invert RGB values
    b: optional tensor of background RGB image of shape [3, H, W].
  Returns:
    Tensor of rendered RGB images of shape [S, H, W, 3].
"""
# Get the patch mask [S, B, 1, H, W] and sum of masks [S, 1, H, W].
mask = x[:, :, 3:4, :, :]
mask_sum = mask.sum(1) + RENDER_EPSILON
# Mask the RGB patches [S, B, 4, H, W] -> [S, B, 3, H, W].
masked_x = x[:, :, :3, :, :] * mask
# Compute mean of the RGB patches [S, B, 3, H, W] as [S, 3, H, W].
x_sum = masked_x.sum(1)
y = torch.where(
mask_sum > RENDER_EPSILON, x_sum / mask_sum, mask_sum)
if invert_colours:
y[:, :3, :, :] = 1.0 - y[:, :3, :, :]
# Add backgrounds [S, 3, H, W].
if b is not None:
b = b.cuda() if x.is_cuda else b.cpu()
if mode == "normed":
mask_max = mask_sum.max(
dim=2, keepdim=True).values.max(dim=3, keepdim=True).values
mask = mask_sum / mask_max
elif mode == "clipped":
mask = mask_sum.clamp(0., 1.)
else:
raise ValueError(f"Unknown masked_transparency mode {mode}")
y = y[:, :3, :, :] * mask + b.unsqueeze(0)[:, :3, :, :] * (1 - mask)
return y.clamp(0., 1.).permute(0, 2, 3, 1)
def population_render_overlap(x, invert_colours=False, b=None):
"""Render image, overlaying patches on top of one another.
  Uses semi-translucent overlap, with the alpha channel as the mask
  and the 5th channel as the ordering of the overlapping patches.
Args:
x: tensor of transformed RGB image patches of shape [S, B, 5, H, W].
invert_colours: invert RGB values
    b: optional tensor of background RGB image of shape [3, H, W].
  Returns:
    Tensor of rendered RGB images of shape [S, H, W, 3].
"""
# Get the patch mask [S, B, 1, H, W].
mask = x[:, :, 3:4, :, :]
# Mask the patches [S, B, 4, H, W] -> [S, B, 3, H, W]
masked_x = x[:, :, :3, :, :] * mask * mask
# Mask the orders [S, B, 1, H, W] -> [S, B, 1, H, W]
order = torch.where(
mask > RENDER_OVERLAP_MASK_THRESHOLD,
x[:, :, 4:, :, :] * mask / RENDER_OVERLAP_TEMPERATURE,
mask + RENDER_OVERLAP_ZERO_OFFSET)
# Get weights from orders [S, B, 1, H, W]
weights = F.softmax(order, dim=1)
# Apply weights to masked patches and compute mean over patches [S, 3, H, W].
y = (weights * masked_x).sum(1)
if invert_colours:
y[:, :3, :, :] = 1.0 - y[:, :3, :, :]
if b is not None:
b = b.cuda() if x.is_cuda else b.cpu()
y = torch.where(mask.sum(1) > RENDER_OVERLAP_MASK_THRESHOLD, y[:, :3, :, :],
b.unsqueeze(0)[:, :3, :, :])
return y.clamp(0., 1.).permute(0, 2, 3, 1)
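# Illustrative smoke test (not part of the module above; shapes and colours are
# made up): render one population member containing two constant-colour patches
# with the transparency method, following the [S, B, 5, H, W] convention.
def _example_render_transparency():
  s, b, h, w = 1, 2, 8, 8
  patches = torch.zeros(s, b, 5, h, w)
  patches[:, 0, 0, :, :] = 1.0  # First patch: red in the RGB channels.
  patches[:, 1, 2, :, :] = 1.0  # Second patch: blue in the RGB channels.
  patches[:, :, 3, :, :] = 0.5  # Alpha for both patches.
  img = population_render_transparency(patches)
  return img.shape  # torch.Size([1, 8, 8, 3])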
|
arnheim-main
|
arnheim_3/src/rendering.py
|
"""Colour and affine transform classes.
Arnheim 3 - Collage
Piotr Mirowski, Dylan Banarse, Mateusz Malinowski, Yotam Doron, Oriol Vinyals,
Simon Osindero, Chrisantha Fernando
DeepMind, 2021-2022
Copyright 2021 DeepMind Technologies Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from kornia.color import hsv
import numpy as np
import torch
import torch.nn.functional as F
class PopulationAffineTransforms(torch.nn.Module):
"""Population-based Affine Transform network."""
def __init__(self, config, device, num_patches=1, pop_size=1,
requires_grad=True, is_high_res=False):
super(PopulationAffineTransforms, self).__init__()
self.config = config
self.device = device
self._pop_size = pop_size
self._is_high_res = is_high_res
print('PopulationAffineTransforms is_high_res={}, requires_grad={}'.format(
self._is_high_res, requires_grad))
self._min_rot = self.config['min_rot_deg'] * np.pi / 180.
self._max_rot = self.config['max_rot_deg'] * np.pi / 180.
matrices_translation = (
(np.random.rand(pop_size, num_patches, 2, 1)
* (self.config['max_trans_init'] - self.config['min_trans_init']))
+ self.config['min_trans_init'])
matrices_rotation = (
(np.random.rand(pop_size, num_patches, 1, 1)
* (self._max_rot - self._min_rot)) + self._min_rot)
matrices_scale = (
(np.random.rand(pop_size, num_patches, 1, 1)
* (self.config['max_scale'] - self.config['min_scale']))
+ self.config['min_scale'])
    matrices_squeeze = (
        (np.random.rand(pop_size, num_patches, 1, 1)
         * (self.config['max_squeeze'] - self.config['min_squeeze']))
        + self.config['min_squeeze'])
matrices_shear = (
(np.random.rand(pop_size, num_patches, 1, 1)
* (self.config['max_shear'] - self.config['min_shear']))
+ self.config['min_shear'])
self.translation = torch.nn.Parameter(
torch.tensor(matrices_translation, dtype=torch.float),
requires_grad=requires_grad)
self.rotation = torch.nn.Parameter(
torch.tensor(matrices_rotation, dtype=torch.float),
requires_grad=requires_grad)
self.scale = torch.nn.Parameter(
torch.tensor(matrices_scale, dtype=torch.float),
requires_grad=requires_grad)
self.squeeze = torch.nn.Parameter(
torch.tensor(matrices_squeeze, dtype=torch.float),
requires_grad=requires_grad)
self.shear = torch.nn.Parameter(
torch.tensor(matrices_shear, dtype=torch.float),
requires_grad=requires_grad)
self._identity = (
torch.ones((pop_size, num_patches, 1, 1)) * torch.eye(2).unsqueeze(0)
).to(self.device)
self._zero_column = torch.zeros(
(pop_size, num_patches, 2, 1)).to(self.device)
self._unit_row = (
torch.ones((pop_size, num_patches, 1, 1)) * torch.tensor([0., 0., 1.])
).to(self.device)
self._zeros = torch.zeros((pop_size, num_patches, 1, 1)).to(self.device)
def _clamp(self):
self.translation.data = self.translation.data.clamp(
min=self.config['min_trans'], max=self.config['max_trans'])
self.rotation.data = self.rotation.data.clamp(
min=self._min_rot, max=self._max_rot)
self.scale.data = self.scale.data.clamp(
min=self.config['min_scale'], max=self.config['max_scale'])
self.squeeze.data = self.squeeze.data.clamp(
min=self.config['min_squeeze'], max=self.config['max_squeeze'])
self.shear.data = self.shear.data.clamp(
min=self.config['min_shear'], max=self.config['max_shear'])
def copy_and_mutate_s(self, parent, child):
"""Copy parameters to child, mutating transform parameters."""
with torch.no_grad():
self.translation[child, ...] = (
self.translation[parent, ...]
+ self.config['pos_and_rot_mutation_scale'] * torch.randn(
self.translation[child, ...].shape).to(self.device))
self.rotation[child, ...] = (
self.rotation[parent, ...]
+ self.config['pos_and_rot_mutation_scale'] * torch.randn(
self.rotation[child, ...].shape).to(self.device))
self.scale[child, ...] = (
self.scale[parent, ...]
+ self.config['scale_mutation_scale'] * torch.randn(
self.scale[child, ...].shape).to(self.device))
self.squeeze[child, ...] = (
self.squeeze[parent, ...]
+ self.config['distort_mutation_scale'] * torch.randn(
self.squeeze[child, ...].shape).to(self.device))
self.shear[child, ...] = (
self.shear[parent, ...]
+ self.config['distort_mutation_scale'] * torch.randn(
self.shear[child, ...].shape).to(self.device))
def copy_from(self, other, idx_to, idx_from):
"""Copy parameters from other spatial transform, for selected indices."""
assert idx_to < self._pop_size
with torch.no_grad():
self.translation[idx_to, ...] = other.translation[idx_from, ...]
self.rotation[idx_to, ...] = other.rotation[idx_from, ...]
self.scale[idx_to, ...] = other.scale[idx_from, ...]
self.squeeze[idx_to, ...] = other.squeeze[idx_from, ...]
self.shear[idx_to, ...] = other.shear[idx_from, ...]
def forward(self, x, idx_patch=None):
self._clamp()
scale_affine_mat = torch.cat([
torch.cat([self.scale, self.shear], 3),
torch.cat([self._zeros, self.scale * self.squeeze], 3)], 2)
scale_affine_mat = torch.cat([
torch.cat([scale_affine_mat, self._zero_column], 3),
self._unit_row], 2)
rotation_affine_mat = torch.cat([
torch.cat([torch.cos(self.rotation), -torch.sin(self.rotation)], 3),
torch.cat([torch.sin(self.rotation), torch.cos(self.rotation)], 3)], 2)
rotation_affine_mat = torch.cat([
torch.cat([rotation_affine_mat, self._zero_column], 3),
self._unit_row], 2)
scale_rotation_mat = torch.matmul(scale_affine_mat,
rotation_affine_mat)[:, :, :2, :]
# Population and patch dimensions (0 and 1) need to be merged.
# E.g. from (POP_SIZE, NUM_PATCHES, CHANNELS, WIDTH, HEIGHT)
# to (POP_SIZE * NUM_PATCHES, CHANNELS, WIDTH, HEIGHT)
if idx_patch is not None and self._is_high_res:
scale_rotation_mat = scale_rotation_mat[:, idx_patch, :, :]
num_patches = 1
else:
scale_rotation_mat = scale_rotation_mat[:, :, :2, :].view(
1, -1, *(scale_rotation_mat[:, :, :2, :].size()[2:])).squeeze()
num_patches = x.size()[1]
x = x.view(1, -1, *(x.size()[2:])).squeeze()
# print('scale_rotation_mat', scale_rotation_mat.size())
# print('x', x.size())
scaled_rotated_grid = F.affine_grid(
scale_rotation_mat, x.size(), align_corners=True)
scaled_rotated_x = F.grid_sample(x, scaled_rotated_grid, align_corners=True)
translation_affine_mat = torch.cat([self._identity, self.translation], 3)
if idx_patch is not None and self._is_high_res:
translation_affine_mat = translation_affine_mat[:, idx_patch, :, :]
else:
translation_affine_mat = translation_affine_mat.view(
1, -1, *(translation_affine_mat.size()[2:])).squeeze()
# print('translation_affine_mat', translation_affine_mat.size())
# print('scaled_rotated_x', scaled_rotated_x.size())
translated_grid = F.affine_grid(
translation_affine_mat, scaled_rotated_x.size(), align_corners=True)
y = F.grid_sample(scaled_rotated_x, translated_grid, align_corners=True)
# print('y', y.size())
# print('num_patches', num_patches)
return y.view(self._pop_size, num_patches, *(y.size()[1:]))
def tensor_to(self, device):
self.translation = self.translation.to(device)
self.rotation = self.rotation.to(device)
self.scale = self.scale.to(device)
self.squeeze = self.squeeze.to(device)
self.shear = self.shear.to(device)
self._identity = self._identity.to(device)
self._zero_column = self._zero_column.to(device)
self._unit_row = self._unit_row.to(device)
self._zeros = self._zeros.to(device)
class PopulationOrderOnlyTransforms(torch.nn.Module):
"""No color transforms, just ordering of patches."""
def __init__(self, config, device, num_patches=1, pop_size=1,
requires_grad=True):
super(PopulationOrderOnlyTransforms, self).__init__()
self.config = config
self.device = device
self._pop_size = pop_size
print(f'PopulationOrderOnlyTransforms requires_grad={requires_grad}')
population_zeros = np.ones((pop_size, num_patches, 1, 1, 1))
population_orders = np.random.rand(pop_size, num_patches, 1, 1, 1)
self._zeros = torch.nn.Parameter(
torch.tensor(population_zeros, dtype=torch.float),
requires_grad=False)
self.orders = torch.nn.Parameter(
torch.tensor(population_orders, dtype=torch.float),
requires_grad=requires_grad)
self._hsv_to_rgb = hsv.HsvToRgb()
def _clamp(self):
self.orders.data = self.orders.data.clamp(min=0.0, max=1.0)
def copy_and_mutate_s(self, parent, child):
with torch.no_grad():
self.orders[child, ...] = self.orders[parent, ...]
def copy_from(self, other, idx_to, idx_from):
"""Copy parameters from other colour transform, for selected indices."""
assert idx_to < self._pop_size
with torch.no_grad():
self.orders[idx_to, ...] = other.orders[idx_from, ...]
def forward(self, x):
self._clamp()
colours = torch.cat(
[self._zeros, self._zeros, self._zeros, self._zeros, self.orders],
2)
return colours * x
def tensor_to(self, device):
self.orders = self.orders.to(device)
self._zeros = self._zeros.to(device)
class PopulationColourHSVTransforms(torch.nn.Module):
"""HSV color transforms and ordering of patches."""
def __init__(self, config, device, num_patches=1, pop_size=1,
requires_grad=True):
super(PopulationColourHSVTransforms, self).__init__()
self.config = config
self.device = device
print('PopulationColourHSVTransforms for {} patches, {} individuals'.format(
num_patches, pop_size))
self._pop_size = pop_size
self._min_hue = self.config['min_hue_deg'] * np.pi / 180.
self._max_hue = self.config['max_hue_deg'] * np.pi / 180.
print(f'PopulationColourHSVTransforms requires_grad={requires_grad}')
coeff_hue = (0.5 * (self._max_hue - self._min_hue) + self._min_hue)
coeff_sat = (0.5 * (self.config['max_sat'] - self.config['min_sat'])
+ self.config['min_sat'])
coeff_val = (0.5 * (self.config['max_val'] - self.config['min_val'])
+ self.config['min_val'])
population_hues = (np.random.rand(pop_size, num_patches, 1, 1, 1)
* coeff_hue)
population_saturations = np.random.rand(
pop_size, num_patches, 1, 1, 1) * coeff_sat
population_values = np.random.rand(
pop_size, num_patches, 1, 1, 1) * coeff_val
population_zeros = np.ones((pop_size, num_patches, 1, 1, 1))
population_orders = np.random.rand(pop_size, num_patches, 1, 1, 1)
self.hues = torch.nn.Parameter(
torch.tensor(population_hues, dtype=torch.float),
requires_grad=requires_grad)
self.saturations = torch.nn.Parameter(
torch.tensor(population_saturations, dtype=torch.float),
requires_grad=requires_grad)
self.values = torch.nn.Parameter(
torch.tensor(population_values, dtype=torch.float),
requires_grad=requires_grad)
self._zeros = torch.nn.Parameter(
torch.tensor(population_zeros, dtype=torch.float),
requires_grad=False)
self.orders = torch.nn.Parameter(
torch.tensor(population_orders, dtype=torch.float),
requires_grad=requires_grad)
self._hsv_to_rgb = hsv.HsvToRgb()
def _clamp(self):
self.hues.data = self.hues.data.clamp(
min=self._min_hue, max=self._max_hue)
self.saturations.data = self.saturations.data.clamp(
min=self.config['min_sat'], max=self.config['max_sat'])
self.values.data = self.values.data.clamp(
min=self.config['min_val'], max=self.config['max_val'])
self.orders.data = self.orders.data.clamp(min=0.0, max=1.0)
def copy_and_mutate_s(self, parent, child):
with torch.no_grad():
self.hues[child, ...] = (
self.hues[parent, ...]
+ self.config['colour_mutation_scale'] * torch.randn(
self.hues[child, ...].shape).to(self.device))
self.saturations[child, ...] = (
self.saturations[parent, ...]
+ self.config['colour_mutation_scale'] * torch.randn(
self.saturations[child, ...].shape).to(self.device))
self.values[child, ...] = (
self.values[parent, ...]
+ self.config['colour_mutation_scale'] * torch.randn(
self.values[child, ...].shape).to(self.device))
self.orders[child, ...] = self.orders[parent, ...]
def copy_from(self, other, idx_to, idx_from):
"""Copy parameters from other colour transform, for selected indices."""
assert idx_to < self._pop_size
with torch.no_grad():
self.hues[idx_to, ...] = other.hues[idx_from, ...]
self.saturations[idx_to, ...] = other.saturations[idx_from, ...]
self.values[idx_to, ...] = other.values[idx_from, ...]
self.orders[idx_to, ...] = other.orders[idx_from, ...]
def forward(self, image):
self._clamp()
colours = torch.cat(
[self.hues, self.saturations, self.values, self._zeros, self.orders], 2)
hsv_image = colours * image
rgb_image = self._hsv_to_rgb(hsv_image[:, :, :3, :, :])
return torch.cat([rgb_image, hsv_image[:, :, 3:, :, :]], axis=2)
def tensor_to(self, device):
self.hues = self.hues.to(device)
self.saturations = self.saturations.to(device)
self.values = self.values.to(device)
self.orders = self.orders.to(device)
self._zeros = self._zeros.to(device)
class PopulationColourRGBTransforms(torch.nn.Module):
"""RGB color transforms and ordering of patches."""
def __init__(self, config, device, num_patches=1, pop_size=1,
requires_grad=True):
super(PopulationColourRGBTransforms, self).__init__()
self.config = config
self.device = device
print('PopulationColourRGBTransforms for {} patches, {} individuals'.format(
num_patches, pop_size))
self._pop_size = pop_size
print(f'PopulationColourRGBTransforms requires_grad={requires_grad}')
rgb_init_range = (
self.config['initial_max_rgb'] - self.config['initial_min_rgb'])
population_reds = (
np.random.rand(pop_size, num_patches, 1, 1, 1)
* rgb_init_range) + self.config['initial_min_rgb']
population_greens = (
np.random.rand(pop_size, num_patches, 1, 1, 1)
* rgb_init_range) + self.config['initial_min_rgb']
population_blues = (
np.random.rand(pop_size, num_patches, 1, 1, 1)
* rgb_init_range) + self.config['initial_min_rgb']
population_zeros = np.ones((pop_size, num_patches, 1, 1, 1))
population_orders = np.random.rand(pop_size, num_patches, 1, 1, 1)
self.reds = torch.nn.Parameter(
torch.tensor(population_reds, dtype=torch.float),
requires_grad=requires_grad)
self.greens = torch.nn.Parameter(
torch.tensor(population_greens, dtype=torch.float),
requires_grad=requires_grad)
self.blues = torch.nn.Parameter(
torch.tensor(population_blues, dtype=torch.float),
requires_grad=requires_grad)
self._zeros = torch.nn.Parameter(
torch.tensor(population_zeros, dtype=torch.float),
requires_grad=False)
self.orders = torch.nn.Parameter(
torch.tensor(population_orders, dtype=torch.float),
requires_grad=requires_grad)
def _clamp(self):
self.reds.data = self.reds.data.clamp(
min=self.config['min_rgb'], max=self.config['max_rgb'])
self.greens.data = self.greens.data.clamp(
min=self.config['min_rgb'], max=self.config['max_rgb'])
self.blues.data = self.blues.data.clamp(
min=self.config['min_rgb'], max=self.config['max_rgb'])
self.orders.data = self.orders.data.clamp(min=0.0, max=1.0)
def copy_and_mutate_s(self, parent, child):
with torch.no_grad():
self.reds[child, ...] = (
self.reds[parent, ...]
+ self.config['colour_mutation_scale'] * torch.randn(
self.reds[child, ...].shape).to(self.device))
self.greens[child, ...] = (
self.greens[parent, ...]
+ self.config['colour_mutation_scale'] * torch.randn(
self.greens[child, ...].shape).to(self.device))
self.blues[child, ...] = (
self.blues[parent, ...]
+ self.config['colour_mutation_scale'] * torch.randn(
self.blues[child, ...].shape).to(self.device))
self.orders[child, ...] = self.orders[parent, ...]
def copy_from(self, other, idx_to, idx_from):
"""Copy parameters from other colour transform, for selected indices."""
assert idx_to < self._pop_size
with torch.no_grad():
self.reds[idx_to, ...] = other.reds[idx_from, ...]
self.greens[idx_to, ...] = other.greens[idx_from, ...]
self.blues[idx_to, ...] = other.blues[idx_from, ...]
self.orders[idx_to, ...] = other.orders[idx_from, ...]
def forward(self, x):
self._clamp()
colours = torch.cat(
[self.reds, self.greens, self.blues, self._zeros, self.orders], 2)
return colours * x
def tensor_to(self, device):
self.reds = self.reds.to(device)
self.greens = self.greens.to(device)
self.blues = self.blues.to(device)
self.orders = self.orders.to(device)
self._zeros = self._zeros.to(device)
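

if __name__ == "__main__":
  # Minimal usage sketch, not part of the original module: the config values
  # below are placeholder assumptions for illustration, not repository
  # defaults.
  _example_config = {
      'initial_min_rgb': 0.1,
      'initial_max_rgb': 0.9,
      'min_rgb': 0.0,
      'max_rgb': 1.0,
      'colour_mutation_scale': 0.05,
  }
  _transforms = PopulationColourRGBTransforms(
      _example_config, device='cpu', num_patches=10, pop_size=4,
      requires_grad=False)
  # Evolution step: individual 1 becomes a mutated copy of individual 0.
  _transforms.copy_and_mutate_s(parent=0, child=1)
  # Patch images carry 5 channels (RGB + mask + order); the per-patch colours
  # broadcast over the spatial dimensions.
  _patches = torch.rand(4, 10, 5, 64, 64)
  print(_transforms(_patches).shape)  # torch.Size([4, 10, 5, 64, 64])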
|
arnheim-main
|
arnheim_3/src/transformations.py
|
"""Loading and processing collage patches.
Arnheim 3 - Collage
Piotr Mirowski, Dylan Banarse, Mateusz Malinowski, Yotam Doron, Oriol Vinyals,
Simon Osindero, Chrisantha Fernando
DeepMind, 2021-2022
Copyright 2021 DeepMind Technologies Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
from .video_utils import cached_url_download
from .video_utils import cv2_imshow
SHOW_PATCHES = False
def add_binary_alpha_mask(patch):
"""Black pixels treated as having alpha=0, all other pixels have alpha=255."""
mask = ((patch.sum(2) > 0) * 255).astype(np.uint8)
return np.concatenate([patch, np.expand_dims(mask, -1)], axis=-1)
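# Illustrative sketch, not part of the original module: a fully black pixel
# gets alpha 0 and any non-black pixel gets alpha 255, so an (H, W, 3) patch
# becomes (H, W, 4). For example:
#
#   patch = np.zeros((2, 2, 3), dtype=np.uint8)
#   patch[0, 0] = [10, 0, 0]                 # one non-black pixel
#   rgba = add_binary_alpha_mask(patch)
#   # rgba[..., 3] == [[255, 0], [0, 0]]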
def resize_patch(patch, coeff):
return cv2.resize(patch.astype(float),
(int(np.round(patch.shape[1] * coeff)),
int(np.round(patch.shape[0] * coeff))))
def print_size_segmented_data(segmented_data, show=True):
"""Print debug information on patch sizes."""
size_max = 0
shape_max = None
size_min = np.infty
shape_min = None
for i, segment in enumerate(segmented_data):
segment = segment.swapaxes(0, 1)
shape_i = segment.shape
size_i = shape_i[0] * shape_i[1]
if size_i > size_max:
shape_max = shape_i
size_max = size_i
if size_i < size_min:
shape_min = shape_i
size_min = size_i
print(f"Patch {i} of shape {shape_i}")
if show:
im_i = cv2.cvtColor(segment, cv2.COLOR_RGBA2BGRA)
im_bgr = im_i[:, :, :3]
im_mask = np.tile(im_i[:, :, 3:], (1, 1, 3))
im_render = np.concatenate([im_bgr, im_mask], 1)
cv2_imshow(im_render)
print(f"{len(segmented_data)} patches, max {shape_max}, min {shape_min}\n")
def get_segmented_data_initial(config):
"""Load patch file and return segmented image data."""
if config["url_to_patch_file"]:
segmented_data_initial = cached_url_download(config["url_to_patch_file"])
else:
repo_file = config["patch_set"]
repo_root = config["patch_repo_root"]
segmented_data_initial = cached_url_download(
f"{repo_root}/{repo_file}")
segmented_data_initial_tmp = []
for i in range(len(segmented_data_initial)):
if segmented_data_initial[i].shape[2] == 3:
segmented_data_initial_tmp.append(add_binary_alpha_mask(
segmented_data_initial[i]))
else:
segmented_data_initial_tmp.append(
segmented_data_initial[i])
segmented_data_initial = segmented_data_initial_tmp
return segmented_data_initial
def normalise_patch_brightness(patch):
max_intensity = max(patch.max(), 1.0)
return ((patch / max_intensity) * 255).astype(np.uint8)
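# Illustrative sketch, not part of the original module: brightness is rescaled
# so that the brightest channel value maps to 255, and the max(.., 1.0) guard
# avoids division by zero on all-black patches. For example, a float patch
# whose maximum intensity is 128 is roughly doubled:
#
#   patch = np.array([[[64.0, 128.0, 32.0]]])
#   normalise_patch_brightness(patch)   # -> [[[127, 255, 63]]], dtype uint8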
def get_segmented_data(config, index):
"""Generate low and high res patch data for a collage.
Args:
config: dict, config file and command line args
index: int, which subset of options to use when multiple are available.
E.g. selecting patch set based on tile number.
Returns:
numpy arrays: low and high resolution patch data.
"""
# Select tile's patch set and/or parameters if multiple provided.
if ("multiple_patch_set" in config and isinstance(
config["multiple_patch_set"], list) and
config["multiple_patch_set"] != ['null']):
config["patch_set"] = config["multiple_patch_set"][
index % len(config["multiple_patch_set"])]
if ("multiple_fixed_scale_patches" in config and isinstance(
config["multiple_fixed_scale_patches"], list) and
config["multiple_fixed_scale_patches"] != ['null']):
config["fixed_scale_patches"] = config["multiple_fixed_scale_patches"][
index % len(config["multiple_fixed_scale_patches"])] == "True"
if ("multiple_patch_max_proportion" in config and isinstance(
config["multiple_patch_max_proportion"], list) and
config["multiple_patch_max_proportion"] != ['null']):
config["patch_max_proportion"] = int(config[
"multiple_patch_max_proportion"][
index % len(config["multiple_patch_max_proportion"])])
if ("multiple_fixed_scale_coeff" in config and isinstance(
config["multiple_fixed_scale_coeff"], list) and
config["multiple_fixed_scale_coeff"] != ['null']):
config["fixed_scale_coeff"] = float(config["multiple_fixed_scale_coeff"][
index % len(config["multiple_fixed_scale_coeff"])])
segmented_data_initial = get_segmented_data_initial(config)
# Fixed order for the segmented images.
num_patches = len(segmented_data_initial)
order = np.arange(num_patches)
# The following permutes the patches but precludes reloading checkpoints.
# order = np.random.permutation(num_patches)
# Compress all images until they are at most 1/PATCH_MAX_PROPORTION of the
# large canvas size.
canvas_height = config["canvas_height"]
canvas_width = config["canvas_width"]
hires_height = canvas_height * config["high_res_multiplier"]
hires_width = canvas_width * config["high_res_multiplier"]
height_large_max = hires_height / config["patch_max_proportion"]
width_large_max = hires_width / config["patch_max_proportion"]
print(f"Patch set {config['patch_set']}, fixed_scale_patches? "
f"{config['fixed_scale_patches']}, "
f"fixed_scale_coeff={config['fixed_scale_coeff']}, "
f"patch_max_proportion={config['patch_max_proportion']}")
if config["fixed_scale_patches"]:
print(f"Max size for fixed scale patches: ({hires_height},{hires_width})")
else:
print(
f"Max patch size on large img: ({height_large_max}, {width_large_max})")
print(type(config["fixed_scale_patches"]))
segmented_data = []
segmented_data_high_res = []
for patch_i in range(num_patches):
segmented_data_initial_i = segmented_data_initial[
order[patch_i]].astype(np.float32).swapaxes(0, 1)
shape_i = segmented_data_initial_i.shape
h_i = shape_i[0]
w_i = shape_i[1]
if h_i >= config["patch_height_min"] and w_i >= config["patch_width_min"]:
# Coefficient for resizing the patch.
if config["fixed_scale_patches"]:
coeff_i_large = config["fixed_scale_coeff"]
if h_i * coeff_i_large > hires_height:
coeff_i_large = hires_height / h_i
        if w_i * coeff_i_large > hires_width:
coeff_i_large = min(coeff_i_large, hires_width / w_i)
if coeff_i_large != config["fixed_scale_coeff"]:
print(
f"Patch {patch_i} too large; scaled to {coeff_i_large:.2f}")
else:
coeff_i_large = 1.0
if h_i > height_large_max:
coeff_i_large = height_large_max / h_i
if w_i > width_large_max:
coeff_i_large = min(coeff_i_large, width_large_max / w_i)
# Resize the high-res patch?
if coeff_i_large < 1.0:
# print(f"Patch {patch_i} scaled by {coeff_i_large:.2f}")
segmented_data_high_res_i = resize_patch(segmented_data_initial_i,
coeff_i_large)
else:
segmented_data_high_res_i = np.copy(segmented_data_initial_i)
# Resize the low-res patch.
coeff_i = coeff_i_large / config["high_res_multiplier"]
segmented_data_i = resize_patch(segmented_data_initial_i, coeff_i)
shape_i = segmented_data_i.shape
if (shape_i[0] > canvas_height
          or shape_i[1] > canvas_width):
print(f"{shape_i} exceeds canvas ({canvas_height},{canvas_width})")
if config["normalize_patch_brightness"]:
segmented_data_i[..., :3] = normalise_patch_brightness(
segmented_data_i[..., :3])
segmented_data_high_res_i[..., :3] = normalise_patch_brightness(
segmented_data_high_res_i[..., :3])
segmented_data_high_res_i = segmented_data_high_res_i.astype(np.uint8)
segmented_data_high_res.append(segmented_data_high_res_i)
segmented_data_i = segmented_data_i.astype(np.uint8)
segmented_data.append(segmented_data_i)
else:
print(f"Discard patch of size {h_i}x{w_i}")
if SHOW_PATCHES:
print("Patch sizes during optimisation:")
print_size_segmented_data(segmented_data, show=config["gui"])
print("Patch sizes for high-resolution final image:")
print_size_segmented_data(segmented_data_high_res, show=config["gui"])
return segmented_data, segmented_data_high_res
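# Illustrative sketch, not part of the original module: the config keys read by
# get_segmented_data() and get_segmented_data_initial(). The values below are
# placeholder assumptions, not repository defaults, and the patch file is
# downloaded over the network via cached_url_download.
#
#   example_config = {
#       "url_to_patch_file": "",           # or a direct URL to a patch file
#       "patch_repo_root": "<patch repository root URL>",
#       "patch_set": "<patch set file name>",
#       "canvas_height": 224,
#       "canvas_width": 224,
#       "high_res_multiplier": 4,
#       "patch_max_proportion": 5,
#       "fixed_scale_patches": False,
#       "fixed_scale_coeff": 1.0,
#       "patch_height_min": 10,
#       "patch_width_min": 10,
#       "normalize_patch_brightness": False,
#       "gui": False,
#   }
#   low_res, high_res = get_segmented_data(example_config, index=0)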
|
arnheim-main
|
arnheim_3/src/patches.py
|