| python_code (string, lengths 0 to 780k) | repo_name (string, lengths 7 to 38) | file_path (string, lengths 5 to 103) |
|---|---|---|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ENN Networks."""
from absl.testing import absltest
from absl.testing import parameterized
from enn.networks import utils as networks_utils
from enn.networks.resnet import base
from enn.networks.resnet import lib
import haiku as hk
import jax
_TEST_CONFIGS = (
lib.CanonicalResNets.RESNET_18.value,
lib.CanonicalResNets.RESNET_50.value,
)
class NetworkTest(parameterized.TestCase):
"""Tests for ResNet."""
@parameterized.product(
num_classes=[2, 10],
batch_size=[1, 10],
image_size=[2, 10],
config=_TEST_CONFIGS,
)
def test_forward_pass(
self,
num_classes: int,
batch_size: int,
image_size: int,
config: lib.ResNetConfig,
):
"""Tests forward pass and output shape."""
enn = base.EnsembleResNetENN(
num_output_classes=num_classes,
config=config,
)
rng = hk.PRNGSequence(0)
image_shape = [image_size, image_size, 3]
x = jax.random.normal(next(rng), shape=[batch_size,] + image_shape)
index = enn.indexer(next(rng))
params, state = enn.init(next(rng), x, index)
out, unused_new_state = enn.apply(params, state, x, index)
logits = networks_utils.parse_net_output(out)
self.assertEqual(logits.shape, (batch_size, num_classes))
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/resnet/test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ResNet with priors for ImageNet."""
from typing import Sequence
import chex
from enn import base
from enn.networks import base as networks_base
from enn.networks import indexers
from enn.networks.resnet import base as resnet_base
import haiku as hk
import jax
import jax.numpy as jnp
class ResnetMlpPrior(networks_base.EnnArray):
"""Resnet Network with MLP Prior."""
def __init__(self,
num_classes: int,
prior_scale: float = 1.,
hidden_sizes: Sequence[int] = (10,),
is_training: bool = True):
def net_fn(x: chex.Array,
index: base.Index) -> networks_base.OutputWithPrior:
del index
output = resnet_base.resnet_model(num_classes)(x, is_training=is_training)
# MLP Prior
if jax.local_devices()[0].platform == 'tpu':
x = jnp.transpose(x, (3, 0, 1, 2)) # HWCN -> NHWC
x = hk.Flatten()(x)
prior = hk.nets.MLP(list(hidden_sizes) + [num_classes,], name='prior')(x)
return networks_base.OutputWithPrior(
train=output.train, prior=prior_scale * prior, extra=output.extra)
transformed = hk.without_apply_rng(hk.transform_with_state(net_fn))
enn = networks_base.EnnArray(
apply=transformed.apply,
init=transformed.init,
indexer=indexers.EnsembleIndexer(1))
super().__init__(enn.apply, enn.init, enn.indexer)
class ResnetCnnPrior(networks_base.EnnArray):
"""VGG Network with ConvNet Prior."""
def __init__(self,
num_classes: int,
prior_scale: float = 1.,
output_channels: Sequence[int] = (4, 8, 8),
kernel_sizes: Sequence[int] = (10, 10, 3),
strides: Sequence[int] = (5, 5, 2),
is_training: bool = True):
assert len(output_channels) == len(kernel_sizes) == len(strides)
def net_fn(x: chex.Array,
index: base.Index) -> networks_base.OutputWithPrior:
del index
output = resnet_base.resnet_model(num_classes)(x, is_training=is_training)
# CNN Prior
if jax.local_devices()[0].platform == 'tpu':
x = jnp.transpose(x, (3, 0, 1, 2)) # HWCN -> NHWC
for channels, kernel_size, stride in zip(output_channels,
kernel_sizes,
strides,):
x = hk.Conv2D(
output_channels=channels,
kernel_shape=[kernel_size, kernel_size],
stride=stride,
name='prior_conv')(x)
x = jax.nn.relu(x)
x = hk.Flatten()(x)
prior = hk.nets.MLP([num_classes], name='prior')(x)
return networks_base.OutputWithPrior(
train=output.train, prior=prior_scale * prior, extra=output.extra)
transformed = hk.without_apply_rng(hk.transform_with_state(net_fn))
enn = networks_base.EnnArray(
apply=transformed.apply,
init=transformed.init,
indexer=indexers.EnsembleIndexer(1))
super().__init__(enn.apply, enn.init, enn.indexer)
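def _example_usage():
  """Illustrative usage sketch for ResnetMlpPrior.

  Mirrors the init/apply pattern in enn/networks/resnet/test.py; the batch
  size and image size below are arbitrary example values.
  """
  enn = ResnetMlpPrior(num_classes=10)
  rng = hk.PRNGSequence(0)
  x = jax.random.normal(next(rng), shape=[2, 8, 8, 3])  # [batch, H, W, C]
  index = enn.indexer(next(rng))
  params, state = enn.init(next(rng), x, index)
  out, unused_state = enn.apply(params, state, x, index)
  return out  # networks_base.OutputWithPrior with .train and .prior logits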
| enn-master | enn/networks/resnet/priors.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Network definitions for ResNet."""
import chex
from enn.networks import base as networks_base
from enn.networks import ensembles
from enn.networks import utils as networks_utils
from enn.networks.resnet import lib
import haiku as hk
import jax
import jax.numpy as jnp
def resnet_model(
num_output_classes: int,
enable_double_transpose: bool = True,
config: lib.ResNetConfig = lib.CanonicalResNets.RESNET_50.value,
) -> lib.ForwardFn:
"""Returns forward network for ResNet."""
model = lib.ResNet(num_output_classes, config)
should_transpose_images = (
enable_double_transpose and jax.local_devices()[0].platform == 'tpu')
def forward_fn(
inputs: chex.Array,
is_training: bool,
test_local_stats: bool = False) -> networks_base.OutputWithPrior:
# If enabled, there should be a matching NHWC->HWCN transpose in the data.
if should_transpose_images:
inputs = jnp.transpose(inputs, (3, 0, 1, 2)) # HWCN -> NHWC
net_out = model(inputs, is_training, test_local_stats)
return networks_utils.parse_to_output_with_prior(net_out)
return forward_fn
class EnsembleResNetENN(networks_base.EnnArray):
"""Ensemble of ResNet Networks created using einsum ensemble."""
def __init__(self,
num_output_classes: int,
num_ensemble: int = 1,
is_training: bool = True,
test_local_stats: bool = False,
enable_double_transpose: bool = True,
config: lib.ResNetConfig = lib.CanonicalResNets.RESNET_50.value):
def net_fn(x: chex.Array) -> networks_base.OutputWithPrior:
forward_fn = resnet_model(num_output_classes=num_output_classes,
enable_double_transpose=enable_double_transpose,
config=config)
net_out = forward_fn(x, is_training, test_local_stats)
return networks_utils.parse_to_output_with_prior(net_out)
transformed = hk.without_apply_rng(hk.transform_with_state(net_fn))
enn = ensembles.EnsembleWithState(transformed, num_ensemble)
super().__init__(enn.apply, enn.init, enn.indexer)
| enn-master | enn/networks/resnet/base.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Helper to get a model given collection of models."""
from typing import Optional, Sequence
import chex
from enn import base
from enn.networks import base as networks_base
from enn.networks import indexers
from enn.networks import mlp
from enn.networks.epinet import base as epinet_base
import haiku as hk
import jax.numpy as jnp
class MLPEpinetWithPrior(epinet_base.EpinetWithState):
"""MLP epinet with matching prior function."""
def __init__(self,
index_dim: int,
num_classes: int,
epinet_hiddens: Sequence[int],
prior_epinet_hiddens: Optional[Sequence[int]] = None,
prior_scale: float = 1,
drop_inputs: bool = False):
"""Defines an MLP epinet with matching prior function."""
if prior_epinet_hiddens is None:
prior_epinet_hiddens = epinet_hiddens
def epinet_fn(inputs: chex.Array,
index: base.Index,
hidden: chex.Array) -> networks_base.OutputWithPrior:
# Creating networks
train_epinet = mlp.ProjectedMLP(
epinet_hiddens, num_classes, index_dim, name='train_epinet')
prior_epinet = mlp.ProjectedMLP(
prior_epinet_hiddens, num_classes, index_dim, name='prior_epinet')
if drop_inputs:
epi_inputs = hidden
else:
flat_inputs = hk.Flatten()(inputs)
epi_inputs = jnp.concatenate([hidden, flat_inputs], axis=1)
# Wiring networks: add linear epinet (+ prior) from final output layer.
epi_train = train_epinet(epi_inputs, index)
epi_prior = prior_epinet(epi_inputs, index)
return networks_base.OutputWithPrior(
train=epi_train,
prior=prior_scale * epi_prior,
)
# Form ENN from haiku transformed.
transformed = hk.without_apply_rng(hk.transform_with_state(epinet_fn))
indexer = indexers.GaussianIndexer(index_dim)
super().__init__(transformed.apply, transformed.init, indexer)
def parse_base_hidden(
base_out: networks_base.Output,
hidden_name: str = 'final_out',
) -> chex.Array:
"""Parses the final hidden layer from the base network output."""
# TODO(author2): improve type checking on base_out
assert isinstance(base_out, networks_base.OutputWithPrior)
assert hidden_name in base_out.extra
return base_out.extra[hidden_name]
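def _example_usage():
  """Illustrative usage sketch for MLPEpinetWithPrior.

  The epinet init/apply take the base-network hidden features as an extra
  argument (see EpinetApplyWithState); all sizes below are arbitrary.
  """
  epinet = MLPEpinetWithPrior(index_dim=3, num_classes=10, epinet_hiddens=[16])
  rng = hk.PRNGSequence(0)
  inputs = jnp.ones([2, 8])   # ENN inputs x
  hidden = jnp.ones([2, 4])   # base-network features phi(x)
  index = epinet.indexer(next(rng))
  params, state = epinet.init(next(rng), inputs, index, hidden)
  out, unused_state = epinet.apply(params, state, inputs, index, hidden)
  return out  # OutputWithPrior combining epinet train and scaled prior logits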
| enn-master | enn/networks/epinet/last_layer.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Public methods for epinet."""
# Base
from enn.networks.epinet.base import BaseHiddenParser
from enn.networks.epinet.base import combine_base_epinet_as_enn
from enn.networks.epinet.base import EpinetApplyWithState
from enn.networks.epinet.base import EpinetInitWithState
from enn.networks.epinet.base import EpinetWithState
# last_layer
from enn.networks.epinet.last_layer import MLPEpinetWithPrior
from enn.networks.epinet.last_layer import parse_base_hidden
# MLP
from enn.networks.epinet.mlp import make_mlp_epinet
# Prior
from enn.networks.epinet.priors import combine_epinet_and_prior
from enn.networks.epinet.priors import make_cifar_conv_prior
from enn.networks.epinet.priors import make_imagenet_conv_prior
from enn.networks.epinet.priors import make_imagenet_mlp_prior
# ResNet
from enn.networks.epinet.resnet import make_checkpoint_from_config
from enn.networks.epinet.resnet import ResnetFinalEpinet
from enn.networks.epinet.resnet import ResnetFinalEpinetConfig
| enn-master | enn/networks/epinet/__init__.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Network definitions for epinet.
Trying to fork out some reusable pieces for the code.
"""
from typing import Optional, Sequence
import chex
from enn import base
from enn.networks import base as networks_base
from enn.networks import indexers
from enn.networks import mlp
import haiku as hk
import jax
def make_mlp_epinet(
output_sizes: Sequence[int],
epinet_hiddens: Sequence[int],
index_dim: int,
expose_layers: Optional[Sequence[bool]] = None,
prior_scale: float = 1.,
stop_gradient: bool = False,
name: Optional[str] = None,
) -> networks_base.EnnArray:
"""Factory method to create a standard MLP epinet."""
if name is None:
prefix = ''
else:
prefix = name + '_'
def net_fn(x: chex.Array, z: base.Index) -> networks_base.OutputWithPrior:
base_mlp = mlp.ExposedMLP(
output_sizes, expose_layers, name=prefix+'base_mlp')
num_classes = output_sizes[-1]
train_epinet = mlp.ProjectedMLP(
epinet_hiddens, num_classes, index_dim, name=prefix+'train_epinet')
prior_epinet = mlp.ProjectedMLP(
epinet_hiddens, num_classes, index_dim, name=prefix+'prior_epinet')
base_out = base_mlp(x)
features = base_out.extra['exposed_features']
if stop_gradient:
epi_inputs = jax.lax.stop_gradient(features)
else:
epi_inputs = features
epi_train = train_epinet(epi_inputs, z)
epi_prior = prior_epinet(epi_inputs, z)
return networks_base.OutputWithPrior(
train=base_out.train + epi_train,
prior=prior_scale * epi_prior,
)
transformed = hk.without_apply_rng(hk.transform_with_state(net_fn))
return networks_base.EnnArray(
apply=transformed.apply,
init=transformed.init,
indexer=indexers.GaussianIndexer(index_dim),
)
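def _example_usage():
  """Illustrative usage sketch for make_mlp_epinet.

  Mirrors the pattern in enn/networks/epinet/mlp_test.py but without the
  supervised test harness; all sizes below are arbitrary.
  """
  enn = make_mlp_epinet(output_sizes=[16, 3], epinet_hiddens=[8], index_dim=4)
  rng = hk.PRNGSequence(0)
  x = jax.random.normal(next(rng), shape=[2, 5])  # [batch, feature_dim]
  z = enn.indexer(next(rng))
  params, state = enn.init(next(rng), x, z)
  out, unused_state = enn.apply(params, state, x, z)
  return out  # OutputWithPrior: base + epinet train logits, plus scaled prior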
| enn-master | enn/networks/epinet/mlp.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Factory methods for epinet derived from resnet."""
import dataclasses
from typing import Callable, Optional, Sequence
from enn.checkpoints import base as checkpoints_base
from enn.checkpoints import epinet as checkpoints_epinet
from enn.networks import base as networks_base
from enn.networks import priors as enn_priors
from enn.networks.epinet import base as epinet_base
from enn.networks.epinet import last_layer
from enn.networks.epinet import priors
PriorFnCtor = Callable[[], enn_priors.PriorFn]
@dataclasses.dataclass
class ResnetFinalEpinetConfig:
"""Configuration options for ResNet + final layer MLP epinet."""
base_checkpoint: checkpoints_base.EnnCheckpoint
index_dim: int
num_classes: int
epinet_hiddens: Sequence[int]
prior_epinet_hiddens: Optional[Sequence[int]] = None
base_logits_scale: float = 1
epi_prior_scale: float = 1
add_prior_scale: float = 1
prior_fn_ctor: Optional[PriorFnCtor] = None
freeze_base: bool = True
parse_hidden: epinet_base.BaseHiddenParser = last_layer.parse_base_hidden
seed: int = 23
class ResnetFinalEpinet(networks_base.EnnArray):
"""ResNet + final layer MLP epinet."""
def __init__(self, config: ResnetFinalEpinetConfig):
epinet_pieces = _make_enn_from_config(config)
enn = epinet_pieces.enn
super().__init__(enn.apply, enn.init, enn.indexer)
def make_checkpoint_from_config(
name: str,
load_fn: checkpoints_base.ParamsStateLoadFn,
config: ResnetFinalEpinetConfig,
tuned_eval_temperature: Optional[float] = None,
) -> checkpoints_epinet.EpinetCheckpoint:
"""Returns an EpinetCheckpoint from ResnetFinalEpinetConfig.
Args:
name: string identifier for checkpoint.
load_fn: loads params and state for the epinet; the base network is initialized from base_cpt.
config: ResnetFinalEpinetConfig, which includes base_cpt as component.
tuned_eval_temperature: Optional temperature rescaling for evaluation.
"""
return checkpoints_epinet.EpinetCheckpoint(
name=name,
load_fn=load_fn,
epinet_ctor=lambda: _make_enn_from_config(config).epinet,
parse_hidden=config.parse_hidden,
base_cpt=config.base_checkpoint,
base_scale=config.base_logits_scale,
tuned_eval_temperature=tuned_eval_temperature,
)
@dataclasses.dataclass
class _EpinetPieces:
"""Wraps the key components necessary to create either ENN or checkpoint."""
enn: networks_base.EnnArray # Entire network (base+epi).
epinet: epinet_base.EpinetWithState # Epinet applied on top of base net.
def _make_enn_from_config(config: ResnetFinalEpinetConfig) -> _EpinetPieces:
"""Wires together the epinet."""
# Make the last layer epinet
epinet = last_layer.MLPEpinetWithPrior(
index_dim=config.index_dim,
num_classes=config.num_classes,
epinet_hiddens=config.epinet_hiddens,
prior_epinet_hiddens=config.prior_epinet_hiddens,
prior_scale=config.epi_prior_scale,
drop_inputs=True,
)
if config.prior_fn_ctor:
# Form the extra additive prior functions
prior_fn = config.prior_fn_ctor()
# Combined epinet is epinet_head with additive prior
epinet = priors.combine_epinet_and_prior(
epinet, prior_fn, config.add_prior_scale)
# Form the ENN by combining them all
enn = epinet_base.combine_base_epinet_as_enn(
base_checkpoint=config.base_checkpoint,
epinet=epinet,
parse_hidden=config.parse_hidden,
base_scale=config.base_logits_scale,
freeze_base=config.freeze_base,
)
return _EpinetPieces(enn, epinet)
| enn-master | enn/networks/epinet/resnet.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions for epinet and priors.
WARNING: NOT GOLD QUALITY YET - WORK IN PROGRESS.
"""
from typing import Optional, Sequence, Tuple
import chex
from enn import base
from enn.networks import base as networks_base
from enn.networks import einsum_mlp
from enn.networks import ensembles
from enn.networks import priors
from enn.networks.epinet import base as epinet_base
import haiku as hk
import jax
import jax.numpy as jnp
def combine_epinet_and_prior(
epinet: epinet_base.EpinetWithState,
prior_fn: priors.PriorFn,
prior_scale: float = 1,
) -> epinet_base.EpinetWithState:
"""Combines epinet and prior_fn to give a new epinet."""
def apply(
params: hk.Params, # Epinet parameters
state: hk.State, # Epinet state
inputs: chex.Array, # ENN inputs = x
index: base.Index, # ENN index = z
hidden: chex.Array, # Base net hiddens = phi(x)
) -> Tuple[networks_base.OutputWithPrior, hk.State]:
epi_out, epi_state = epinet.apply(params, state, inputs, index, hidden)
prior_out = prior_fn(inputs, index)
combined_out = networks_base.OutputWithPrior(
train=epi_out.train,
prior=epi_out.prior + prior_out * prior_scale,
extra=epi_out.extra
)
return combined_out, epi_state
return epinet_base.EpinetWithState(apply, epinet.init, epinet.indexer)
def make_imagenet_mlp_prior(
num_ensemble: int = 1,
hidden_sizes: Sequence[int] = (50, 50),
num_classes: int = 1000,
seed: int = 23) -> priors.PriorFn:
"""Combining a few mlps as prior function."""
# Note that this returns a callable function and no parameters are exposed.
rng = hk.PRNGSequence(seed)
output_sizes = list(hidden_sizes) + [num_classes,]
def net_fn(x):
# We need to transpose images to match the double-transpose-trick we have in
# the ImageNet dataset loader (enn/datasets/imagenet.py).
if jax.local_devices()[0].platform == 'tpu':
x = jnp.transpose(x, (3, 0, 1, 2)) # HWCN -> NHWC
x = hk.avg_pool(x, window_shape=10, strides=5, padding='VALID')
model = einsum_mlp.EnsembleMLP(output_sizes, num_ensemble)
return model(x)
transformed = hk.without_apply_rng(hk.transform(net_fn))
dummy_input = jnp.ones(shape=(1, 224, 224, 3))
if jax.local_devices()[0].platform == 'tpu':
dummy_input = jnp.transpose(dummy_input, (1, 2, 3, 0)) # NHWC -> HWCN
params = transformed.init(next(rng), dummy_input)
prior_fn = lambda x, z: jnp.dot(transformed.apply(params, x), z)
return jax.jit(prior_fn)
def make_imagenet_conv_prior(
num_ensemble: int = 1,
num_classes: int = 1000,
seed: int = 23,
output_channels: Sequence[int] = (4, 8, 8),
kernel_shapes: Sequence[int] = (10, 10, 3),
strides: Sequence[int] = (5, 5, 2),
) -> priors.PriorFn:
"""Combining a few conv nets as prior function."""
# Note that this returns a callable function and no parameters are exposed.
rng = hk.PRNGSequence(seed)
assert len(output_channels) == len(kernel_shapes) == len(strides)
def conv_net(x):
# We need to transpose images to match the double-transpose-trick we have in
# the ImageNet dataset loader (enn/datasets/imagenet.py).
if jax.local_devices()[0].platform == 'tpu':
x = jnp.transpose(x, (3, 0, 1, 2)) # HWCN -> NHWC
for channels, kernel_shape, stride in zip(output_channels,
kernel_shapes,
strides,):
x = hk.Conv2D(output_channels=channels,
kernel_shape=kernel_shape,
stride=stride,
name='prior_conv')(x)
x = jax.nn.relu(x)
x = hk.Flatten()(x)
return hk.nets.MLP([num_classes], name='prior')(x)
transformed = hk.without_apply_rng(hk.transform(conv_net))
ensemble = ensembles.Ensemble(model=transformed, num_ensemble=num_ensemble)
dummy_index = ensemble.indexer(next(rng))
dummy_input = jnp.ones(shape=(1, 224, 224, 3))
if jax.local_devices()[0].platform == 'tpu':
dummy_input = jnp.transpose(dummy_input, (1, 2, 3, 0)) # NHWC -> HWCN
params = ensemble.init(next(rng), dummy_input, dummy_index)
def prior_fn(x: chex.Array, z: chex.Array) -> chex.Array:
out = [ensemble.apply(params, x, index) for index in range(num_ensemble)]
out = jnp.stack(out, axis=-1)
return jnp.dot(out, z)
return jax.jit(prior_fn)
def make_cifar_conv_prior(
num_ensemble: int = 1,
num_classes: int = 10,
seed: int = 23,
output_channels: Sequence[int] = (4, 8, 4),
kernel_shapes: Optional[Sequence[int]] = None,
) -> priors.PriorFn:
"""Combining a few conv nets as prior function."""
rng = hk.PRNGSequence(seed)
if kernel_shapes is None:
kernel_shapes = [[5, 5]] * len(output_channels)
assert len(output_channels) == len(kernel_shapes)
def conv_net(x):
x = jax.image.resize(x, [x.shape[0], 32, 32, 3], method='bilinear')
for i, channels in enumerate(output_channels):
x = hk.Conv2D(output_channels=channels,
kernel_shape=kernel_shapes[i],
stride=2, name='prior_conv')(x)
x = jax.nn.relu(x)
x = hk.Flatten()(x)
return hk.nets.MLP([num_classes], name='prior')(x)
transformed = hk.without_apply_rng(hk.transform(conv_net))
ensemble = ensembles.Ensemble(model=transformed, num_ensemble=num_ensemble)
dummy_index = ensemble.indexer(next(rng))
dummy_input = jnp.ones(shape=(4, 32, 32, 3))
params = ensemble.init(next(rng), dummy_input, dummy_index)
def prior_fn(x: chex.Array, z: chex.Array) -> chex.Array:
out = [ensemble.apply(params, x, index) for index in range(num_ensemble)]
out = jnp.stack(out, axis=-1)
return jnp.dot(out, z)
return jax.jit(prior_fn)
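def _example_usage():
  """Illustrative usage sketch for make_cifar_conv_prior.

  The returned PriorFn maps (x, z) to logits by dotting the stacked ensemble
  outputs with the index z, so z has length num_ensemble here.
  """
  num_ensemble = 3
  prior_fn = make_cifar_conv_prior(num_ensemble=num_ensemble, num_classes=10)
  x = jnp.ones([4, 32, 32, 3])                 # batch of CIFAR-sized images
  z = jnp.ones([num_ensemble]) / num_ensemble  # uniform weights over members
  return prior_fn(x, z)                        # logits of shape [4, 10]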
| enn-master | enn/networks/epinet/priors.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Epinet ENN."""
from typing import Sequence
from absl.testing import absltest
from absl.testing import parameterized
from enn import supervised
from enn.networks.epinet import mlp
class EpinetTest(parameterized.TestCase):
@parameterized.product(
base_hiddens=[[], [10, 10]],
epinet_hiddens=[[], [10, 10]],
index_dim=[1, 3],
regression=[True, False]
)
def test_mlp_epinet(self,
base_hiddens: Sequence[int],
epinet_hiddens: Sequence[int],
index_dim: int,
regression: bool):
"""Test that the MLP epinet runs."""
test_experiment = supervised.make_test_experiment(regression)
enn = mlp.make_mlp_epinet(
output_sizes=list(base_hiddens) + [test_experiment.num_outputs,],
epinet_hiddens=epinet_hiddens,
index_dim=index_dim,
)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/epinet/mlp_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base classes for epinet."""
import dataclasses
from typing import Callable, Optional, Tuple
import chex
from enn import base
from enn.checkpoints import base as checkpoint_base
from enn.networks import base as networks_base
from enn.networks import utils as networks_utils
import haiku as hk
import jax
from typing_extensions import Protocol
class EpinetApplyWithState(Protocol):
"""Applies the epinet at given parameters and state."""
def __call__(
self,
params: hk.Params, # Epinet parameters
state: hk.State, # Epinet state
inputs: chex.Array, # ENN inputs = x
index: base.Index, # ENN index = z
hidden: chex.Array, # Base net hiddens = phi(x)
) -> Tuple[networks_base.OutputWithPrior, hk.State]:
"""Applies the epinet at given parameters and state."""
class EpinetInitWithState(Protocol):
"""Initializes epinet parameters and state."""
def __call__(
self,
key: chex.PRNGKey, # Random key
inputs: chex.Array, # ENN inputs = x
index: base.Index, # ENN index = z
hidden: chex.Array, # Base net hiddens = phi(x)
) -> Tuple[hk.Params, hk.State]:
"""Initializes epinet parameters and state."""
@dataclasses.dataclass
class EpinetWithState:
"""Convenient pairing of Haiku transformed function and index sampler."""
apply: EpinetApplyWithState
init: EpinetInitWithState
indexer: base.EpistemicIndexer
BaseHiddenParser = Callable[[networks_base.Output], chex.Array]
def combine_base_epinet_as_enn(
base_checkpoint: checkpoint_base.EnnCheckpoint,
epinet: EpinetWithState,
parse_hidden: BaseHiddenParser,
base_index: Optional[base.Index] = None,
base_scale: float = 1,
freeze_base: bool = True,
) -> networks_base.EnnArray:
"""Returns a combined ENN from a base network and an epinet.
Args:
base_checkpoint: checkpoint of base model ENN.
epinet: Epinet to be combined.
parse_hidden: Function to obtain hidden representation from base_out.
base_index: Optional index applied to the base ENN; if None, an index is sampled with seed 0.
base_scale: rescale output of the base network by this.
freeze_base: If True, then the only parameters/state returned will be
specific just to the epinet. If False, then the parameters/state are
combined with those of the base network. Useful for finetuning.
"""
# TODO(author2): Add testing to this function.
# Parse the base network from checkpoint
base_params_init, base_state_init = base_checkpoint.load_fn()
base_enn = base_checkpoint.enn_ctor()
if base_index is None:
base_index = base_enn.indexer(jax.random.PRNGKey(0))
def apply(
params: hk.Params,
state: hk.State,
inputs: chex.Array,
index: base.Index,
) -> Tuple[networks_base.OutputWithPrior, hk.State]:
"""Applies the base network and epinet."""
if freeze_base:
base_params, base_state = base_params_init, base_state_init
else:
base_params, base_state = params, state
# Forward the base network
base_out, base_state = base_enn.apply(
base_params, base_state, inputs, base_index)
base_out = networks_utils.parse_to_output_with_prior(base_out)
# Forward the epinet and combine their outputs
epi_out, epi_state = epinet.apply(
params, state, inputs, index, parse_hidden(base_out))
output = networks_base.OutputWithPrior(
train=base_out.train * base_scale + epi_out.train,
prior=base_out.prior * base_scale + epi_out.prior,
)
state = epi_state if freeze_base else {**base_state, **epi_state}
return output, state
def init(key: chex.PRNGKey,
inputs: chex.Array,
index: base.Index) -> Tuple[hk.Params, hk.State]:
"""Initializes the epinet."""
base_out, unused_base_state = base_enn.apply(
base_params_init, base_state_init, inputs, base_index)
params, state = epinet.init(
key, inputs, index, parse_hidden(base_out))
if freeze_base:
return params, state
else:
return {**params, **base_params_init}, {**state, **base_state_init}
return networks_base.EnnArray(apply, init, epinet.indexer)
| enn-master | enn/networks/epinet/base.py |
# pylint: disable=g-bad-file-header
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing different enns in the library."""
from enn.networks.bert.base import BertApply
from enn.networks.bert.base import BertConfig
from enn.networks.bert.base import BertConfigs
from enn.networks.bert.base import BertEnn
from enn.networks.bert.base import BertInit
from enn.networks.bert.base import BertInput
from enn.networks.bert.bert import BERT
from enn.networks.bert.bert import make_bert_enn
from enn.networks.bert.cls_heads import AgentConfig
from enn.networks.bert.cls_heads import CommonOutputLayer
from enn.networks.bert.cls_heads import make_baseline_head_enn
from enn.networks.bert.cls_heads import make_head_enn
# TODO(author3): Remove this and redirect dependencies to enn/networks.
from enn.networks.combiners import combine_naive_enn
from enn.networks.combiners import make_optimized_forward
| enn-master | enn/networks/bert/__init__.py |
# pylint: disable=g-bad-file-header
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for BERT ENN."""
from absl.testing import absltest
from absl.testing import parameterized
from enn import datasets
from enn.networks import utils as networks_utils
from enn.networks.bert import base
from enn.networks.bert import bert
import haiku as hk
import jax
import jax.numpy as jnp
def _fake_data(seed: int,
num_train: int,
num_classes: int,
sequence_len: int) -> datasets.ArrayBatch:
"""Generates a fake dataset."""
rng = hk.PRNGSequence(seed)
token_ids = jax.random.randint(
next(rng), [num_train, sequence_len], 0, 10_000)
segment_ids = jnp.zeros([num_train, sequence_len], jnp.int32)
input_mask = jnp.ones([num_train, sequence_len], jnp.int32)
batch_start = jax.random.randint(next(rng), [], 0, 1_000_000)
data_index = jnp.arange(num_train) + batch_start
return datasets.ArrayBatch(
x=base.BertInput(token_ids, segment_ids, input_mask),
y=jax.random.randint(next(rng), [num_train], 0, num_classes),
data_index=data_index,
)
class NetworkTest(parameterized.TestCase):
"""Tests for the BERT model."""
@parameterized.product(
output_size=[3, 6],
num_train=[1, 10],
is_training=[True, False],
)
def test_forward_pass(
self,
output_size: int,
num_train: int,
is_training: bool,
):
"""Tests forward pass and output shape."""
bert_config = base.BertConfig(
vocab_size=128,
num_hidden_layers=2,
num_attention_heads=3,
hidden_size=output_size,
)
bert_enn = bert.make_bert_enn(
bert_config=bert_config, is_training=is_training
)
fake_batch = _fake_data(
seed=0,
num_train=num_train,
num_classes=output_size,
sequence_len=128,
)
rng = hk.PRNGSequence(0)
index = bert_enn.indexer(next(rng))
params, state = bert_enn.init(next(rng), fake_batch.x, index)
out, unused_new_state = bert_enn.apply(params, state, fake_batch.x, index)
logits = networks_utils.parse_net_output(out)
self.assertEqual(logits.shape, (num_train, output_size))
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/bert/test.py |
# pylint: disable=g-bad-file-header
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Last layer classification heads for BERT model."""
import dataclasses
from typing import Optional, Sequence, Tuple
import chex
from enn import base as enn_base
from enn.networks import base as networks_base
from enn.networks import dropout
from enn.networks import einsum_mlp
from enn.networks import epinet
from enn.networks import indexers
import haiku as hk
import jax
import jax.numpy as jnp
@dataclasses.dataclass
class AgentConfig:
"""Agent configuration."""
hiddens: Sequence[int] = (50, 50)
# For ensemble agent
num_ensemble: int = 10
# For dropout agent
dropout_rate: float = 0.1
# For epinet agent
index_dim: int = 30
prior_scale: float = 1
def make_head_enn(
agent: str,
num_classes: int,
agent_config: Optional[AgentConfig] = None) -> networks_base.EnnArray:
"""Returns a last layer (head) enn."""
if agent_config is None:
agent_config = AgentConfig()
output_sizes = list(agent_config.hiddens) + [num_classes]
if agent == 'epinet':
# We don't want to expose any layers. This means that only the inputs are
# passed to epinet.
expose_layers = [False] * len(output_sizes)
return epinet.make_mlp_epinet(output_sizes=output_sizes,
epinet_hiddens=agent_config.hiddens,
index_dim=agent_config.index_dim,
expose_layers=expose_layers,
prior_scale=agent_config.prior_scale,
stop_gradient=True)
elif agent == 'ensemble':
return einsum_mlp.make_einsum_ensemble_mlp_enn(
output_sizes=output_sizes,
num_ensemble=agent_config.num_ensemble,
)
elif agent == 'dropout':
return dropout.MLPDropoutENN(
output_sizes=output_sizes,
dropout_rate=agent_config.dropout_rate,
dropout_input=False,
)
else:
raise ValueError(f'Invalid agent: {agent}!')
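def _example_head_usage():
  """Illustrative usage sketch for make_head_enn with the 'ensemble' agent.

  The head ENN acts on fixed [batch, d_model] features (e.g. the BERT pooled
  output); all sizes below are arbitrary example values.
  """
  head = make_head_enn(agent='ensemble', num_classes=3)
  rng = hk.PRNGSequence(0)
  features = jnp.ones([2, 16])
  index = head.indexer(next(rng))
  params, state = head.init(next(rng), features, index)
  out, unused_state = head.apply(params, state, features, index)
  return out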
def make_baseline_head_enn(
num_classes: int, is_training: bool
) -> networks_base.EnnArray:
"""Makes an enn of the baseline classifier head."""
def net_fn(inputs: chex.Array) -> networks_base.OutputWithPrior:
"""Forwards the network."""
classification_layer = CommonOutputLayer(
num_classes=num_classes,
use_extra_projection=False,
dropout_rate=0.1,
)
outputs = classification_layer(inputs, is_training=is_training)
# Wrap in ENN output layer
return networks_base.OutputWithPrior(outputs, prior=jnp.zeros_like(outputs))
# The transformed function takes an rng argument, which we repurpose as the index.
transformed = hk.transform_with_state(net_fn)
def apply(
params: hk.Params,
state: hk.State,
inputs: chex.Array,
index: enn_base.Index,
) -> Tuple[networks_base.OutputWithPrior, hk.State]:
return transformed.apply(params, state, index, inputs)
def init(rng_key: chex.PRNGKey,
inputs: chex.Array,
index: enn_base.Index) -> Tuple[hk.Params, hk.State]:
del index # rng_key is duplicated in this case.
return transformed.init(rng_key, inputs)
return networks_base.EnnArray(apply, init, indexers.PrngIndexer())
class CommonOutputLayer(hk.Module):
"""Finetuning layer for downstream tasks.
This is the Haiku module of the classifier implemented in tensorflow here:
https://github.com/google-research/bert/blob/master/run_classifier.py#L574
"""
def __init__(
self,
num_classes: int,
dropout_rate: float = 0.1,
use_extra_projection: bool = True,
name: str = 'output_layer',
):
"""Initialises the module.
Args:
num_classes: Number of classes in the downstream task.
dropout_rate: Dropout rate.
use_extra_projection: Whether an extra linear layer with a tanh activation
is added before computing the output value.
name: Haiku module name.
"""
super().__init__(name=name)
self._num_classes = num_classes
self._dropout_rate = dropout_rate
self._use_extra_projection = use_extra_projection
def __call__(
self,
inputs: jax.Array,
is_training: bool = True,
) -> jax.Array:
"""Compute the classification logits.
Args:
inputs: A tensor of shape (B, d_model) containing a summary of the
sequence to classify.
is_training: `bool`; if True, dropout is applied.
Returns:
A tensor of shape (B, num_classes) containing the classification logits.
"""
output = inputs
if self._use_extra_projection:
d_model = output.shape[-1]
output = hk.Linear(
d_model,
w_init=hk.initializers.RandomNormal(stddev=0.02))(output)
output = jnp.tanh(output)
if is_training:
output = hk.dropout(
rng=hk.next_rng_key(), rate=self._dropout_rate, x=output)
output = hk.Linear(
self._num_classes,
w_init=hk.initializers.RandomNormal(stddev=0.02))(output)
return output
| enn-master | enn/networks/bert/cls_heads.py |
# pylint: disable=g-bad-file-header
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A JAX implementation of BERT."""
import typing as tp
import chex
from enn import base as enn_base
from enn.networks import base as networks_base
from enn.networks import indexers
from enn.networks.bert import base
import haiku as hk
import jax
import jax.numpy as jnp
# BERT layer norm uses
# github.com/google-research/tf-slim/blob/master/tf_slim/layers/layers.py#L2346
TF_LAYERNORM_EPSILON = 1e-12
def make_bert_enn(
bert_config: base.BertConfig,
is_training: bool,
) -> base.BertEnn:
"""Makes the BERT model as an ENN with state."""
def net_fn(inputs: base.BertInput) -> networks_base.OutputWithPrior:
"""Forwards the network (no index)."""
hidden_drop = bert_config.hidden_dropout_prob if is_training else 0.
att_drop = bert_config.attention_probs_dropout_prob if is_training else 0.
bert_model = BERT(
vocab_size=bert_config.vocab_size,
hidden_size=bert_config.hidden_size,
num_hidden_layers=bert_config.num_hidden_layers,
num_attention_heads=bert_config.num_attention_heads,
intermediate_size=bert_config.intermediate_size,
hidden_dropout_prob=hidden_drop,
attention_probs_dropout_prob=att_drop,
max_position_embeddings=bert_config.max_position_embeddings,
type_vocab_size=bert_config.type_vocab_size,
initializer_range=bert_config.initializer_range,
)
# Embed and summarize the sequence.
return bert_model( # pytype: disable=wrong-arg-types # jax-devicearray
input_ids=inputs.token_ids,
token_type_ids=inputs.segment_ids,
input_mask=inputs.input_mask.astype(jnp.int32),
is_training=is_training,
)
# The transformed function takes an rng argument, which we repurpose as the index.
transformed = hk.transform_with_state(net_fn)
def apply(
params: hk.Params,
state: hk.State,
inputs: base.BertInput,
index: enn_base.Index, # BERT operates with an RNG-key index.
) -> tp.Tuple[networks_base.OutputWithPrior, hk.State]:
key = index
return transformed.apply(params, state, key, inputs)
def init(rng_key: chex.PRNGKey,
inputs: base.BertInput,
index: enn_base.Index) -> tp.Tuple[hk.Params, hk.State]:
del index # rng_key is duplicated in this case.
return transformed.init(rng_key, inputs)
return base.BertEnn(apply, init, indexers.PrngIndexer())
class BERT(hk.Module):
"""BERT as a Haiku module.
This is the Haiku module of the BERT model implemented in tensorflow here:
https://github.com/google-research/bert/blob/master/modeling.py#L107
"""
def __init__(
self,
vocab_size: int,
hidden_size: int,
num_hidden_layers: int,
num_attention_heads: int,
intermediate_size: int,
hidden_dropout_prob: float,
attention_probs_dropout_prob: float,
max_position_embeddings: int,
type_vocab_size: int,
initializer_range: float,
name: str = 'BERT',
):
super().__init__(name=name)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.size_per_head = hidden_size // num_attention_heads
def _bert_layer(
self,
layer_input: jax.Array,
layer_index: int,
input_mask: jax.Array,
is_training: bool,
) -> jax.Array:
"""Forward pass of a single layer."""
*batch_dims, seq_length, hidden_size = layer_input.shape
queries = hk.Linear(
self.hidden_size,
w_init=hk.initializers.TruncatedNormal(self.initializer_range),
name='query_%d' % layer_index)(
layer_input)
keys = hk.Linear(
self.hidden_size,
w_init=hk.initializers.TruncatedNormal(self.initializer_range),
name='keys_%d' % layer_index)(
layer_input)
values = hk.Linear(
self.hidden_size,
w_init=hk.initializers.TruncatedNormal(self.initializer_range),
name='values_%d' % layer_index)(
layer_input)
btnh = (*batch_dims, seq_length, self.num_attention_heads,
self.size_per_head)
queries = jnp.reshape(queries, btnh)
keys = jnp.reshape(keys, btnh)
values = jnp.reshape(values, btnh)
# Attention scores.
attention_scores = jnp.einsum('...tnh,...fnh->...nft', keys, queries)
attention_scores *= self.size_per_head**(-0.5)
# attention_scores shape: [..., num_heads, num_attending, num_attended_over]
# Broadcast the input mask along heads and query dimension.
# If a key/value location is pad, do not attend over it.
# Do that by pushing the attention logit towards negative infinity.
bcast_shape = list(input_mask.shape[:-1]) + [1, 1, input_mask.shape[-1]]
input_mask_broadcasted = jnp.reshape(input_mask, bcast_shape)
attention_mask = -1. * 1e30 * (1.0 - input_mask_broadcasted)
attention_scores += attention_mask
attention_probs = jax.nn.softmax(attention_scores)
if is_training:
attention_probs = hk.dropout(hk.next_rng_key(),
self.attention_probs_dropout_prob,
attention_probs)
# Weighted sum.
attention_output = jnp.einsum('...nft,...tnh->...fnh', attention_probs,
values)
attention_output = jnp.reshape(
attention_output, (*batch_dims, seq_length, hidden_size))
# Projection to hidden size.
attention_output = hk.Linear(
self.hidden_size,
w_init=hk.initializers.TruncatedNormal(self.initializer_range),
name='attention_output_dense_%d' % layer_index)(
attention_output)
if is_training:
attention_output = hk.dropout(hk.next_rng_key(), self.hidden_dropout_prob,
attention_output)
attention_output = hk.LayerNorm(
axis=-1,
create_scale=True,
create_offset=True,
eps=TF_LAYERNORM_EPSILON,
name='attention_output_ln_%d' % layer_index)(
attention_output + layer_input)
# FFW.
intermediate_output = hk.Linear(
self.intermediate_size,
w_init=hk.initializers.TruncatedNormal(self.initializer_range),
name='intermediate_output_%d' % layer_index)(
attention_output)
intermediate_output = jax.nn.gelu(intermediate_output)
layer_output = hk.Linear(
self.hidden_size,
w_init=hk.initializers.TruncatedNormal(self.initializer_range),
name='layer_output_%d' % layer_index)(
intermediate_output)
if is_training:
layer_output = hk.dropout(hk.next_rng_key(), self.hidden_dropout_prob,
layer_output)
layer_output = hk.LayerNorm(
axis=-1,
create_scale=True,
create_offset=True,
eps=TF_LAYERNORM_EPSILON,
name='layer_output_ln_%d' % layer_index)(
layer_output + attention_output)
return layer_output
def __call__(
self,
input_ids: jax.Array,
token_type_ids: tp.Optional[jax.Array] = None,
input_mask: tp.Optional[jax.Array] = None,
is_training: bool = True,
) -> networks_base.OutputWithPrior:
"""Forward pass of the BERT model."""
# Prepare size, fill out missing inputs.
*_, seq_length = input_ids.shape
if input_mask is None:
input_mask = jnp.ones(shape=input_ids.shape, dtype=jnp.int32)
if token_type_ids is None:
token_type_ids = jnp.zeros(shape=input_ids.shape, dtype=jnp.int32)
position_ids = jnp.arange(seq_length)[None, :]
# Embeddings.
word_embedder = hk.Embed(
vocab_size=self.vocab_size,
embed_dim=self.hidden_size,
w_init=hk.initializers.TruncatedNormal(self.initializer_range),
name='word_embeddings')
word_embeddings = word_embedder(input_ids)
token_type_embeddings = hk.Embed(
vocab_size=self.type_vocab_size,
embed_dim=self.hidden_size,
w_init=hk.initializers.TruncatedNormal(self.initializer_range),
name='token_type_embeddings')(
token_type_ids)
position_embeddings = hk.Embed(
vocab_size=self.max_position_embeddings,
embed_dim=self.hidden_size,
w_init=hk.initializers.TruncatedNormal(self.initializer_range),
name='position_embeddings')(
position_ids)
input_embeddings = (
word_embeddings + token_type_embeddings + position_embeddings)
input_embeddings = hk.LayerNorm(
axis=-1,
create_scale=True,
create_offset=True,
eps=TF_LAYERNORM_EPSILON,
name='embeddings_ln')(
input_embeddings)
if is_training:
input_embeddings = hk.dropout(
hk.next_rng_key(), self.hidden_dropout_prob, input_embeddings)
# BERT layers.
h = input_embeddings
extra = {}
for i in range(self.num_hidden_layers):
h = self._bert_layer(
h, layer_index=i, input_mask=input_mask, is_training=is_training)
extra[f'hidden_layer_{i}'] = h
last_layer = h
# Masked language modelling logprobs.
mlm_hidden = hk.Linear(
self.hidden_size,
w_init=hk.initializers.TruncatedNormal(self.initializer_range),
name='mlm_dense')(last_layer)
mlm_hidden = jax.nn.gelu(mlm_hidden)
mlm_hidden = hk.LayerNorm(
axis=-1,
create_scale=True,
create_offset=True,
eps=TF_LAYERNORM_EPSILON,
name='mlm_ln')(mlm_hidden)
output_weights = jnp.transpose(word_embedder.embeddings)
logits = jnp.matmul(mlm_hidden, output_weights)
logits = hk.Bias(bias_dims=[-1], name='mlm_bias')(logits)
log_probs = jax.nn.log_softmax(logits, axis=-1)
# Pooled output: [CLS] token.
first_token_last_layer = last_layer[..., 0, :]
pooled_output = hk.Linear(
self.hidden_size,
w_init=hk.initializers.TruncatedNormal(self.initializer_range),
name='pooler_dense')(
first_token_last_layer)
pooled_output = jnp.tanh(pooled_output)
extra['logits'] = logits
extra['log_probs'] = log_probs
extra['pooled_output'] = pooled_output
return networks_base.OutputWithPrior(
train=pooled_output, prior=jnp.zeros_like(pooled_output), extra=extra)
| enn-master | enn/networks/bert/bert.py |
# pylint: disable=g-bad-file-header
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base for BERT model."""
import enum
import typing as tp
import chex
from enn import base as enn_base
from enn.networks import base as networks_base
import numpy as np
class BertInput(tp.NamedTuple):
"""Input for the BERT model."""
token_ids: np.ndarray
segment_ids: np.ndarray
input_mask: np.ndarray
extra: tp.Dict[str, chex.Array] = {} # You can put other optional stuff here
# Enn modules specialized to work with BertInput.
BertEnn = enn_base.EpistemicNetwork[BertInput, networks_base.OutputWithPrior]
BertApply = enn_base.ApplyFn[BertInput, networks_base.OutputWithPrior]
BertInit = enn_base.InitFn[BertInput]
# Minimal BertConfig copied from
# https://github.com/google-research/bert/blob/master/modeling.py#L31
class BertConfig:
"""Configuration for the BERT Model."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act='gelu',
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
def bert_small() -> BertConfig:
"""Config for small BERT with ~110M params."""
return BertConfig(
attention_probs_dropout_prob=0.1,
hidden_act='gelu',
hidden_dropout_prob=0.1,
hidden_size=768,
initializer_range=0.02,
intermediate_size=3072,
max_position_embeddings=512,
num_attention_heads=12,
num_hidden_layers=12,
type_vocab_size=2,
vocab_size=30522,
)
def bert_large() -> BertConfig:
"""Config for large BERT with ~340M params."""
return BertConfig(
attention_probs_dropout_prob=0.1,
hidden_act='gelu',
hidden_dropout_prob=0.1,
hidden_size=1024,
initializer_range=0.02,
intermediate_size=4096,
max_position_embeddings=512,
num_attention_heads=16,
num_hidden_layers=24,
type_vocab_size=2,
vocab_size=30522)
class BertConfigs(enum.Enum):
"""Configs for BERT models."""
BERT_SMALL: BertConfig = bert_small() # ~110M params
BERT_LARGE: BertConfig = bert_large() # ~340M params
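# Usage sketch (comment only, since importing enn.networks.bert.bert here
# would be circular): a canonical config can be turned into an ENN via e.g.
#   config = BertConfigs.BERT_SMALL.value
#   enn = make_bert_enn(bert_config=config, is_training=False)
# following the forward-pass pattern in enn/networks/bert/test.py.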
| enn-master | enn/networks/bert/base.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Epistemic neural networks for uncertainty representation."""
| enn-master | enn/opensource/__init__.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing the public methods of the losses."""
# Base
from enn.data_noise.base import DataNoise
from enn.data_noise.base import DataNoiseBase
# Bootstrapping
from enn.data_noise.bootstrapping import BootstrapNoise
# Gaussian noise
from enn.data_noise.gaussian import GaussianTargetNoise
| enn-master | enn/data_noise/__init__.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for generating bootstrap weights in JAX.
Note that we *may* want to pull this into a separate library from the ENN.
"""
import dataclasses
from typing import Callable, Optional, Sequence, Union
from absl import logging
import chex
from enn import base
from enn import datasets
from enn import networks
from enn.data_noise import base as data_noise_base
import jax
import jax.numpy as jnp
import typing_extensions
_ENN = Union[networks.EnnNoState,
networks.EnnArray]
@dataclasses.dataclass
class BootstrapNoise(data_noise_base.DataNoise):
"""Apply bootstrap reweighting to a batch of data."""
# TODO(author5): just pass indexer instead of the enn
enn: _ENN
distribution: str
seed: int = 0
def __call__(self, data: datasets.ArrayBatch,
index: base.Index) -> datasets.ArrayBatch:
"""Apply bootstrap reweighting to a batch of data."""
boot_fn = make_boot_fn(self.enn, self.distribution, self.seed)
boot_weights = boot_fn(data.data_index, index)
return dataclasses.replace(data, weights=boot_weights)
################################################################################
# BootstrapFn reweights data based on epistemic index
BatchWeights = chex.Array # Bootstrap weights for each datapoint
BootstrapFn = Callable[[datasets.DataIndex, base.Index], BatchWeights]
# TODO(author2): Currently all functions written assuming batch dimensions.
# but it might be more elegant to rework the vmap and instead define for one
# example at a time.
# batch_weights = boot_fn(data_index, index) # (batch_size, 1) shape
# TODO(author2): Refactor batch_weights to be just (batch_size,) shape.
class WeightFn(typing_extensions.Protocol):
"""Interface for weight-generating functions."""
def __call__(
self,
rng_key: chex.PRNGKey,
indices: Optional[Sequence[int]] = None,
) -> jax.Array:
...
DISTRIBUTIONS = {
'poisson': lambda x, shape=(): jax.random.poisson(x, 1, shape=shape),
'exponential':
lambda x, shape=(): jax.random.exponential(x, shape=shape),
'bernoulli':
lambda x, shape=(): 2 * jax.random.bernoulli(x, 0.5, shape=shape),
'uniform': lambda x, shape=(): 2 * jax.random.uniform(x, shape=shape),
}
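# Added usage sketch (illustrative, not part of the original file): every entry
# maps a PRNG key (and optional shape) to nonnegative weights with mean ~1.
def _example_distribution_weights() -> chex.Array:
  key = jax.random.PRNGKey(0)
  weights = DISTRIBUTIONS['exponential'](key, (1_000,))
  # E[Exp(1)] = 1, so the empirical mean should be close to one.
  return jnp.mean(weights)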
def null_bootstrap(data_index: datasets.DataIndex,
index: base.Index) -> BatchWeights:
"""Null bootstrap does not reweight the data at all."""
del index
chex.assert_shape(data_index, (None, 1))
return jnp.ones_like(data_index)
# TODO(author5): Pass just the indexer instead of entire enn
def make_boot_fn(enn: _ENN,
distribution: str,
seed: int = 0) -> BootstrapFn:
"""Factory method to create bootstrap for given ENN and distribution."""
indexer = data_noise_base.get_indexer(enn.indexer)
# None works as a special case for no function
if distribution == 'none' or distribution is None:
return null_bootstrap
# Bootstrapping for ensemble/discrete options
if isinstance(indexer, networks.EnsembleIndexer):
if distribution not in DISTRIBUTIONS:
raise ValueError(f'dist={distribution} not implemented for ensemble.')
weight_fn = DISTRIBUTIONS[distribution]
return _make_ensemble_bootstrap_fn(weight_fn, seed)
# Bootstrapping for Gaussian with unit index
elif isinstance(indexer, networks.GaussianWithUnitIndexer):
index_dim = indexer.index_dim
logging.warning(
'WARNING: indexer is in development, bootstrap may not be correct.')
if distribution == 'bernoulli':
return _make_gaussian_index_bernoulli_bootstrap(index_dim, seed)
else:
raise ValueError(
f'dist={distribution} not implemented for GaussianWithUnitIndexer.')
# Bootstrapping for Gaussian index
elif isinstance(indexer, networks.GaussianIndexer):
index_dim = indexer.index_dim
if distribution == 'bernoulli':
return _make_gaussian_index_bernoulli_bootstrap(index_dim, seed)
elif distribution == 'exponential':
return _make_gaussian_index_exponential_bootstrap(
index_dim, seed, 1/jnp.sqrt(index_dim))
else:
raise ValueError(
f'dist={distribution} not implemented for GaussianIndexer.')
# Bootstrapping for Scaled Gaussian index
elif isinstance(indexer, networks.ScaledGaussianIndexer):
index_dim = indexer.index_dim
if distribution == 'bernoulli':
return _make_gaussian_index_bernoulli_bootstrap(index_dim, seed)
elif distribution == 'exponential':
return _make_gaussian_index_exponential_bootstrap(index_dim, seed, 1)
else:
raise ValueError(
          f'dist={distribution} not implemented for ScaledGaussianIndexer.')
# Bootstrapping for PRNG index
elif isinstance(indexer, networks.PrngIndexer):
if distribution not in DISTRIBUTIONS:
      raise ValueError(f'dist={distribution} not implemented for PrngIndexer.')
weight_fn = DISTRIBUTIONS[distribution]
return _make_prng_bootstrap_fn(weight_fn)
else:
raise ValueError(
f'Bootstrapping for EpistemicIndexer={indexer} not implemented.')
def _make_prng_bootstrap_fn(weight_fn: WeightFn) -> BootstrapFn:
"""Factory method for bootstrap with PRNG index."""
def boot_fn(data_index: datasets.DataIndex, index: base.Index):
chex.assert_shape(data_index, (None, 1))
boot_weights = weight_fn(index, data_index.shape)
chex.assert_shape(boot_weights, (None, 1))
return boot_weights
return boot_fn
def _make_key(data_index: chex.Array, seed: int) -> chex.PRNGKey:
"""Creates RngKeys for a batch of data index."""
chex.assert_shape(data_index, (None, 1))
return jax.vmap(jax.random.PRNGKey)(jnp.squeeze(data_index, axis=1) + seed)
def _make_ensemble_bootstrap_fn(
weight_fn: WeightFn, seed: int = 0) -> BootstrapFn:
"""Factory method to create bootstrapping function with ensemble index.
Args:
weight_fn: weight distribution function e.g. jax.random.exponential.
seed: Optional integer added to the data_keys
Returns:
    BootstrapFn appropriate for an ensemble, i.e. assuming an integer index.
"""
fold_in = jax.vmap(jax.random.fold_in)
weight_fn = jax.vmap(weight_fn)
def boot_fn(data_index: datasets.DataIndex, index: base.Index):
"""Assumes integer index for ensemble weights."""
chex.assert_shape(data_index, (None, 1))
if not index.shape: # If it's a single integer -> repeat for batch
index = jnp.repeat(index, len(data_index))
data_keys = _make_key(data_index, seed)
rng_keys = fold_in(data_keys, index)
return weight_fn(rng_keys)[:, None]
return boot_fn
def _make_gaussian_index_exponential_bootstrap(
index_dim: int,
seed: int = 0,
scale: float = 1,
fold_seed: int = 666) -> BootstrapFn:
"""Factory method to create the approximate exponential weighting."""
fold_in = jax.vmap(jax.random.fold_in, in_axes=[0, None])
std_gauss = lambda x: jax.random.normal(x, [index_dim]) * scale
sample_std_gaussian = jax.vmap(std_gauss)
def boot_fn(data_index: datasets.DataIndex, index: base.Index):
"""Assumes integer index for ensemble weights."""
chex.assert_shape(data_index, (None, 1))
b_keys = _make_key(data_index, seed)
b = sample_std_gaussian(b_keys)
c_keys = fold_in(b_keys, fold_seed)
c = sample_std_gaussian(c_keys)
batch_size = data_index.shape[0]
z = jnp.repeat(jnp.expand_dims(index, 0), batch_size, axis=0)
weights = 0.5 * (jnp.sum(b * z, axis=1) ** 2 + jnp.sum(c * z, axis=1) ** 2)
return weights[:, None]
return boot_fn
def _make_gaussian_index_bernoulli_bootstrap(
index_dim: int,
seed: int = 0) -> BootstrapFn:
"""Factory method to create the approximate bernoulli weighting."""
std_gauss = lambda x: jax.random.normal(x, [index_dim]) / jnp.sqrt(index_dim)
sample_std_gaussian = jax.vmap(std_gauss)
def boot_fn(data_index: datasets.DataIndex, index: base.Index):
"""Assumes integer index for ensemble weights."""
chex.assert_shape(data_index, (None, 1))
b_keys = _make_key(data_index, seed)
b = sample_std_gaussian(b_keys)
batch_size = data_index.shape[0]
z = jnp.repeat(jnp.expand_dims(index, 0), batch_size, axis=0)
weights = 1. + jnp.sign(jnp.sum(b * z, axis=1))
return weights[:, None]
return boot_fn
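# Added end-to-end sketch (illustrative; `my_enn` and `batch` stand in for a
# real ENN and an ArrayBatch with a populated `data_index` field).
def _example_bootstrap_noise(my_enn: _ENN,
                             batch: datasets.ArrayBatch) -> datasets.ArrayBatch:
  noiser = BootstrapNoise(enn=my_enn, distribution='exponential', seed=0)
  index = my_enn.indexer(jax.random.PRNGKey(0))
  # Returns the same batch with per-datapoint bootstrap weights attached.
  return noiser(batch, index)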
|
enn-master
|
enn/data_noise/bootstrapping.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for enn.bootstrapping."""
from absl.testing import absltest
from absl.testing import parameterized
from enn import base
from enn import networks
from enn.data_noise import bootstrapping
import jax
import numpy as np
class BootstrappingTest(parameterized.TestCase):
@parameterized.parameters([
[networks.EnsembleIndexer(10), 'poisson'],
[networks.EnsembleIndexer(10), 'bernoulli'],
[networks.EnsembleIndexer(10), 'exponential'],
[networks.EnsembleIndexer(10), 'uniform'],
[networks.EnsembleIndexer(10), 'none'],
[networks.GaussianWithUnitIndexer(10), 'bernoulli'],
[networks.ScaledGaussianIndexer(10), 'bernoulli'],
[networks.ScaledGaussianIndexer(10), 'exponential'],
[networks.PrngIndexer(), 'poisson'],
[networks.PrngIndexer(), 'bernoulli'],
[networks.PrngIndexer(), 'exponential'],
[networks.PrngIndexer(), 'uniform'],
])
def test_average_weight_approx_one(self,
indexer: base.EpistemicIndexer,
distribution: str):
"""Check that the average weight of bootstrap sample approximately one."""
seed = 999
num_data = 10_000
tolerance = 1 # TODO(author2): Test fails at lower tolerance --> fix.
def init_fn(k, x, z):
del k, x, z
return {'lin': {'w': np.ones(1), 'b': np.ones(1)}}
fake_enn = networks.EnnNoState(
apply=lambda p, x, z: np.ones(1)[:, None],
init=init_fn,
indexer=indexer,
)
boot_fn = bootstrapping.make_boot_fn(fake_enn, distribution, seed=seed)
index = fake_enn.indexer(jax.random.PRNGKey(seed))
data_index = np.arange(num_data)[:, None]
batch_weights = jax.jit(boot_fn)(data_index, index)
# Check the quality of the bootstrap weights
assert np.all(batch_weights >= 0)
assert np.abs(1 - np.mean(batch_weights)) < tolerance
if __name__ == '__main__':
absltest.main()
|
enn-master
|
enn/data_noise/bootstrapping_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base classes for data noise process."""
from enn import base
from enn.datasets import base as ds_base
import typing_extensions
class DataNoiseBase(typing_extensions.Protocol[base.Data]):
def __call__(
self,
data: base.Data,
index: base.Index,
) -> base.Data:
"""Apply some noise process to a batch of data based on epistemic index."""
# DataNoiseBase specialized to work only with Batch data.
DataNoise = DataNoiseBase[ds_base.ArrayBatch]
def get_indexer(indexer: base.EpistemicIndexer) -> base.EpistemicIndexer:
  """Unwraps nested wrappers to return the innermost epistemic indexer."""
  while hasattr(indexer, 'indexer'):
    indexer = indexer.indexer
  return indexer
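# Added sketch (illustrative; `_ExampleWrappedIndexer` is hypothetical and not
# part of the library): get_indexer strips any number of wrapper layers that
# expose an `.indexer` attribute.
class _ExampleWrappedIndexer:
  def __init__(self, indexer: base.EpistemicIndexer):
    self.indexer = indexer
def _example_get_indexer(inner: base.EpistemicIndexer) -> base.EpistemicIndexer:
  wrapped = _ExampleWrappedIndexer(_ExampleWrappedIndexer(inner))
  # Both wrapper layers are removed, returning `inner` itself.
  return get_indexer(wrapped)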
|
enn-master
|
enn/data_noise/base.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for perturbing data with Gaussian noise."""
import dataclasses
from typing import Callable, Union
import chex
from enn import base
from enn import datasets
from enn import networks
from enn.data_noise import base as data_noise_base
import jax
import jax.numpy as jnp
_ENN = Union[networks.EnnNoState,
networks.EnnArray]
@dataclasses.dataclass
class GaussianTargetNoise(data_noise_base.DataNoise):
"""Apply Gaussian noise to the target y."""
enn: _ENN
noise_std: float
seed: int = 0
def __call__(self, data: datasets.ArrayBatch,
index: base.Index) -> datasets.ArrayBatch:
"""Apply Gaussian noise to the target y."""
chex.assert_shape(data.y, (None, 1)) # Only implemented for 1D now.
noise_fn = make_noise_fn(self.enn, self.noise_std, self.seed)
y_noise = noise_fn(data.data_index, index)
return dataclasses.replace(data, y=data.y + y_noise)
NoiseFn = Callable[[datasets.DataIndex, base.Index],
chex.Array]
def make_noise_fn(enn: _ENN,
noise_std: float,
seed: int = 0) -> NoiseFn:
"""Factory method to create noise_fn for given ENN."""
indexer = data_noise_base.get_indexer(enn.indexer)
if isinstance(indexer, networks.EnsembleIndexer):
return _make_ensemble_gaussian_noise(noise_std, seed)
elif isinstance(indexer, networks.GaussianIndexer):
return _make_gaussian_index_noise(indexer.index_dim, noise_std, seed)
elif isinstance(indexer, networks.ScaledGaussianIndexer):
return _make_scaled_gaussian_index_noise(indexer.index_dim, noise_std, seed)
elif isinstance(indexer, networks.GaussianWithUnitIndexer):
# Ignore the first component which is always 1 and not Gaussian.
effective_index_dim = indexer.index_dim - 1
raw_noise = _make_scaled_gaussian_index_noise(
effective_index_dim, noise_std, seed)
noise_fn = lambda d, z: raw_noise(d, z[1:]) # Don't include unit component.
return noise_fn
else:
raise ValueError(f'Unsupported ENN={enn}.')
def _make_key(data_index: chex.Array, seed: int) -> chex.PRNGKey:
"""Creates RngKeys for a batch of data index."""
chex.assert_shape(data_index, (None, 1))
return jax.vmap(jax.random.PRNGKey)(jnp.squeeze(data_index, axis=1) + seed)
def _make_ensemble_gaussian_noise(noise_std: float, seed: int) -> NoiseFn:
"""Factory method to add Gaussian noise for ensemble index."""
batch_fold_in = jax.vmap(jax.random.fold_in)
batch_normal = jax.vmap(jax.random.normal)
def noise_fn(data_index: datasets.DataIndex,
index: base.Index) -> chex.Array:
"""Assumes integer index for ensemble."""
chex.assert_shape(data_index, (None, 1))
if not index.shape: # If it's a single integer -> repeat for batch
index = jnp.repeat(index, len(data_index))
data_keys = _make_key(data_index, seed)
batch_keys = batch_fold_in(data_keys, index)
samples = batch_normal(batch_keys)[:, None]
chex.assert_equal_shape([samples, data_index])
return samples * noise_std
return noise_fn
def _make_scaled_gaussian_index_noise(
index_dim: int,
noise_std: float,
seed: int) -> NoiseFn:
"""Factory method to add Gaussian noise for index MLP."""
std_gauss = lambda x: jax.random.normal(x, [index_dim])
sample_std_gaussian = jax.vmap(std_gauss)
def noise_fn(data_index: datasets.DataIndex,
index: base.Index) -> chex.Array:
"""Assumes scaled Gaussian index with reserved first component."""
chex.assert_shape(data_index, (None, 1))
b_keys = _make_key(data_index, seed)
b = sample_std_gaussian(b_keys)
# Expanding the index to match the batch
batch_size = data_index.shape[0]
z = jnp.repeat(jnp.expand_dims(index, 0), batch_size, axis=0)
chex.assert_shape(z, [batch_size, index_dim])
noise = jnp.sum(b * z, axis=1, keepdims=True) * noise_std
chex.assert_equal_shape([noise, data_index])
return noise
return noise_fn
def _make_gaussian_index_noise(
index_dim: int,
noise_std: float,
seed: int,
) -> NoiseFn:
"""Factory method for Gaussian indexer."""
def sample_sphere(key: chex.PRNGKey) -> chex.Array:
x = jax.random.normal(key, shape=[index_dim])
return x / jnp.sqrt(jnp.sum(x ** 2))
batch_sample_sphere = jax.vmap(sample_sphere)
def noise_fn(data_index: datasets.DataIndex,
index: base.Index) -> chex.Array:
"""Assumes scaled Gaussian index with reserved first component."""
chex.assert_shape(data_index, (None, 1))
b_keys = _make_key(data_index, seed)
b = batch_sample_sphere(b_keys)
# Expanding the index to match the batch
batch_size = data_index.shape[0]
z = jnp.repeat(jnp.expand_dims(index, 0), batch_size, axis=0)
chex.assert_shape(z, [batch_size, index_dim])
noise = jnp.sum(b * z, axis=1, keepdims=True) * noise_std
chex.assert_equal_shape([noise, data_index])
return noise
return noise_fn
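# Added usage sketch (illustrative; `my_enn` and `batch` are hypothetical
# stand-ins for a real ENN and a batch with 1-D targets).
def _example_gaussian_target_noise(my_enn: _ENN,
                                   batch: datasets.ArrayBatch,
                                   key: chex.PRNGKey) -> datasets.ArrayBatch:
  noiser = GaussianTargetNoise(enn=my_enn, noise_std=0.1, seed=0)
  index = my_enn.indexer(key)
  # Returns the batch with index-dependent Gaussian noise added to targets y.
  return noiser(batch, index)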
|
enn-master
|
enn/data_noise/gaussian.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ENN Networks."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from enn.extra import kmeans
import haiku as hk
import jax
import jax.numpy as jnp
class KmeansTest(parameterized.TestCase):
@parameterized.product(
num_x=[10, 100],
dim_x=[1, 5],
num_centroids=[1, 3],
)
def test_kmeans_runs(self, num_x: int, dim_x: int, num_centroids: int):
"""Test that KMeans clustering runs and has no nan."""
rng = hk.PRNGSequence(999)
x = jax.random.normal(next(rng), [num_x, dim_x])
kmeans_cluster = kmeans.KMeansCluster(
num_centroids=num_centroids,
num_iterations=100,
key=next(rng),
)
output = kmeans_cluster.fit(x)
chex.assert_shape(output.centroids, [num_centroids, dim_x])
chex.assert_shape(output.counts_per_centroid, [num_centroids])
chex.assert_shape(output.std_distance, [num_centroids])
assert jnp.all(jnp.isfinite(output.centroids))
assert jnp.all(jnp.isfinite(output.counts_per_centroid))
assert jnp.all(jnp.isfinite(output.std_distance))
if __name__ == '__main__':
absltest.main()
|
enn-master
|
enn/extra/kmeans_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for VAE."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from enn import utils
from enn.extra import vae
class VaeTest(parameterized.TestCase):
@parameterized.product(bernoulli_decoder=[True, False], latent_dim=[1, 3])
def test_vae_outputs(self, bernoulli_decoder: bool, latent_dim: int):
"""Train a VAE on a test dataset and test encoder decoder functions."""
dataset = utils.make_test_data(10)
data = next(dataset)
num_train, input_dim = data.x.shape
config = vae.MLPVAEConfig(hidden_sizes=[5, 2],
latent_dim=latent_dim,
bernoulli_decoder=bernoulli_decoder,
num_batches=100,
batch_size=10)
trained_vae = vae.get_mlp_vae_encoder_decoder(
data_x=data.x, config=config)
latents = trained_vae.encoder(data.x)
chex.assert_shape([latents.mean, latents.log_var],
(num_train, config.latent_dim))
reconstructions = trained_vae.decoder(latents.mean)
chex.assert_shape([reconstructions.mean, reconstructions.log_var],
(num_train, input_dim))
if __name__ == '__main__':
absltest.main()
|
enn-master
|
enn/extra/vae_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing the public methods."""
# Kmeans
from enn.extra.kmeans import KMeansCluster
from enn.extra.kmeans import KMeansOutput
# VAE
from enn.extra.vae import get_mlp_vae_encoder_decoder
from enn.extra.vae import MeanLogVariance
from enn.extra.vae import MLPVAEConfig
from enn.extra.vae import TrainedVAE
|
enn-master
|
enn/extra/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Jax implementation of KMeans clustering."""
import dataclasses
from typing import NamedTuple, Tuple
import chex
import jax
import jax.numpy as jnp
class KMeansOutput(NamedTuple):
  centroids: chex.Array  # Centroids found by algorithm: [num_centroid, dim_x]
counts_per_centroid: chex.Array # Counts per centroid: [num_centroid]
std_distance: chex.Array # Std distance to centroid: [num_centroid]
classes: chex.Array # Cluster index of data: [num_data_samples]
@dataclasses.dataclass
class KMeansCluster:
"""Performs KMeans clustering on data."""
num_centroids: int
num_iterations: int
key: chex.PRNGKey
def fit(self, x: chex.Array) -> KMeansOutput:
"""Fits KMeans cluster to given data."""
# Initialize centroids randomly
random_idx = jax.random.choice(
self.key, x.shape[0], [self.num_centroids], replace=False)
initial_centroids = x[random_idx, :]
initial_state = _TrainingState(initial_centroids, iter=0)
    # Perform KMeans via jax.lax.while_loop
cond_fn = lambda state: state.iter < self.num_iterations
body_fn = lambda state: kmeans_iteration(x, state)
final_state = jax.lax.while_loop(cond_fn, body_fn, initial_state)
return jax.jit(compute_output)(x, final_state)
class _TrainingState(NamedTuple):
centroids: chex.Array # Centroids: [num_centroid, dim_x]
iter: int # Training iteration
def get_classes_and_distances(
x: chex.Array, centroids: chex.Array) -> Tuple[chex.Array, chex.Array]:
"""Assigns x to nearest centroid and computes distance to each centroid."""
chex.assert_rank([x, centroids], 2)
num_x, dim_x = x.shape
num_centroids, dim_centroids = centroids.shape
chex.assert_equal(dim_x, dim_centroids)
norm_per_x_per_class = jax.vmap(jax.vmap(jnp.linalg.norm))
distances = norm_per_x_per_class(
jnp.expand_dims(centroids, 0) - jnp.expand_dims(x, 1))
chex.assert_shape(distances, (num_x, num_centroids))
classes = jnp.argmin(distances, axis=1)
chex.assert_shape(classes, [num_x])
return classes, distances
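# Added worked sketch (illustrative, not part of the original file): with two
# 1-D centroids at 0 and 1, each point is assigned to the closer centroid.
def _example_classes_and_distances() -> Tuple[chex.Array, chex.Array]:
  x = jnp.array([[0.1], [0.9]])
  centroids = jnp.array([[0.0], [1.0]])
  classes, distances = get_classes_and_distances(x, centroids)
  # classes == [0, 1] and distances has shape (2, 2).
  return classes, distances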
def _safe_divide(numerator: chex.Array, denominator: chex.Array) -> chex.Array:
safe_denom = jnp.maximum(denominator, 1e-6)
return numerator / safe_denom
def kmeans_iteration(x: chex.Array, state: _TrainingState) -> _TrainingState:
"""Performs one iteration of kmeans clustering."""
num_x, dim_x = x.shape
num_centroids = state.centroids.shape[0]
# Form one-hot masks
classes, _ = get_classes_and_distances(x, centroids=state.centroids)
one_hot_centroids = jax.nn.one_hot(classes, num_centroids)
chex.assert_shape(one_hot_centroids, [num_x, num_centroids])
# Take mean over classes for new centroids.
masked_x = x[:, None, :] * one_hot_centroids[:, :, None]
chex.assert_shape(masked_x, [num_x, num_centroids, dim_x])
sum_per_centroid = jnp.sum(masked_x, axis=0)
count_per_centroid = jnp.sum(one_hot_centroids, axis=0)
new_centroids = _safe_divide(sum_per_centroid, count_per_centroid[:, None])
chex.assert_shape(new_centroids, [num_centroids, dim_x])
return _TrainingState(new_centroids, state.iter + 1)
def compute_output(x: chex.Array, state: _TrainingState) -> KMeansOutput:
"""Parse the final output, which includes std per class."""
# Pulling out shapes
num_centroids = state.centroids.shape[0]
# Computing distances
classes, distances = get_classes_and_distances(x, state.centroids)
one_hot_centroids = jax.nn.one_hot(classes, num_centroids)
chex.assert_equal_shape([distances, one_hot_centroids])
# Std per class
counts_per_centroid = jnp.sum(one_hot_centroids, axis=0)
masked_sq_distances = jnp.square(one_hot_centroids * distances)
total_sq_distance_per_class = jnp.sum(masked_sq_distances, axis=0)
std_distance = _safe_divide(total_sq_distance_per_class, counts_per_centroid)
return KMeansOutput(state.centroids, counts_per_centroid,
std_distance, classes)
|
enn-master
|
enn/extra/kmeans.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Jax implementation of Variational Autoencoder (VAE)."""
import dataclasses
from typing import Callable, NamedTuple, Sequence
import chex
from enn import base
from enn import datasets
from enn import losses
from enn import networks
from enn import supervised
from enn import utils
import haiku as hk
import jax
from jax import numpy as jnp
import optax
class MeanLogVariance(NamedTuple):
mean: chex.Array # Mean value output
log_var: chex.Array # Log of variance (same shape as mean).
PreTransformFn = Callable[[chex.Array], MeanLogVariance]
PostTransformFn = Callable[[chex.Array], MeanLogVariance]
class TrainedVAE(NamedTuple):
encoder: PostTransformFn # Maps inputs to mean, log_var in latent
decoder: PostTransformFn # Maps latent to mean, log_var in reconstruction
@dataclasses.dataclass
class MLPVAEConfig:
"""Configures training for an MLP VAE."""
hidden_sizes: Sequence[int] = (256, 64)
latent_dim: int = 2
activation: Callable[[chex.Array], chex.Array] = jax.nn.tanh
bernoulli_decoder: bool = True
num_batches: int = 10_000
batch_size: int = 1_000
learning_rate: float = 1e-3
def get_mlp_vae_encoder_decoder(
data_x: chex.Array,
config: MLPVAEConfig = MLPVAEConfig(),
) -> TrainedVAE:
"""Trains an MLP VAE on given data according to config."""
_, input_dim = data_x.shape
def mlp_encoder(x: chex.Array) -> MeanLogVariance:
"""Encoder for VAE. Outputs mean and log_variance in latent space."""
x = hk.Flatten()(x)
for hidden_size in config.hidden_sizes:
x = hk.Linear(hidden_size, name='encoder')(x)
x = config.activation(x)
mean = hk.Linear(config.latent_dim, name='encoder_mean')(x)
log_var = hk.Linear(config.latent_dim, name='encoder_log_var')(x)
return MeanLogVariance(mean, log_var)
def mlp_decoder(x: chex.Array) -> MeanLogVariance:
"""Decoder for VAE. Outputs mean, log_var for an input in latent space."""
for hidden_size in config.hidden_sizes[::-1]:
x = hk.Linear(hidden_size, name='decoder')(x)
x = config.activation(x)
mean = hk.Linear(input_dim, name='decoder_mean')(x)
if config.bernoulli_decoder:
log_var = jnp.zeros_like(mean)
else:
log_var = hk.Linear(input_dim, name='decoder_log_var')(x)
return MeanLogVariance(mean, log_var)
# Train the VAE
return train_vae(
encoder=mlp_encoder,
decoder=mlp_decoder,
latent_dim=config.latent_dim,
data_x=data_x,
log_likelihood_fn=losses.get_log_likelihood_fn(config.bernoulli_decoder),
optimizer=optax.adam(config.learning_rate),
num_batches=config.num_batches,
batch_size=config.batch_size,
)
def make_vae_enn(encoder: PreTransformFn, decoder: PreTransformFn,
latent_dim: int) -> networks.EnnArray:
"""Factory method to create and transform ENN from encoder/decoder."""
def net_fn(x: chex.Array, z: base.Index) -> networks.OutputWithPrior:
# Encoder
latent_mean, latent_log_var = encoder(x)
chex.assert_shape([latent_mean, latent_log_var], [x.shape[0], latent_dim])
# Generate a random vector based on encoder outputs
latent_std = jnp.exp(0.5 * latent_log_var)
latent = latent_mean + jnp.einsum('bi,i->bi', latent_std, z)
# Decoder
out_mean, out_log_var = decoder(latent)
vae_outputs = {'latent_mean': latent_mean, 'latent_log_var': latent_log_var,
'out_mean': out_mean, 'out_log_var': out_log_var}
return networks.OutputWithPrior(train=out_mean, extra=vae_outputs)
transformed = hk.without_apply_rng(hk.transform_with_state(net_fn))
indexer = networks.GaussianIndexer(latent_dim)
return networks.EnnArray(transformed.apply, transformed.init, indexer)
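# Added note (illustrative, not from the library): sampling the latent as
# mean + std * z, with z provided by the Gaussian indexer, is the standard
# reparameterisation trick, which keeps the sampling step differentiable.
def _example_reparameterisation(key: chex.PRNGKey) -> chex.Array:
  latent_mean = jnp.zeros([4, 2])
  latent_log_var = jnp.zeros([4, 2])
  z = jax.random.normal(key, [2])  # Plays the role of the epistemic index.
  latent_std = jnp.exp(0.5 * latent_log_var)
  return latent_mean + jnp.einsum('bi,i->bi', latent_std, z)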
def train_vae(
encoder: PreTransformFn,
decoder: PreTransformFn,
latent_dim: int,
data_x: chex.Array,
log_likelihood_fn: losses.LogLikelihoodFn,
optimizer: optax.GradientTransformation,
num_batches: int = 10_000,
batch_size: int = 1_000,
) -> TrainedVAE:
"""Given a vae and data, this function outputs trained encoder, decoder."""
num_train, input_dim = data_x.shape
dummy_y = jnp.zeros(shape=(num_train,))
dataset = utils.make_batch_iterator(
datasets.ArrayBatch(x=data_x, y=dummy_y), batch_size
)
# Create loss function
single_loss = losses.VaeLoss(log_likelihood_fn, losses.latent_kl_fn)
loss_fn = losses.average_single_index_loss(single_loss, num_index_samples=1)
# Train VAE by gradient descent for num_batches and extract parameters.
experiment = supervised.Experiment(
enn=make_vae_enn(encoder, decoder, latent_dim),
loss_fn=loss_fn,
optimizer=optimizer,
dataset=dataset,
train_log_freq=max(int(num_batches / 100), 1),
)
experiment.train(num_batches)
params = experiment.state.params
# Form an encoder function from these parameters
transformed_encoder = hk.without_apply_rng(hk.transform(encoder))
def encoder_fn(x: chex.Array) -> MeanLogVariance:
latent = transformed_encoder.apply(params, x)
chex.assert_shape([latent.mean, latent.log_var], (x.shape[0], latent_dim))
return latent
  # Form a decoder function from these parameters
transformed_decoder = hk.without_apply_rng(hk.transform(decoder))
def decoder_fn(x: chex.Array) -> MeanLogVariance:
reconstruction = transformed_decoder.apply(params, x)
chex.assert_shape([reconstruction.mean, reconstruction.log_var],
(x.shape[0], input_dim))
return reconstruction
return TrainedVAE(jax.jit(encoder_fn), jax.jit(decoder_fn))
|
enn-master
|
enn/extra/vae.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""JAX module for normalization with accumulated statistics."""
import haiku as hk
import jax.numpy as jnp
import jraph
class AccumulatedNormalizer(hk.Module):
"""Feature normalizer that accumulates statistics for normalization.
It will accumulate statistics using float32 variables, and will return
  the mean and std. It accumulates statistics until the accumulate method has
  been called `max_num_accumulations` times or until the total number of batch
  elements processed exceeds `max_example_count`.
To enable full GPU compatibility the number of accumulations is stored as a
float32. As this number is incremented one by one, we require
`max_num_accumulations` to be smaller than the highest float32 number that
maintains integer precision (16777216).
"""
def __init__(
self,
*,
std_epsilon: float = 1e-5,
name: str = 'accumulated_normalizer',
):
"""Inits the module.
Args:
std_epsilon: minimum value of the standard deviation to use.
name: Name of the module.
"""
super().__init__(name=name)
self._accumulator_shape = None
self._std_epsilon = std_epsilon
def __call__(self, batched_data: jnp.array) -> jnp.ndarray:
"""Direct transformation of the normalizer."""
self._set_accumulator_shape(batched_data)
return (batched_data - self.mean) / self.std_with_epsilon
def inverse(self, normalized_batch_data: jnp.ndarray) -> jnp.ndarray:
"""Inverse transformation of the normalizer."""
self._set_accumulator_shape(normalized_batch_data)
return normalized_batch_data * self.std_with_epsilon + self.mean
def _set_accumulator_shape(self, batched_sample_data: jnp.ndarray):
self._accumulator_shape = batched_sample_data.shape[-1]
def _verify_module_connected(self):
if self._accumulator_shape is None:
raise RuntimeError(
'Trying to read the mean before connecting the module.')
@property
def _acc_sum(self):
return hk.get_state(
'acc_sum', self._accumulator_shape, dtype=jnp.float32, init=jnp.zeros)
@property
def _acc_count(self):
return hk.get_state('acc_count', (), dtype=jnp.float32, init=jnp.zeros)
@property
def _acc_sum_squared(self):
return hk.get_state(
'acc_sum_squared',
self._accumulator_shape,
dtype=jnp.float32,
init=jnp.zeros)
@property
def _safe_count(self):
# To ensure count is at least one and avoid nan's.
return jnp.maximum(self._acc_count, 1.)
@property
def mean(self):
self._verify_module_connected()
return self._acc_sum / self._safe_count
@property
def std(self):
self._verify_module_connected()
var = self._acc_sum_squared / self._safe_count - self.mean**2
var = jnp.maximum(var, 0.) # Prevent negatives due to numerical precision.
return jnp.sqrt(var)
@property
def std_with_epsilon(self):
# To use in case the std is too small.
return jnp.maximum(self.std, self._std_epsilon)
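# Added usage sketch (illustrative, not part of the original file). The
# normalizer is a stateful Haiku module, so it must run inside
# hk.transform_with_state; with freshly initialised (zero) statistics the mean
# is zero and the std falls back to `std_epsilon`.
def _example_accumulated_normalizer() -> jnp.ndarray:
  def forward(x):
    return AccumulatedNormalizer()(x)
  transformed = hk.transform_with_state(forward)
  x = jnp.ones([4, 3])
  params, state = transformed.init(None, x)
  normalized, _ = transformed.apply(params, state, None, x)
  return normalized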
class GraphElementsNormalizer(hk.Module):
"""Online normalization of individual graph components of a GraphsTuple.
Can be used to normalize individual node, edge, and global arrays.
"""
def __init__(self,
template_graph: jraph.GraphsTuple,
is_padded_graph: bool,
name: str = 'graph_elements_normalizer'):
"""Inits the module.
Args:
template_graph: Input template graph to compute edge/node/global padding
masks.
is_padded_graph: Whether the graph has padding.
name: Name of the Haiku module.
"""
super().__init__(name=name)
self._node_mask = None
self._edge_mask = None
self._graph_mask = None
if is_padded_graph:
self._node_mask = jraph.get_node_padding_mask(template_graph)
self._edge_mask = jraph.get_edge_padding_mask(template_graph)
self._graph_mask = jraph.get_graph_padding_mask(template_graph)
self._names_used = []
def _run_normalizer(
self, name: str, array: jnp.array, mask: jnp.array
) -> jnp.array:
if name in self._names_used:
raise ValueError(
f'Attempt to reuse name {name}. Used names: {self._names_used}')
self._names_used.append(name)
normalizer = AccumulatedNormalizer(name=name)
return normalizer(array)
def normalize_node_array(self, name: str, array: jnp.array) -> jnp.array:
return self._run_normalizer(name, array, self._node_mask)
def normalize_edge_array(self, name: str, array: jnp.array) -> jnp.array:
return self._run_normalizer(name, array, self._edge_mask)
|
gnn_single_rigids-main
|
gnn_single_rigids/src/normalizers.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for featurizing graph network and running dynamics."""
from typing import Tuple
import jax.numpy as jnp
import jraph
import numpy as np
import scipy.linalg
import scipy.spatial.transform
import tree
from gnn_single_rigids.src import normalizers
def flatten_features(
input_graph: jraph.GraphsTuple,
is_padded_graph: bool,
floor_clamp_dist: float,
floor_height: float = 0.0) -> jraph.GraphsTuple:
"""Returns GraphsTuple with a single array of features per node/edge type."""
# Normalize the elements of the graph.
normalizer = normalizers.GraphElementsNormalizer(
template_graph=input_graph, is_padded_graph=is_padded_graph)
output_nodes = {}
output_edges = {}
# Extract important features from the position_sequence.
position_sequence = input_graph.nodes["world_position"]
input_nodes = input_graph.nodes
velocity_sequence = time_diff(position_sequence) # Finite-difference.
# Collect node features.
node_feats = []
# Normalized velocity sequence, flattening spatial axis.
flat_velocity_sequence = jnp.reshape(velocity_sequence,
[velocity_sequence.shape[0], -1])
# Normalize velocity and add to features
node_feats.append(
normalizer.normalize_node_array("velocity", flat_velocity_sequence),)
# External mask.
node_feats.append(input_nodes["external_mask"][:, None].astype(jnp.float32))
# Distance to the floor.
floor_dist = input_nodes["world_position"][:, -1, 2:3]
floor_dist = jnp.clip(floor_dist - floor_height, a_max=floor_clamp_dist)
node_feats.append(normalizer.normalize_node_array("floor_dist", floor_dist))
# Rest position
mesh_position = input_nodes["mesh_position"]
node_feats.append(
normalizer.normalize_node_array("mesh_position", mesh_position),)
# global position
node_position = input_nodes["world_position"][:, -1]
output_nodes = jnp.concatenate(node_feats, axis=-1)
# mesh edges
mesh_edge_feats = []
# add relative edge distances + norm
rel_dist = (
node_position[input_graph.receivers] -
node_position[input_graph.senders])
mesh_edge_feats.append(
normalizer.normalize_edge_array("rel_dist", rel_dist),)
norm = safe_edge_norm(rel_dist, input_graph, is_padded_graph, keepdims=True)
mesh_edge_feats.append(
normalizer.normalize_edge_array("rel_dist_norm", norm))
# add relative rest edge distances + norm
rel_dist = (
mesh_position[input_graph.receivers] -
mesh_position[input_graph.senders])
mesh_edge_feats.append(
normalizer.normalize_edge_array("rest_dist", rel_dist))
norm = safe_edge_norm(rel_dist, input_graph, is_padded_graph, keepdims=True)
mesh_edge_feats.append(
normalizer.normalize_edge_array("rest_dist_norm", norm))
# flatten features for graph network
output_edges = jnp.concatenate(mesh_edge_feats, axis=-1)
return input_graph._replace(nodes=output_nodes, edges=output_edges)
def time_diff(input_sequence):
"""Returns time difference between successive timepoints."""
return jnp.diff(input_sequence, axis=1)
def safe_edge_norm(
array: jnp.array,
graph: jraph.GraphsTuple,
is_padded_graph: bool,
keepdims=False,
) -> jnp.array:
"""Compute vector norm, preventing nans in padding elements."""
# In the padding graph all edges are connected to the same node with the
# same position, this means that when computing the norm of the relative
# distances we end up with situations f(x) = norm(x-x). The gradient of this
# function should be 0. However, when applying backprop the norm function is
# not differentiable at zero. To avoid this, we simply add an epsilon to the
# padding graph.
if is_padded_graph:
padding_mask = jraph.get_edge_padding_mask(graph)
epsilon = 1e-8
perturb = jnp.logical_not(padding_mask) * epsilon
array += jnp.expand_dims(perturb, range(1, len(array.shape)))
return jnp.linalg.norm(array, axis=-1, keepdims=keepdims)
def _shape_matching(
x: jnp.array, x0: jnp.array
) -> Tuple[jnp.array, jnp.array]:
"""Calculates global transformation that best matches the rest shape [PBD]."""
# compute the center of mass (assuming shape is symmetric)
t0 = x0.mean(axis=0, keepdims=True)
tx = x.mean(axis=0, keepdims=True)
# get nodes centered at zero
q = x0 - t0
p = x - tx
# solve the system to find best transformation that matches the rest shape
mat_pq = np.dot(p.T, q)
rx, _ = scipy.linalg.polar(mat_pq)
# convert rotation to scipy transform
rx_matx = scipy.spatial.transform.Rotation.from_matrix(rx)
trans = tx - t0
return trans, rx_matx
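# Added worked sketch (illustrative, not part of the original file): recover a
# known rigid transform of a symmetric point set via the polar decomposition.
def _example_shape_matching():
  x0 = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.],
                 [-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]])
  rot = scipy.spatial.transform.Rotation.from_euler('z', 90, degrees=True)
  x = rot.apply(x0) + np.array([[0., 0., 2.]])
  trans, recovered_rot = _shape_matching(x, x0)
  # trans ~= [0, 0, 2] and recovered_rot.as_matrix() ~= rot.as_matrix().
  return trans, recovered_rot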
def forward_graph(
graph_with_prediction: jraph.GraphsTuple,
next_gt_graph: jraph.GraphsTuple,
shape_matching_inference: bool = True,
) -> jraph.GraphsTuple:
"""Updates the graph with input predictions.
Args:
graph_with_prediction: GraphsTuple with predictions from network for
updated node positions at next time-step.
next_gt_graph: GraphsTuple representing the ground truth graph at the next
time-step.
shape_matching_inference: If set to true, uses shape matching to maintain
object shape across time-step by finding best global object translation/
rotation to maintain rest shapes and respect node position predictions.
Returns:
next_graph: GraphsTuple with updated node positions.
"""
node_features = graph_with_prediction.nodes
node_predictions_world_pos = node_features["p:world_position"]
if shape_matching_inference:
rest_pos = node_features["mesh_position"]
center = jnp.mean(rest_pos, axis=0, keepdims=True)
trans, rot = _shape_matching(node_predictions_world_pos, rest_pos - center)
node_predictions_world_pos = rot.apply(rest_pos - center) + trans
new_position_with_history = jnp.concatenate(
[
node_features["world_position"][:, 1:],
node_predictions_world_pos[:, jnp.newaxis],
],
axis=1,
)
# copy graph structure
next_graph = tree.map_structure(lambda x: x, next_gt_graph)
# update world positions
next_graph.nodes["world_position"] = new_position_with_history
return next_graph
|
gnn_single_rigids-main
|
gnn_single_rigids/src/utils.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Rollout functions for producing a graph net simulator rollout."""
from typing import Any, Dict, Sequence, Callable, Mapping, Tuple
import haiku as hk
import jax
import jraph
HaikuModel = Callable[
[jraph.GraphsTuple], Tuple[jraph.GraphsTuple, Mapping[str, Any]]
]
def _single_step(
simulator_state: jraph.GraphsTuple,
dynamics_fn: Callable[
[jraph.GraphsTuple], jraph.GraphsTuple],
forward_graph_fn: Callable[
[jraph.GraphsTuple, jraph.GraphsTuple], jraph.GraphsTuple
],
next_simulator_state: jraph.GraphsTuple,
) -> jraph.GraphsTuple:
"""Rollout step."""
# Compute the future for dynamics features, with a padded graph.
simulator_state = jraph.pad_with_graphs(
simulator_state,
n_node=simulator_state.n_node.sum() + 1,
n_edge=simulator_state.n_edge.sum() + 1,
n_graph=simulator_state.n_node.shape[0] + 1)
dynamics_output = dynamics_fn(simulator_state)
simulator_state.nodes["p:world_position"] = dynamics_output.nodes[
"p:world_position"]
simulator_state = jraph.unpad_with_graphs(simulator_state)
# Forward the predictions to build the next graph.
return forward_graph_fn(simulator_state, next_simulator_state)
def _rollout(
ground_truth_trajectory: Sequence[jraph.GraphsTuple],
dynamics_fn: Callable[
[jraph.GraphsTuple], jraph.GraphsTuple],
forward_graph_fn: Callable[
[jraph.GraphsTuple, jraph.GraphsTuple], jraph.GraphsTuple
],
) -> Sequence[jraph.GraphsTuple]:
"""Rolls out a model over a trajectory by feeding its own predictions."""
output_sequence = [ground_truth_trajectory[0]]
for next_simulator_state in ground_truth_trajectory[1:]:
output = _single_step(
output_sequence[-1],
dynamics_fn=dynamics_fn,
forward_graph_fn=forward_graph_fn,
next_simulator_state=next_simulator_state)
output_sequence.append(output)
return output_sequence
def get_predicted_trajectory(input_trajectory: Sequence[jraph.GraphsTuple],
network_weights: Dict[Any, Any],
haiku_model_fn: Callable[[], HaikuModel],
forward_graph_fn) -> Sequence[jraph.GraphsTuple]:
"""Returns rollout trajectory given input trajectory and model information.
Args:
input_trajectory: a trajectory of jraph.GraphsTuples representing a
sequence of states.
network_weights: The learned simulator model parameters.
haiku_model_fn: The haiku model function representing the learned simulator.
    forward_graph_fn: Function that merges the model predictions into the next
      ground truth graph to produce the next simulator state.
Returns:
rollout_trajectory: The predicted trajectory based on the first entry of
input_trajectory.
"""
network_state = network_weights["state"]
params = network_weights["params"]
@hk.transform_with_state
def forward(input_):
model = haiku_model_fn()
return model(input_)
@jax.jit
def dynamics_fn(input_):
output, unused_network_state = forward.apply(params, network_state,
jax.random.PRNGKey(42), input_)
return output
return _rollout(
input_trajectory,
dynamics_fn=dynamics_fn,
forward_graph_fn=forward_graph_fn)
|
gnn_single_rigids-main
|
gnn_single_rigids/src/rollout.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX implementation of Graph Networks Simulator.
JAX equivalent of:
https://github.com/deepmind/deepmind-research/blob/master/learning_to_simulate/learned_simulator.py
"""
from typing import Any, Mapping
import haiku as hk
import jax.numpy as jnp
import jraph
from gnn_single_rigids.src import graph_network
from gnn_single_rigids.src import normalizers
def _euler_integrate_position(
position_sequence: jnp.array, finite_diff_estimate: jnp.array
) -> jnp.array:
"""Integrates finite difference estimate to position (assuming dt=1)."""
# Uses an Euler integrator to go from position(order=0), velocity(order=1)
# or acceleration(order=2) to position, assuming dt=1 corresponding to
# the size of the finite difference.
previous_position = position_sequence[:, -1]
previous_velocity = previous_position - position_sequence[:, -2]
next_acceleration = finite_diff_estimate
next_velocity = previous_velocity + next_acceleration
next_position = previous_position + next_velocity
return next_position
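# Added worked sketch (illustrative): with one node whose last two positions
# are 0 and 1 (previous velocity 1) and a predicted acceleration of 0.5, the
# next velocity is 1.5 and the next position is 1 + 1.5 = 2.5.
def _example_euler_integration() -> jnp.ndarray:
  position_sequence = jnp.array([[[0.0], [1.0]]])  # [num_nodes=1, time=2, dim=1]
  finite_diff_estimate = jnp.array([[0.5]])  # Predicted acceleration.
  return _euler_integrate_position(position_sequence, finite_diff_estimate)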
class LearnedSimulator(hk.Module):
"""Learned simulator from https://arxiv.org/pdf/2002.09405.pdf."""
def __init__(self,
*,
graph_network_kwargs: Mapping[str, Any],
flatten_features_fn=None,
name="LearnedSimulator"):
"""Inits the model.
Args:
graph_network_kwargs: Keyword arguments to pass to the learned part of the
graph network `model.EncodeProcessDecode`.
flatten_features_fn: Function that takes the input graph and dataset
metadata, and returns a graph where node and edge features are a single
array of rank 2, and without global features. The function will be
wrapped in a haiku module, which allows the flattening fn to instantiate
its own variable normalizers.
name: Name of the Haiku module.
"""
super().__init__(name=name)
self._graph_network_kwargs = graph_network_kwargs
self._graph_network = None
# Wrap flatten function in a Haiku module, so any haiku modules created
# by the function are reused in case of multiple calls.
self._flatten_features_fn = hk.to_module(flatten_features_fn)(
name="flatten_features_fn")
def _maybe_build_modules(self, input_graph: jraph.GraphsTuple):
if self._graph_network is None:
num_dimensions = input_graph.nodes["world_position"].shape[-1]
self._graph_network = graph_network.EncodeProcessDecode(
name="encode_process_decode",
node_output_size=num_dimensions,
**self._graph_network_kwargs)
self._target_normalizer = normalizers.AccumulatedNormalizer(
name="target_normalizer")
def __call__(
self, input_graph: jraph.GraphsTuple, padded_graph: bool = True
) -> jraph.GraphsTuple:
self._maybe_build_modules(input_graph)
flat_graphs_tuple = self._encoder_preprocessor(
input_graph, padded_graph=padded_graph)
normalized_prediction = self._graph_network(flat_graphs_tuple).nodes
next_position = self._decoder_postprocessor(normalized_prediction,
input_graph)
return input_graph._replace(
nodes={"p:world_position": next_position},
edges={},
globals={},
senders=input_graph.senders[:0],
receivers=input_graph.receivers[:0],
n_edge=(input_graph.n_edge * 0),
)
def _encoder_preprocessor(
      self, input_graph: jraph.GraphsTuple, padded_graph: bool
) -> jraph.GraphsTuple:
graph_with_flat_features = self._flatten_features_fn(
input_graph, is_padded_graph=padded_graph)
return graph_with_flat_features
def _decoder_postprocessor(
self, normalized_prediction: jnp.array, input_graph: jraph.GraphsTuple
) -> jnp.array:
position_sequence = input_graph.nodes["world_position"]
# The model produces the output in normalized space so we apply inverse
# normalization.
prediction = self._target_normalizer.inverse(normalized_prediction)
new_position = _euler_integrate_position(position_sequence, prediction)
return new_position
|
gnn_single_rigids-main
|
gnn_single_rigids/src/learned_simulator.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Meshtools for creating and manipulating meshes."""
from typing import NamedTuple, Tuple
import numpy as np
class Mesh(NamedTuple):
"""Mesh object.
Attributes:
verts: [num_vertices, num_dims] containing vertex positions for mesh nodes.
    faces: [num_faces, face_size] contains indices joining sets of vertices.
      Supports triangles (face_size=3) and quads (face_size=4).
"""
verts: np.ndarray
faces: np.ndarray
def make_xy_plane() -> Mesh:
"""Creates a unit plane in x/y."""
verts = np.array([[0.5, -0.5, 0],
[-0.5, 0.5, 0],
[-0.5, -0.5, 0],
[0.5, 0.5, 0]])
tris = np.array([[0, 1, 2], [0, 3, 1]])
return Mesh(verts, tris)
def make_unit_box() -> Mesh:
"""Creates a unit box."""
verts = np.array([[-0.5, -0.5, -0.5],
[0.5, -0.5, -0.5],
[-0.5, 0.5, -0.5],
[0.5, 0.5, -0.5],
[-0.5, -0.5, 0.5],
[0.5, -0.5, 0.5],
[-0.5, 0.5, 0.5],
[0.5, 0.5, 0.5]])
quads = np.array([[2, 3, 1, 0],
[4, 5, 7, 6],
[3, 7, 5, 1],
[4, 6, 2, 0],
[6, 7, 3, 2],
[1, 5, 4, 0]])
return Mesh(verts, quads)
def triangulate(faces: np.array) -> np.array:
"""Splits quads into triangles."""
if faces.shape[1] == 3:
return faces
elif faces.shape[1] == 4:
return np.concatenate([faces[:, [0, 1, 3]],
faces[:, [1, 2, 3]]], axis=0)
else:
raise ValueError("only triangles and quads are supported")
def transform(mesh: Mesh, translate=(0, 0, 0), scale=(1, 1, 1)) -> Mesh:
"""Translates and scales mesh."""
verts = mesh.verts
verts = verts * np.array(scale)[None] + np.array(translate)[None]
return Mesh(verts, mesh.faces)
def triangles_to_edges(faces: np.array) -> Tuple[np.array, np.array]:
"""Computes mesh edges from triangles."""
# collect edges from triangles
edges = np.concatenate([faces[:, 0:2], faces[:, 1:3], faces[:, 2::-2]],
axis=0)
senders, receivers = np.moveaxis(edges, 1, 0)
# create two-way connectivity
return (np.concatenate([senders, receivers], axis=0),
np.concatenate([receivers, senders], axis=0))
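# Added worked sketch (illustrative, not part of the original file): a single
# triangle [0, 1, 2] yields directed edges (0,1), (1,2), (2,0) and reverses.
def _example_triangles_to_edges() -> Tuple[np.array, np.array]:
  senders, receivers = triangles_to_edges(np.array([[0, 1, 2]]))
  # senders   == [0, 1, 2, 1, 2, 0]
  # receivers == [1, 2, 0, 0, 1, 2]
  return senders, receivers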
|
gnn_single_rigids-main
|
gnn_single_rigids/src/meshtools.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX implementation of Graph Networks Simulator.
JAX equivalent of:
https://github.com/deepmind/deepmind-research/blob/master/learning_to_simulate/graph_network.py
"""
from typing import Optional
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
class EncodeProcessDecode(hk.Module):
"""Encode-Process-Decode function approximator for learnable simulator.
This class may be used for shared or unshared message passing:
* num_message_passing_steps = N, num_processor_repetitions = 1, gives
N layers of message passing with fully unshared weights:
    [W_1, W_2, ... , W_N] (default)
* num_message_passing_steps = 1, num_processor_repetitions = M, gives
    M layers of message passing with fully shared weights:
[W_1] * M
* num_message_passing_steps = N, num_processor_repetitions = M, gives
M*N layers of message passing with both shared and unshared message passing
such that the weights used at each iteration are:
[W_1, W_2, ... , W_N] * M
"""
def __init__(self,
*,
latent_size: int,
mlp_hidden_size: int,
mlp_num_hidden_layers: int,
num_message_passing_steps: int,
num_processor_repetitions: int = 1,
encode_nodes: bool = True,
encode_edges: bool = True,
node_output_size: Optional[int] = None,
edge_output_size: Optional[int] = None,
include_sent_messages_in_node_update: bool = False,
use_layer_norm: bool = True,
activation: str = "relu",
name: str = "EncodeProcessDecode"):
"""Inits the model.
Args:
latent_size: Size of the node and edge latent representations.
mlp_hidden_size: Hidden layer size for all MLPs.
mlp_num_hidden_layers: Number of hidden layers in all MLPs.
num_message_passing_steps: Number of unshared message passing steps in the
processor steps.
num_processor_repetitions: Number of times that the same processor is
        applied sequentially.
encode_nodes: If False, the node encoder will be omitted.
encode_edges: If False, the edge encoder will be omitted.
node_output_size: Output size of the decoded node representations.
edge_output_size: Output size of the decoded edge representations.
include_sent_messages_in_node_update: Whether to include pooled sent
messages from each node in the node update.
use_layer_norm: Whether it uses layer norm or not.
activation: name of activation function.
name: Name of the model.
"""
super().__init__(name=name)
self._latent_size = latent_size
self._mlp_hidden_size = mlp_hidden_size
self._mlp_num_hidden_layers = mlp_num_hidden_layers
self._num_message_passing_steps = num_message_passing_steps
self._num_processor_repetitions = num_processor_repetitions
self._encode_nodes = encode_nodes
self._encode_edges = encode_edges
self._node_output_size = node_output_size
self._edge_output_size = edge_output_size
self._include_sent_messages_in_node_update = (
include_sent_messages_in_node_update)
self._use_layer_norm = use_layer_norm
self._activation = _get_activation_fn(activation)
self._networks_builder()
def __call__(self, input_graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Forward pass of the learnable dynamics model."""
# Encode the input_graph.
latent_graph_0 = self._encode(input_graph)
# Do `m` message passing steps in the latent graphs.
latent_graph_m = self._process(latent_graph_0)
# Decode from the last latent graph.
return self._decode(latent_graph_m)
def _networks_builder(self):
def build_mlp(name, output_size=None):
if output_size is None:
output_size = self._latent_size
mlp = hk.nets.MLP(
output_sizes=[self._mlp_hidden_size] * self._mlp_num_hidden_layers +
[output_size],
name=name + "_mlp",
activation=self._activation)
return jraph.concatenated_args(mlp)
def build_mlp_with_maybe_layer_norm(name, output_size=None):
network = build_mlp(name, output_size)
if self._use_layer_norm:
layer_norm = hk.LayerNorm(
axis=-1,
create_scale=True,
create_offset=True,
name=name + "_layer_norm")
network = hk.Sequential([network, layer_norm])
return jraph.concatenated_args(network)
# The encoder graph network independently encodes edge and node features.
encoder_kwargs = dict(
embed_edge_fn=build_mlp_with_maybe_layer_norm("encoder_edges")
if self._encode_edges else None,
embed_node_fn=build_mlp_with_maybe_layer_norm("encoder_nodes")
if self._encode_nodes else None,
)
self._encoder_network = jraph.GraphMapFeatures(**encoder_kwargs)
# Create `num_message_passing_steps` graph networks with unshared parameters
# that update the node and edge latent features.
    # Note that we can use `jraph.InteractionNetwork` because
    # it also outputs the messages as updated edge latent features.
self._processor_networks = []
for step_i in range(self._num_message_passing_steps):
self._processor_networks.append(
jraph.InteractionNetwork(
update_edge_fn=build_mlp_with_maybe_layer_norm(
f"processor_edges_{step_i}"),
update_node_fn=build_mlp_with_maybe_layer_norm(
f"processor_nodes_{step_i}"),
include_sent_messages_in_node_update=(
self._include_sent_messages_in_node_update)))
# The decoder MLP decodes edge/node latent features into the output sizes.
decoder_kwargs = dict(
embed_edge_fn=build_mlp("decoder_edges", self._edge_output_size)
if self._edge_output_size else None,
embed_node_fn=build_mlp("decoder_nodes", self._node_output_size)
if self._node_output_size else None,
)
self._decoder_network = jraph.GraphMapFeatures(**decoder_kwargs)
def _encode(self, input_graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Encodes the input graph features into a latent graph."""
# Copy the globals to all of the nodes, if applicable.
if input_graph.globals is not None:
broadcasted_globals = jnp.repeat(
input_graph.globals,
input_graph.n_node,
axis=0,
total_repeat_length=input_graph.nodes.shape[0])
input_graph = input_graph._replace(
nodes=jnp.concatenate([input_graph.nodes, broadcasted_globals],
axis=-1),
globals=None)
# Encode the node and edge features.
latent_graph_0 = self._encoder_network(input_graph)
return latent_graph_0
def _process(self, latent_graph_0: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Processes the latent graph with several steps of message passing."""
# Do `num_message_passing_steps` with each of the `self._processor_networks`
# with unshared weights, and repeat that `self._num_processor_repetitions`
# times.
latent_graph = latent_graph_0
for unused_repetition_i in range(self._num_processor_repetitions):
for processor_network in self._processor_networks:
latent_graph = self._process_step(processor_network, latent_graph,
latent_graph_0)
return latent_graph
def _process_step(self, processor_network_k,
latent_graph_prev_k: jraph.GraphsTuple,
latent_graph_0: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Single step of message passing with node/edge residual connections."""
input_graph_k = latent_graph_prev_k
# One step of message passing.
latent_graph_k = processor_network_k(input_graph_k)
# Add residuals.
latent_graph_k = latent_graph_k._replace(
nodes=(latent_graph_k.nodes + latent_graph_prev_k.nodes),
edges=(latent_graph_k.edges + latent_graph_prev_k.edges),
)
return latent_graph_k
def _decode(self, latent_graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Decodes from the latent graph."""
return self._decoder_network(latent_graph)
def _get_activation_fn(name):
"""Return activation function corresponding to function_name."""
if name == "identity":
return lambda x: x
if hasattr(jax.nn, name):
return getattr(jax.nn, name)
if hasattr(jnp, name):
return getattr(jnp, name)
raise ValueError(f"Unknown activation function {name} specified.")
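# A minimal usage sketch (the toy graph and hyper-parameters below are
# assumptions for illustration): the module must be built inside an
# `hk.transform` because it is an `hk.Module`. With
# num_message_passing_steps=2 and num_processor_repetitions=3, the processor
# applies the weights [W_1, W_2] three times in sequence.
if __name__ == "__main__":
  def _forward(graph):
    model = EncodeProcessDecode(
        latent_size=16,
        mlp_hidden_size=16,
        mlp_num_hidden_layers=2,
        num_message_passing_steps=2,
        num_processor_repetitions=3,
        node_output_size=3)
    return model(graph)

  forward = hk.transform(_forward)
  toy_graph = jraph.GraphsTuple(
      nodes=jnp.ones((4, 8)),
      edges=jnp.ones((6, 8)),
      senders=jnp.array([0, 1, 2, 3, 0, 2]),
      receivers=jnp.array([1, 2, 3, 0, 2, 0]),
      n_node=jnp.array([4]),
      n_edge=jnp.array([6]),
      globals=None)
  params = forward.init(jax.random.PRNGKey(0), toy_graph)
  out = forward.apply(params, None, toy_graph)
  print(out.nodes.shape)  # Decoded node outputs of shape (4, 3).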
|
gnn_single_rigids-main
|
gnn_single_rigids/src/graph_network.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytest configuration file."""
from absl import flags
flags.FLAGS.mark_as_parsed()
|
jax_privacy-main
|
conftest.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
import unittest
from setuptools import find_namespace_packages
from setuptools import setup
def _parse_requirements(requirements_txt_path):
with open(requirements_txt_path) as fp:
return fp.read().splitlines()
def test_suite():
test_loader = unittest.TestLoader()
all_tests = test_loader.discover('jax_privacy',
pattern='*_test.py')
return all_tests
setup(
name='jax_privacy',
version='0.2.0',
description='Algorithms for Privacy-Preserving Machine Learning in JAX.',
url='https://github.com/deepmind/jax_privacy',
author='DeepMind',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_namespace_packages(exclude=['*_test.py']),
install_requires=_parse_requirements('requirements.txt'),
    python_requires='>=3.10',
platforms=['any'],
license='Apache 2.0',
test_suite='setup.test_suite',
include_package_data=True,
zip_safe=False,
)
|
jax_privacy-main
|
setup.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Algorithms for Privacy-Preserving Machine Learning in JAX."""
from jax_privacy.src import accounting
from jax_privacy.src import training
|
jax_privacy-main
|
jax_privacy/__init__.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
jax_privacy-main
|
jax_privacy/experiments/__init__.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image dataset loader with typical pre-processing and advanced augs."""
from jax_privacy.experiments.image_data import augmult
import numpy as np
import tensorflow as tf
def decode_large_image(
image: tf.Tensor,
*,
image_size: tuple[int, int],
augmult_config: augmult.AugmultConfig | None,
) -> tf.Tensor:
"""Decodes the image and returns it with float32 values within [0, 1]."""
if image.dtype == tf.dtypes.string:
image = _decode_and_center_crop(
image_bytes=image,
)
# Convert to float32 and rescale to [0, 1] after the decoding.
image = tf.image.convert_image_dtype(image, np.float32)
if (augmult_config is not None
and augmult_config.random_crop
and augmult_config.augmult):
# Increase the target size to later take random crops within the image,
# e.g. 268x268 for 224x224 crops.
image_size = [int(x * 1.2) for x in image_size]
image = tf.image.resize(image, image_size, tf.image.ResizeMethod.BICUBIC)
  # NOTE: bicubic resizing can overshoot without clamping, so the resized
  # values may fall outside the range [0.0, 1.0]; clip them back below.
return tf.clip_by_value(image, 0.0, 1.0)
def _decode_and_center_crop(
*,
image_bytes: tf.Tensor,
) -> tf.Tensor:
"""Decodes a JPEG and takes a square center crop to make the image square."""
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
min_size = tf.minimum(image_height, image_width)
offset_height = (image_height - min_size) // 2
offset_width = (image_width - min_size) // 2
crop_window = tf.stack([offset_height, offset_width, min_size, min_size])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image # dtype uint8
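# A minimal usage sketch (the toy tensor and sizes are assumptions for
# illustration): an already-decoded uint8 image skips the JPEG path, is
# converted to float32 in [0, 1], resized with bicubic interpolation and
# clipped back into [0, 1].
if __name__ == "__main__":
  fake_image = tf.cast(
      tf.random.uniform((300, 400, 3), maxval=256, dtype=tf.int32), tf.uint8)
  resized = decode_large_image(
      fake_image, image_size=(224, 224), augmult_config=None)
  print(resized.shape, resized.dtype)  # (224, 224, 3) float32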
|
jax_privacy-main
|
jax_privacy/experiments/image_data/decoder.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loading functions for MNIST / CIFAR / SVHN."""
import dataclasses
import functools
from jax_privacy.experiments.image_data import base
from jax_privacy.experiments.image_data import loader
import tensorflow as tf
import tensorflow_datasets as tfds
MEAN_RGB = (0.49139968, 0.48215841, 0.44653091)
STDDEV_RGB = (0.24703223, 0.24348513, 0.26158784)
@dataclasses.dataclass(kw_only=True, slots=True)
class _DatasetConfig(base.ImageDatasetConfig):
"""Builds the input pipeline for MNIST / SVHN / CIFAR.
Attributes:
num_samples: Number of examples in the dataset split.
num_classes: Number of label classes for the dataset.
split_content: Subset split, e.g. "train[:50000]".
name: Unique identifying name for the dataset.
image_size: Image resolution to use.
using_large_images: Whether to decode images as large images.
preprocess_name: Name for the preprocessing function.
"""
num_samples: int
num_classes: int
name: str
split_content: str
image_size: tuple[int, int]
preprocess_name: str | None = None
def _normalize_image(self, image: tf.Tensor) -> tf.Tensor:
if self.name in ('cifar10', 'cifar100', 'svhn_cropped', 'mnist'):
if self.name == 'mnist':
return base.center_image(image)
elif self.preprocess_name:
if self.preprocess_name == 'standardise':
return base.standardize_image_per_channel(
image,
mean_per_channel=MEAN_RGB,
stddev_per_channel=STDDEV_RGB,
)
elif self.preprocess_name == 'center':
return base.center_image(image)
elif self.preprocess_name == 'none':
return image
else:
raise ValueError(
'Unexpected preprocessing function: '
f'{self.preprocess_name}.')
else:
return base.standardize_image_per_channel(
image,
mean_per_channel=MEAN_RGB,
stddev_per_channel=STDDEV_RGB,
)
else:
raise ValueError(f'Invalid dataset {self.name}.')
def _preprocess_label(self, label: tf.Tensor) -> tf.Tensor:
"""Pre-processes the input label."""
return tf.one_hot(label, depth=self.num_classes)
@dataclasses.dataclass(kw_only=True, slots=True)
class _DataLoader(loader.DataLoader):
"""Data loader for MNIST / CIFAR / SVHN."""
config: _DatasetConfig
def load_raw_data(self, shuffle_files: bool) -> tf.data.Dataset:
ds = tfds.load(
name=self.config.name,
split=self.config.split_content,
shuffle_files=shuffle_files,
)
return ds.map(base.DataInputs.from_dict)
MnistLoader = Cifar10Loader = Cifar100Loader = SvhnLoader = _DataLoader
Cifar10TrainConfig = functools.partial(
_DatasetConfig,
name='cifar10',
image_size=(32, 32),
num_classes=10,
split_content='train[:45000]',
num_samples=45_000,
)
Cifar10TrainValidConfig = functools.partial(
_DatasetConfig,
name='cifar10',
image_size=(32, 32),
num_classes=10,
split_content='train',
num_samples=50_000,
)
Cifar10ValidConfig = functools.partial(
_DatasetConfig,
name='cifar10',
image_size=(32, 32),
num_classes=10,
split_content='train[45000:]',
num_samples=5_000,
)
Cifar10TestConfig = functools.partial(
_DatasetConfig,
name='cifar10',
image_size=(32, 32),
num_classes=10,
split_content='test',
    num_samples=10_000,
)
Cifar100TrainConfig = functools.partial(
_DatasetConfig,
name='cifar100',
image_size=(32, 32),
num_classes=100,
split_content='train[:45000]',
num_samples=45_000,
)
Cifar100TrainValidConfig = functools.partial(
_DatasetConfig,
name='cifar100',
image_size=(32, 32),
num_classes=100,
split_content='train',
num_samples=50_000,
)
Cifar100ValidConfig = functools.partial(
_DatasetConfig,
name='cifar100',
image_size=(32, 32),
num_classes=100,
split_content='train[45000:]',
num_samples=5_000,
)
Cifar100TestConfig = functools.partial(
_DatasetConfig,
name='cifar100',
image_size=(32, 32),
num_classes=100,
split_content='test',
    num_samples=10_000,
)
SvhnTrainConfig = functools.partial(
_DatasetConfig,
name='svhn_cropped',
image_size=(28, 28),
num_classes=10,
num_samples=68_257,
split_content='train[:68257]',
)
SvhnValidConfig = functools.partial(
_DatasetConfig,
name='svhn_cropped',
image_size=(28, 28),
num_classes=10,
num_samples=5_000,
split_content='train[68257:]',
)
SvhnTrainValidConfig = functools.partial(
_DatasetConfig,
name='svhn_cropped',
image_size=(28, 28),
num_classes=10,
num_samples=73_257,
split_content='train',
)
SvhnTestConfig = functools.partial(
_DatasetConfig,
name='svhn_cropped',
image_size=(28, 28),
num_classes=10,
num_samples=26_032,
split_content='test',
)
MnistTrainConfig = functools.partial(
_DatasetConfig,
name='mnist',
image_size=(28, 28),
num_classes=10,
num_samples=50_000,
    split_content='train[:50000]',
)
MnistValidConfig = functools.partial(
_DatasetConfig,
name='mnist',
image_size=(28, 28),
num_classes=10,
num_samples=10_000,
    split_content='train[50000:]',
)
MnistTrainValidConfig = functools.partial(
_DatasetConfig,
name='mnist',
image_size=(28, 28),
num_classes=10,
num_samples=60_000,
split_content='train',
)
MnistTestConfig = functools.partial(
_DatasetConfig,
name='mnist',
image_size=(28, 28),
num_classes=10,
num_samples=10_000,
split_content='test',
)
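# A minimal usage sketch (batch sizes are assumptions for illustration;
# `debug=True` uses fake data so no TFDS download is needed). The configs are
# `functools.partial` factories, so they are called like classes, and the
# loader yields training batches shaped
# [num_devices, batch_per_device, augmult, H, W, C].
if __name__ == "__main__":
  cifar_loader = Cifar10Loader(config=Cifar10TrainConfig(), debug=True)
  batches = cifar_loader.load_dataset(
      is_training=True, shard_data=False, batch_dims=(1, 4))
  batch = next(iter(batches))
  print(batch.image.shape)  # (1, 4, 1, 32, 32, 3): the augmult dim is 1 here.
  print(batch.label.shape)  # (1, 4, 1, 10)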
|
jax_privacy-main
|
jax_privacy/experiments/image_data/mnist_cifar_svhn.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Places 365 dataset with typical pre-processing and advanced augs."""
import dataclasses
import enum
import functools
from typing import Sequence
from jax_privacy.experiments.image_data import base
from jax_privacy.experiments.image_data import loader
import tensorflow as tf
import tensorflow_datasets as tfds
MEAN_RGB = (0.485, 0.456, 0.406)
STDDEV_RGB = (0.229, 0.224, 0.225)
class Places365NumSamples(enum.IntEnum):
TRAIN = 1_793_460
VALID = 10_000
TEST = 36_500
@dataclasses.dataclass(kw_only=True, slots=True)
class Places365Config(base.ImageDatasetConfig):
"""Builds the input pipeline for Places365.
Attributes:
num_samples: Number of examples in the dataset split.
num_classes: Number of label classes for the dataset.
name: Unique identifying name for the dataset.
split_content: Subset split, e.g. "train[:50000]".
image_size: Image resolution.
preprocess_name: Name of preprocessing function. The default is to
`standardise` the images to preserve the current behaviour in image
classification. Going forward, consider setting this to `center` to
avoid data-dependent pre-processing.
cached_class_names: names of the different classes stored in a cache to
avoid multiple slow queries to TFDS.
"""
  num_samples: int
  split_content: str
  num_classes: int = 365
  name: str = 'places365'
image_size: tuple[int, int]
preprocess_name: str = 'standardise'
cached_class_names: Sequence[str] | None = dataclasses.field(
default=None, init=False)
@property
def class_names(self) -> Sequence[str]:
if self.cached_class_names is None:
# This is relatively slow, so fetch the information only if required.
self.cached_class_names = tfds.builder(
'places365_small').info.features['label'].names
return self.cached_class_names
def _normalize_image(self, image: tf.Tensor) -> tf.Tensor:
"""Normalizes the input image."""
if self.preprocess_name == 'center':
return base.center_image(image)
elif self.preprocess_name == 'standardise':
return base.standardize_image_per_channel(
image,
mean_per_channel=MEAN_RGB,
stddev_per_channel=STDDEV_RGB,
)
else:
raise NotImplementedError(
f'Preprocessing with {self.preprocess_name} not implemented for '
'Places365.')
def _preprocess_label(self, label: tf.Tensor) -> tf.Tensor:
"""Pre-processes the input label."""
return tf.one_hot(label, depth=self.num_classes)
@dataclasses.dataclass(kw_only=True, slots=True)
class Places365Loader(loader.DataLoader):
"""Data loader for Places365."""
config: Places365Config
def load_raw_data(self, shuffle_files: bool) -> tf.data.Dataset:
ds = tfds.load(
'places365_small:2.*.*',
split=self.config.split_content,
decoders={'image': tfds.decode.SkipDecoding()},
shuffle_files=shuffle_files,
)
return ds.map(base.DataInputs.from_dict)
Places365TrainConfig = functools.partial(
Places365Config,
num_samples=Places365NumSamples['TRAIN'],
split_content='train[10000:]',
)
Places365ValidConfig = functools.partial(
Places365Config,
num_samples=Places365NumSamples['VALID'],
split_content='train[:10000]',
)
Places365TrainValidConfig = functools.partial(
Places365Config,
num_samples=(Places365NumSamples['TRAIN'] + Places365NumSamples['VALID']),
split_content='train',
)
Places365Testconfig = functools.partial(
Places365Config,
num_samples=Places365NumSamples['TEST'],
split_content='validation',
)
|
jax_privacy-main
|
jax_privacy/experiments/image_data/places365.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet dataset with typical pre-processing and data augmentations."""
import dataclasses
import enum
import functools
from jax_privacy.experiments.image_data import base
from jax_privacy.experiments.image_data import loader
import tensorflow as tf
import tensorflow_datasets as tfds
MEAN_RGB = (0.485, 0.456, 0.406)
STDDEV_RGB = (0.229, 0.224, 0.225)
class ImageNetNumSamples(enum.IntEnum):
TRAIN = 1_271_167
VALID = 10_000
TEST = 50_000
@dataclasses.dataclass(kw_only=True, slots=True)
class ImageNetConfig(base.ImageDatasetConfig):
"""ImageNet dataset.
Attributes:
num_samples: Number of examples in the dataset split.
num_classes: Number of label classes for the dataset.
split_content: Subset split, e.g. "train[:50000]".
name: Unique identifying name for the dataset.
preprocess_name: Name of preprocessing function. The default is to
`standardise` the images to preserve the current behaviour in image
classification. Going forward, consider setting this to `center` to
avoid data-dependent pre-processing.
"""
num_samples: int
split_content: str
image_size: tuple[int, int]
num_classes: int = 1000
name: str = 'imagenet'
preprocess_name: str = 'standardise'
def _normalize_image(self, image: tf.Tensor) -> tf.Tensor:
"""Normalizes the input image."""
if self.preprocess_name == 'center':
return base.center_image(image)
elif self.preprocess_name == 'standardise':
return base.standardize_image_per_channel(
image,
mean_per_channel=MEAN_RGB,
stddev_per_channel=STDDEV_RGB,
)
else:
raise NotImplementedError()
def _preprocess_label(self, label: tf.Tensor) -> tf.Tensor:
"""Pre-processes the input label."""
return tf.one_hot(label, depth=self.num_classes)
@dataclasses.dataclass(kw_only=True, slots=True)
class ImageNetLoader(loader.DataLoader):
"""Data loader for ImageNet."""
config: ImageNetConfig
def load_raw_data(self, shuffle_files: bool) -> tf.data.Dataset:
if self.config.using_large_images:
ds = tfds.load(
'imagenet2012:5.*.*',
split=self.config.split_content,
decoders={'image': tfds.decode.SkipDecoding()},
shuffle_files=shuffle_files,
)
else:
im_size = self.config.image_size
ds = tfds.load(
name=f'imagenet_resized/{im_size[0]}x{im_size[1]}',
split=self.config.split_content,
shuffle_files=shuffle_files,
)
return ds.map(base.DataInputs.from_dict)
ImagenetTrainConfig = functools.partial(
ImageNetConfig,
num_samples=ImageNetNumSamples['TRAIN'],
split_content='train[10000:]',
)
ImagenetValidConfig = functools.partial(
ImageNetConfig,
num_samples=ImageNetNumSamples['VALID'],
split_content='train[:10000]',
)
ImagenetTrainValidConfig = functools.partial(
ImageNetConfig,
num_samples=(ImageNetNumSamples['TRAIN'] + ImageNetNumSamples['VALID']),
split_content='train',
)
ImagenetTestConfig = functools.partial(
ImageNetConfig,
num_samples=ImageNetNumSamples['TEST'],
split_content='validation',
)
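# A minimal usage sketch (the image sizes are assumptions for illustration):
# `using_large_images` is derived from `image_size`, so a 32x32 config reads
# from `imagenet_resized/32x32`, while a 224x224 config decodes the full
# ImageNet JPEGs.
if __name__ == "__main__":
  small_config = ImagenetTrainConfig(image_size=(32, 32))
  large_config = ImagenetTrainConfig(image_size=(224, 224))
  print(small_config.using_large_images)  # False
  print(large_config.using_large_images)  # True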
|
jax_privacy-main
|
jax_privacy/experiments/image_data/imagenet.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loading."""
# pylint:disable=g-multiple-import
from jax_privacy.experiments.image_data.augmult import AugmultConfig
from jax_privacy.experiments.image_data.base import (
DataInputs,
DatasetConfig,
ImageDatasetConfig,
)
from jax_privacy.experiments.image_data.imagenet import (
ImageNetLoader,
ImageNetConfig,
ImagenetTestConfig,
ImagenetTrainConfig,
ImagenetTrainValidConfig,
ImagenetValidConfig,
ImageNetNumSamples,
)
from jax_privacy.experiments.image_data.loader import DataLoader
from jax_privacy.experiments.image_data.mnist_cifar_svhn import (
MnistLoader,
Cifar10Loader,
Cifar100Loader,
SvhnLoader,
Cifar10TrainConfig,
Cifar10TrainValidConfig,
Cifar10ValidConfig,
Cifar10TestConfig,
Cifar100TrainConfig,
Cifar100TrainValidConfig,
Cifar100ValidConfig,
Cifar100TestConfig,
SvhnTrainConfig,
SvhnValidConfig,
SvhnTrainValidConfig,
SvhnTestConfig,
MnistTrainConfig,
MnistValidConfig,
MnistTrainValidConfig,
MnistTestConfig,
)
from jax_privacy.experiments.image_data.places365 import (
Places365Loader,
Places365TrainConfig,
Places365ValidConfig,
Places365TrainValidConfig,
Places365Testconfig,
Places365NumSamples,
)
|
jax_privacy-main
|
jax_privacy/experiments/image_data/__init__.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test datasets with fake data."""
from absl.testing import absltest
import chex
import jax
from jax_privacy.experiments import image_data
_FAKE_DATA = True
_AUGMULT = 2
def datasets_to_test():
augmult_config = image_data.AugmultConfig(
augmult=_AUGMULT,
random_crop=True,
random_flip=True,
random_color=False,
)
return (
image_data.ImageNetLoader(
config=image_data.ImagenetTrainConfig(
image_size=(224, 224),
),
augmult_config=augmult_config,
debug=_FAKE_DATA,
),
image_data.MnistLoader(
config=image_data.MnistTrainConfig(),
augmult_config=augmult_config,
debug=_FAKE_DATA,
),
image_data.Cifar10Loader(
config=image_data.Cifar10TrainConfig(),
augmult_config=augmult_config,
debug=_FAKE_DATA,
),
image_data.Places365Loader(
config=image_data.Places365TrainConfig(
image_size=(224, 224),
),
augmult_config=augmult_config,
debug=_FAKE_DATA,
),
)
class DatasetTest(chex.TestCase):
def setUp(self):
super().setUp()
self.num_hosts = jax.process_count()
self.num_devices = jax.local_device_count()
self.local_batch_size = 4
def test_dataset(self):
for dataset in datasets_to_test():
images_train_shape = (
self.num_devices,
self.local_batch_size,
_AUGMULT,
*dataset.config.image_size,
)
labels_train_shape = (
self.num_devices,
self.local_batch_size,
_AUGMULT,
dataset.config.num_classes,
)
data_train = dataset.load_dataset(
batch_dims=(self.num_devices, self.local_batch_size),
shard_data=True,
is_training=True,
)
batch_train = next(iter(data_train))
# Assert shape, except on the channel dimension, which is unknown to
# images_train_shape.
chex.assert_tree_shape_prefix(batch_train.image, images_train_shape)
chex.assert_shape(batch_train.label, labels_train_shape)
images_eval_shape = (
self.num_hosts,
self.num_devices,
self.local_batch_size,
*dataset.config.image_size,
)
labels_eval_shape = (
self.num_hosts,
self.num_devices,
self.local_batch_size,
dataset.config.num_classes,
)
data_eval = dataset.load_dataset(
batch_dims=(self.num_hosts, self.num_devices, self.local_batch_size),
shard_data=False,
is_training=False,
)
batch_eval = next(iter(data_eval))
# Assert shape, except on the channel dimension, which is unknown to
# images_eval_shape.
chex.assert_tree_shape_prefix(batch_eval.image, images_eval_shape)
chex.assert_shape(batch_eval.label, labels_eval_shape)
if __name__ == '__main__':
absltest.main()
|
jax_privacy-main
|
jax_privacy/experiments/image_data/dataset_test.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image dataset loader with typical pre-processing and advanced augs."""
import abc
import dataclasses
import functools
import itertools
from typing import Iterator, Sequence
import jax
from jax_privacy.experiments.image_data import augmult
from jax_privacy.experiments.image_data import base
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
@dataclasses.dataclass(kw_only=True, slots=True)
class DataLoader(metaclass=abc.ABCMeta):
"""Create a data loader.
Attributes:
config: Dataset configuration.
augmult_config: Configuration for data augmentation. If set to None, no
data augmentation is applied. NOTE: data augmentation is ignored in
evaluation mode.
debug: Whether to load fake data for debugging purposes.
cache_train: Whether the training dataset should be cached. This should only
be set to True for small datasets.
"""
config: base.DatasetConfig
augmult_config: augmult.AugmultConfig | None = None
debug: bool = False
cache_train: bool = False
@abc.abstractmethod
def load_raw_data(self, shuffle_files: bool) -> tf.data.Dataset:
"""Method for loading the raw tensorflow dataset."""
def load_dataset(
self,
*,
is_training: bool,
shard_data: bool,
batch_dims: Sequence[int],
drop_metadata: bool = True,
max_num_batches: int | None = None,
) -> Iterator[base.DataInputs]:
"""Loads the dataset and preprocesses it.
In training mode, each batch has the shape `num_local_devices x
batch_size_per_device x augmult x example_shape.`
    In evaluation mode, each batch has the shape `num_processes x
    num_local_devices x batch_size_per_device x example_shape` (no augmult
    dimension is added).
Args:
is_training: If set to true, data augmentation may be applied to each
batch of data.
shard_data: Whether to shard data across hosts, i.e. to partition the data
with each host only seeing its own subset (shard) of the partition. It
should be enabled if and only if data is not batched across hosts.
batch_dims: The size of each dimension to be batched.
drop_metadata: Whether to drop the metadata in the batch (True by
default). This can be useful when the metadata does not have the
consistent shapes required by pmapped functions.
max_num_batches: Maximum number of batches to load.
Yields:
A TFDS numpy iterator.
"""
if self.debug:
ds = self.config.make_fake_data()
else:
ds = self.load_raw_data(shuffle_files=is_training)
if shard_data:
ds = ds.shard(jax.process_count(), jax.process_index())
if drop_metadata:
ds = ds.map(lambda x: base.DataInputs(image=x.image, label=x.label))
options = tf.data.Options()
options.threading.private_threadpool_size = 48
options.threading.max_intra_op_parallelism = 1
options.experimental_optimization.map_parallelization = True
options.experimental_optimization.parallel_batch = True
ds = ds.with_options(options)
if is_training:
if self.cache_train:
ds = ds.cache()
ds = ds.repeat()
ds = ds.shuffle(
buffer_size=10*np.prod(batch_dims),
seed=None,
)
ds = ds.map(
functools.partial(
self.config.preprocess,
augmult_config=self.augmult_config,
is_training=is_training,
),
num_parallel_calls=tf.data.AUTOTUNE,
)
for dim in reversed(batch_dims):
ds = ds.batch(dim, drop_remainder=is_training)
ds = ds.prefetch(tf.data.AUTOTUNE)
ds = tfds.as_numpy(ds)
if max_num_batches is not None:
ds = itertools.islice(ds, max_num_batches)
yield from ds
|
jax_privacy-main
|
jax_privacy/experiments/image_data/loader.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data augmentation with augmult (Hoffer et al., 2019; Fort et al., 2021).
References:
E. Hoffer, T. Ben-Nun, I. Hubara, N. Giladi, T. Hoefler, and D. Soudry.
  Augment your batch: better training with larger batches. arXiv, 2019.
  S. Fort, A. Brock, R. Pascanu, S. De, and S. L. Smith.
  Drawing multiple augmentation samples per image during training efficiently
  decreases test error. arXiv, 2021.
"""
import dataclasses
from typing import Sequence
import tensorflow.compat.v2 as tf
@dataclasses.dataclass(kw_only=True, slots=True)
class AugmultConfig:
"""Preprocessing options for images at training time.
Attributes:
augmult: Number of augmentation multiplicities to use. `augmult=0`
corresponds to no augmentation at all, `augmult=1` to standard data
augmentation (one augmented view per mini-batch) and `augmult>1` to
      having several augmented views of each sample within the mini-batch.
random_crop: Whether to use random crops for data augmentation.
random_flip: Whether to use random horizontal flips for data augmentation.
random_color: Whether to use random color jittering for data augmentation.
pad: Optional padding before the image is cropped for data augmentation.
"""
augmult: int
random_crop: bool
random_flip: bool
random_color: bool
pad: int | None = 4
def apply(
self,
image: tf.Tensor,
label: tf.Tensor,
*,
crop_size: Sequence[int],
) -> tuple[tf.Tensor, tf.Tensor]:
return apply_augmult(
image,
label,
augmult=self.augmult,
random_flip=self.random_flip,
random_crop=self.random_crop,
random_color=self.random_color,
pad=self.pad,
crop_size=crop_size,
)
def apply_augmult(
image: tf.Tensor,
label: tf.Tensor,
*,
augmult: int,
random_flip: bool,
random_crop: bool,
random_color: bool,
crop_size: Sequence[int],
pad: int | None,
) -> tuple[tf.Tensor, tf.Tensor]:
"""Augmult data augmentation (Hoffer et al., 2019; Fort et al., 2021).
Args:
image: (single) image to augment.
label: label corresponding to the image (not modified by this function).
augmult: number of augmentation multiplicities to use. This number
should be non-negative (this function will fail if it is not).
random_flip: whether to use random horizontal flips for data augmentation.
random_crop: whether to use random crops for data augmentation.
random_color: whether to use random color jittering for data augmentation.
crop_size: size of the crop for random crops.
pad: optional padding before the image is cropped.
Returns:
images: augmented images with a new prepended dimension of size `augmult`.
labels: repeated labels with a new prepended dimension of size `augmult`.
"""
if augmult == 0:
# No augmentations; return original images and labels with a new dimension.
images = tf.expand_dims(image, axis=0)
labels = tf.expand_dims(label, axis=0)
elif augmult > 0:
# Perform one or more augmentations.
raw_image = tf.identity(image)
augmented_images = []
for _ in range(augmult):
image_now = raw_image
if random_crop:
if pad:
image_now = padding_input(image_now, pad=pad)
image_now = tf.image.random_crop(image_now, size=crop_size)
if random_flip:
image_now = tf.image.random_flip_left_right(image_now)
if random_color:
# values copied/adjusted from a color jittering tutorial
# https://www.wouterbulten.nl/blog/tech/data-augmentation-using-tensorflow-data-dataset/
image_now = tf.image.random_hue(image_now, 0.1)
image_now = tf.image.random_saturation(image_now, 0.6, 1.6)
image_now = tf.image.random_brightness(image_now, 0.15)
image_now = tf.image.random_contrast(image_now, 0.7, 1.3)
augmented_images.append(image_now)
images = tf.stack(augmented_images, axis=0)
labels = tf.stack([label]*augmult, axis=0)
else:
raise ValueError('Augmult should be non-negative.')
return images, labels
def padding_input(x: tf.Tensor, pad: int):
"""Pad input image through 'mirroring' on the four edges.
Args:
x: image to pad.
pad: number of padding pixels.
Returns:
Padded image.
"""
x = tf.concat([x[:pad, :, :][::-1], x, x[-pad:, :, :][::-1]], axis=0)
x = tf.concat([x[:, :pad, :][:, ::-1], x, x[:, -pad:, :][:, ::-1]], axis=1)
return x
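# A minimal usage sketch (the toy image and settings are assumptions for
# illustration): with augmult=2, one input example yields two independently
# augmented views stacked along a new leading axis, and the label is repeated
# to match.
if __name__ == "__main__":
  image = tf.random.uniform((32, 32, 3))
  label = tf.one_hot(3, depth=10)
  config = AugmultConfig(
      augmult=2, random_crop=True, random_flip=True, random_color=False)
  images, labels = config.apply(image, label, crop_size=(32, 32, 3))
  print(images.shape, labels.shape)  # (2, 32, 32, 3) (2, 10)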
|
jax_privacy-main
|
jax_privacy/experiments/image_data/augmult.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for a dataset split."""
import abc
import dataclasses
from typing import Any, Mapping, Sequence, Type
import chex
from jax_privacy.experiments.image_data import augmult
from jax_privacy.experiments.image_data import decoder
import numpy as np
import tensorflow as tf
@chex.dataclass(frozen=True)
class DataInputs:
"""Data inputs (either as a single example or as a batch).
Attributes:
image: Image content (potentially batched).
label: Label content (potentially batched).
metadata: Auxiliary content (potentially batched).
"""
image: tf.Tensor | chex.Array
label: tf.Tensor | chex.Array
metadata: Mapping[str, Any] = dataclasses.field(default_factory=dict)
@classmethod
def from_dict(
cls: Type['DataInputs'],
data_dict: Mapping[str, tf.Tensor | chex.Array],
) -> 'DataInputs':
metadata = {
k: v for k, v in data_dict.items() if k not in ('image', 'label')}
return cls(
image=data_dict['image'],
label=data_dict['label'],
metadata=metadata,
)
@dataclasses.dataclass(kw_only=True, slots=True)
class DatasetConfig(metaclass=abc.ABCMeta):
"""Dataset configuration.
Attributes:
num_samples: Number of examples in the dataset split.
num_classes: Number of label classes for the dataset.
split_content: Subset split, e.g. "train[:50000]".
name: Unique identifying name for the dataset.
"""
num_samples: int
num_classes: int
name: str
split_content: str
@property
def class_names(self) -> Sequence[str]:
"""Returns class names for the dataset, defaulting to an index value."""
return [str(i) for i in range(self.num_classes)]
@abc.abstractmethod
def preprocess(
self,
data: DataInputs,
*,
is_training: bool,
augmult_config: augmult.AugmultConfig | None = None,
) -> DataInputs:
"""Preprocesses the image and the label."""
@abc.abstractmethod
def make_fake_data(self) -> tf.data.Dataset:
"""Creates fake data for debugging purposes."""
@dataclasses.dataclass(kw_only=True, slots=True)
class ImageDatasetConfig(DatasetConfig):
"""Abstract dataset split using loader for large images.
Attributes:
num_samples: Number of examples in the dataset split.
num_classes: Number of label classes for the dataset.
split_content: Subset split, e.g. "train[:50000]".
name: Unique identifying name for the dataset.
image_size: Image resolution to use.
using_large_images: Whether to decode images as large images.
"""
name: str
num_samples: int
num_classes: int
split_content: str
image_size: tuple[int, int]
@property
def using_large_images(self) -> bool:
return min(self.image_size) > 64
@abc.abstractmethod
def _normalize_image(self, image: tf.Tensor) -> tf.Tensor:
"""Normalizes the input image."""
@abc.abstractmethod
def _preprocess_label(self, label: tf.Tensor) -> tf.Tensor:
"""Pre-processes the input label."""
def preprocess(
self,
data: DataInputs,
*,
is_training: bool,
augmult_config: augmult.AugmultConfig | None = None,
) -> DataInputs:
"""Preprocesses the image and the label."""
if not is_training:
# Ignore augmult in evaluation mode.
augmult_config = None
image, label = data.image, data.label
if self.using_large_images:
# Large images are decoded in a custom resolution (either `image_size`,
# or slightly larger than `image_size` if using random crops).
image = decoder.decode_large_image(
image,
image_size=self.image_size,
augmult_config=augmult_config,
)
else:
# Otherwise, the image is simply converted to float in its original
# resolution and scaled to [0, 1].
image = tf.image.convert_image_dtype(image, np.float32)
image = self._normalize_image(image)
label = self._preprocess_label(label)
if is_training and augmult_config is not None:
# Apply augmult in training mode.
image, label = augmult_config.apply(
image=image,
label=label,
crop_size=[*self.image_size, image.shape[2]],
)
elif is_training:
# Match the augmult dimensions in training mode.
image = tf.expand_dims(image, axis=0)
label = tf.expand_dims(label, axis=0)
else:
# Ignore augmult in evaluation mode.
pass
if not self.using_large_images and self.image_size:
# Small images may get resized after the pre-processing and data
# augmentation.
image = tf.image.resize(image, self.image_size)
return DataInputs(image=image, label=label, metadata=data.metadata)
def make_fake_data(self) -> tf.data.Dataset:
"""Creates fake data for debugging purposes."""
fake_data = DataInputs(
image=tf.random.normal(shape=(*self.image_size, 3)),
label=tf.random.uniform(
shape=(), minval=0, maxval=self.num_classes, dtype=tf.int32),
)
return tf.data.Dataset.from_tensors(fake_data).repeat(self.num_samples)
def center_image(
image: tf.Tensor,
min_value: float = -1.0,
max_value: float = 1.0,
) -> tf.Tensor:
"""Centers the image to have values in [min_value, max_value].
Args:
image: A multi-dimensional array of floating point values in [0, 1].
min_value: The minimum value for the pixels in the centered image.
    max_value: The maximum value for the pixels in the centered image.
Returns:
The centered image, with values in the range [min_value, max_value].
"""
return image * (max_value - min_value) + min_value
def standardize_image_per_channel(
image: tf.Tensor,
mean_per_channel: float | tuple[float, float, float],
stddev_per_channel: float | tuple[float, float, float],
) -> tf.Tensor:
"""Standardizes the image per channel.
Args:
image: A [H, W, C] array of floating point values in [0, 1].
mean_per_channel: The mean value for pixels in each channel of the image.
stddev_per_channel: The standard deviation for pixels in each channel of the
image.
Returns:
The standardized image, with mean 0 and standard deviation 1 in each
channel.
"""
mean_per_channel = tf.constant(
mean_per_channel, shape=[1, 1, image.shape[2]], dtype=image.dtype
)
stddev_per_channel = tf.constant(
stddev_per_channel, shape=[1, 1, image.shape[2]], dtype=image.dtype
)
return (image - mean_per_channel) / stddev_per_channel
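# A minimal usage sketch (the toy image and statistics are assumptions for
# illustration): `center_image` maps [0, 1] values to [min_value, max_value],
# while `standardize_image_per_channel` subtracts the per-channel mean and
# divides by the per-channel standard deviation.
if __name__ == "__main__":
  image = tf.random.uniform((4, 4, 3))
  centered = center_image(image)  # Values now lie in [-1, 1].
  standardized = standardize_image_per_channel(
      image,
      mean_per_channel=(0.5, 0.5, 0.5),
      stddev_per_channel=(0.25, 0.25, 0.25))
  print(centered.shape, standardized.shape)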
|
jax_privacy-main
|
jax_privacy/experiments/image_data/base.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test."""
from unittest import mock
from absl.testing import absltest
import chex
import jax
from jax import random
import jax.numpy as jnp
from jax_privacy.experiments import image_data as data
from jax_privacy.experiments.image_classification import config_base
from jax_privacy.experiments.image_classification import experiment
from jax_privacy.src.training import experiment_config
from jax_privacy.src.training import optimizer_config
from jaxline import train
import ml_collections
def get_config() -> ml_collections.ConfigDict:
"""Creates a dummy config for the test."""
config = config_base.ExperimentConfig(
num_updates=3,
optimizer=optimizer_config.OptimizerConfig(
name='sgd',
lr=optimizer_config.constant_lr_config(4.0),
),
training=experiment_config.TrainingConfig(
batch_size=experiment_config.BatchSizeTrainConfig(
total=8,
per_device_per_step=4,
),
dp=experiment_config.NoDPConfig(),
),
model=config_base.ModelConfig(
name='wideresnet',
kwargs={
'depth': 4,
'width': 1,
'groups': 2,
},
restore=config_base.ModelRestoreConfig(
path=None,
),
),
data_train=data.Cifar10Loader(
config=data.Cifar10TrainConfig(),
debug=True,
),
data_eval=data.Cifar10Loader(
config=data.Cifar10ValidConfig(),
debug=True,
),
evaluation=experiment_config.EvaluationConfig(
batch_size=1,
),
averaging=experiment_config.AveragingConfig(
ema_enabled=True,
ema_coefficient=0.9999,
ema_start_step=0,
polyak_enabled=True,
polyak_start_step=0,
),
)
return config_base.build_jaxline_config(config)
class ExperimentTest(chex.TestCase):
def testTrain(self):
cfg = get_config()
train.train(experiment.Experiment, cfg,
checkpointer=mock.Mock(), writer=mock.Mock())
def testEval(self):
cfg = get_config()
rng = random.PRNGKey(cfg.random_seed)
exp = experiment.Experiment('eval', init_rng=rng, **cfg.experiment_kwargs)
rng = jnp.broadcast_to(rng, (jax.local_device_count(),) + rng.shape)
global_step = jnp.broadcast_to(0, [jax.local_device_count()])
# Simulate a checkpoint restore.
exp.step(global_step=global_step, rng=rng, writer=None)
exp.evaluate(global_step=global_step, rng=rng, writer=None)
if __name__ == '__main__':
absltest.main()
|
jax_privacy-main
|
jax_privacy/experiments/image_classification/experiment_test.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
jax_privacy-main
|
jax_privacy/experiments/image_classification/__init__.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base configuration."""
import dataclasses
import random
from typing import Any, Mapping
from jax_privacy.experiments import image_data as data
from jax_privacy.src.training import auto_tune
from jax_privacy.src.training import experiment_config as experiment_config_py
from jax_privacy.src.training import optimizer_config
from jaxline import base_config as jaxline_base_config
import ml_collections
MODEL_CKPT = ml_collections.FrozenConfigDict({
'WRN_40_4_CIFAR100': 'WRN_40_4_CIFAR100.dill',
'WRN_40_4_IMAGENET32': 'WRN_40_4_IMAGENET32.dill',
'WRN_28_10_IMAGENET32': 'WRN_28_10_IMAGENET32.dill',
})
@dataclasses.dataclass(kw_only=True, slots=True)
class ModelRestoreConfig:
"""Configuration for restoring the model.
Attributes:
path: Path to the model to restore.
params_key: (dictionary) Key identifying the parameters in the checkpoint to
restore.
network_state_key: (dictionary) Key identifying the model state in the
checkpoint to restore.
layer_to_reset: Optional identifying name of the layer to reset when loading
the checkpoint (useful for resetting the classification layer to use a
different number of classes for example).
"""
path: str | None = None
params_key: str | None = None
network_state_key: str | None = None
layer_to_reset: str | None = None
@dataclasses.dataclass(kw_only=True, slots=True)
class ModelConfig:
"""Config for the model.
Attributes:
name: Identifying name of the model.
kwargs: Keyword arguments to construct the model.
restore: Configuration for restoring the model.
"""
name: str
kwargs: Mapping[str, Any] = dataclasses.field(default_factory=dict)
restore: ModelRestoreConfig = dataclasses.field(
default_factory=ModelRestoreConfig)
@dataclasses.dataclass(kw_only=True, slots=True)
class ExperimentConfig:
"""Configuration for the experiment.
Attributes:
num_updates: Number of updates for the experiment.
optimizer: Optimizer configuration.
model: Model configuration.
training: Training configuration.
averaging: Averaging configuration.
evaluation: Evaluation configuration.
data_train: Training data configuration.
data_eval: Eval data configuration.
random_seed: Random seed (automatically changed from the default value).
"""
num_updates: int
optimizer: optimizer_config.OptimizerConfig
model: ModelConfig
training: experiment_config_py.TrainingConfig
averaging: experiment_config_py.AveragingConfig
evaluation: experiment_config_py.EvaluationConfig
data_train: data.DataLoader
data_eval: data.DataLoader
random_seed: int = 0
def build_jaxline_config(
experiment_config: ExperimentConfig,
) -> ml_collections.ConfigDict:
"""Creates the Jaxline configuration for the experiment."""
config = jaxline_base_config.get_base_config()
config.checkpoint_dir = '/tmp/jax_privacy/ckpt_dir'
  # We use the same rng for all replicas
  # (we take care of specializing the rngs ourselves where needed).
config.random_mode_train = 'same_host_same_device'
config.random_seed = random.randint(0, 1_000_000)
# Intervals can be measured in 'steps' or 'secs'.
config.interval_type = 'steps'
config.log_train_data_interval = 100
config.log_tensors_interval = 100
config.save_checkpoint_interval = 250
config.eval_specific_checkpoint_dir = ''
config.experiment_kwargs = ml_collections.ConfigDict()
config.experiment_kwargs.config = experiment_config
config.experiment_kwargs.config.random_seed = config.random_seed
config.experiment_kwargs.config.training.logging.prepend_split_name = True
# Ensure that random key splitting is configured as expected. The amount of
# noise injected in DP-SGD will be invalid otherwise.
assert config.random_mode_train == 'same_host_same_device'
if config.experiment_kwargs.config.training.dp.auto_tune:
config = auto_tune.dp_auto_tune_config(config)
return config
|
jax_privacy-main
|
jax_privacy/experiments/image_classification/config_base.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jaxline experiment to define training and eval loops."""
import collections
from typing import Iterable, Iterator
import chex
import haiku as hk
import jax
from jax_privacy.experiments import image_data as data
from jax_privacy.experiments.image_classification import config_base
from jax_privacy.experiments.image_classification import forward
from jax_privacy.experiments.image_classification import models
from jax_privacy.src.training import experiment
from jax_privacy.src.training import metrics as metrics_module
class Experiment(experiment.AbstractExperiment):
"""Jaxline experiment.
This class controls the training and evaluation loop at a high-level.
"""
def __init__(
self,
mode: str,
init_rng: chex.PRNGKey,
config: config_base.ExperimentConfig,
):
"""Initializes experiment.
Args:
mode: 'train' or 'eval'.
init_rng: random number generation key for initialization.
config: ConfigDict holding all hyper-parameters of the experiment.
"""
    # Unused since we rely on `config.random_seed` instead. The argument
    # `init_rng` is kept to conform to jaxline's expectations.
del init_rng
self.config = config
self._forward_fn = forward.MultiClassForwardFn(
net=hk.transform_with_state(self._model_fn))
super().__init__(
mode=mode,
random_seed=self.config.random_seed,
training_config=self.config.training,
optimizer_config=self.config.optimizer,
averaging_config=self.config.averaging,
num_training_samples=self.config.data_train.config.num_samples,
num_updates=self.config.num_updates,
)
@property
def forward_fn(self) -> forward.MultiClassForwardFn:
return self._forward_fn
def _model_fn(self, inputs, is_training=False):
model_kwargs = {
'num_classes': self.config.data_train.config.num_classes,
**self.config.model.kwargs,
}
model_instance = models.get_model_instance(self.config.model.name,
model_kwargs)
return model_instance(
inputs,
is_training=is_training,
)
def _should_restore_model(self) -> bool:
return bool(self.config.model.restore.path)
def _restore_model(self):
self._params, self._network_state = models.restore_from_path(
restore_path=self.config.model.restore.path,
params_key=self.config.model.restore.params_key,
network_state_key=self.config.model.restore.network_state_key,
layer_to_reset=self.config.model.restore.layer_to_reset,
params_init=self._params,
network_state_init=self._network_state,
)
def _build_train_input(self) -> Iterator[data.DataInputs]:
"""Builds the training input pipeline."""
return self.config.data_train.load_dataset(
batch_dims=(
jax.local_device_count(),
self.batching.batch_size_per_device_per_step,
),
is_training=True,
shard_data=True,
)
def _build_eval_input(self) -> Iterator[data.DataInputs]:
"""Builds the evaluation input pipeline."""
return self.config.data_eval.load_dataset(
batch_dims=(
jax.process_count(),
jax.local_device_count(),
self.config.evaluation.batch_size,
),
is_training=False,
shard_data=False,
max_num_batches=self.config.evaluation.max_num_batches,
)
def _eval_epoch(self, rng, unused_global_step):
"""Evaluates an epoch."""
avg_metrics = collections.defaultdict(metrics_module.Avg)
    # Checkpoints are broadcast to each local device, which we undo here since
    # the evaluation is performed on a single device (it is not pmapped).
if isinstance(self._averaging_config.ema_coefficient, Iterable):
ema_params = {
f'ema_{ema_decay}': params_ema for ema_decay, params_ema in zip(
self._averaging_config.ema_coefficient,
self._params_ema,
strict=True)
}
else:
ema_params = {'ema': self._params_ema}
params_dict = {
'last': self._params,
**ema_params,
'polyak': self._params_polyak,
}
state = self._network_state
num_samples = 0
host_id = jax.process_index()
# Iterate over the evaluation dataset and accumulate the metrics.
for inputs in self._build_eval_input():
rng, rng_eval = jax.random.split(rng)
num_hosts, num_devices_per_host, batch_size_per_device, *_ = (
inputs.image.shape)
batch_size = num_hosts * num_devices_per_host * batch_size_per_device
num_samples += batch_size
local_inputs = jax.tree_map(lambda x: x[host_id], inputs)
# Evaluate batch for each set of parameters.
for params_name, params in params_dict.items():
metrics = self.updater.evaluate(params, state, rng_eval, local_inputs)
# Update accumulated average for each metric.
for metric_name, val in metrics.scalars.items():
avg_metrics[f'{metric_name}_{params_name}'].update(val, n=batch_size)
metrics = {k: v.avg for k, v in avg_metrics.items()}
metrics['num_samples'] = num_samples
return metrics
|
jax_privacy-main
|
jax_privacy/experiments/image_classification/experiment.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines train and evaluation functions that compute losses and metrics."""
from typing import Mapping
import chex
import haiku as hk
import jax.numpy as jnp
from jax_privacy.experiments import image_data as data
from jax_privacy.src.dp_sgd import typing
from jax_privacy.src.training import forward
from jax_privacy.src.training import metrics as metrics_module
import optax
# TODO: investigate the pytype bug below.
class MultiClassForwardFn(
# pytype: disable=not-indexable
forward.ForwardFn[data.DataInputs, hk.Params, hk.State],
# pytype: enable=not-indexable
):
"""Defines forward passes for multi-class classification."""
def __init__(self, net: hk.TransformedWithState):
"""Initialization function.
Args:
net: haiku model to use for the forward pass.
"""
self._net = net
def train_init(
self,
rng_key: chex.PRNGKey,
inputs: data.DataInputs,
) -> tuple[hk.Params, hk.State]:
"""Initializes the model.
Args:
rng_key: random number generation key used for the random initialization.
inputs: model inputs to infer shapes of the model parameters.
Images are expected to be of shape [NKHWC] (K is augmult).
Returns:
Initialized model parameters and state.
"""
    # Images have shape [NKHWC] (K is augmult).
return self._net.init(rng_key, inputs.image[:, 0], is_training=True)
def train_forward(
self,
params: hk.Params,
network_state: hk.State,
rng_per_example: chex.PRNGKey,
inputs: data.DataInputs,
) -> tuple[typing.Loss, tuple[hk.State, typing.Metrics]]:
"""Forward pass per example (training time).
Args:
params: model parameters that should get updated during training.
network_state: model state.
rng_per_example: a random number generation key specific for a device and
accumulation step. It can be used to create a unique seed per
individual example by the user.
inputs: model inputs, where the labels are one-hot encoded. Images are
expected to be of shape [NKHWC] (K is augmult), and labels of shape
[NKO].
Returns:
loss: loss function computed per-example on the mini-batch (averaged over
the K augmentations).
network_state: new model state
metrics: metrics computed on the current mini-batch, including the loss
value per-example.
"""
images, labels = inputs.image, inputs.label
# `images` has shape [NKHWC] (K is augmult), while model accepts [NHWC], so
# we use a single larger batch dimension.
reshaped_images = images.reshape((-1,) + images.shape[2:])
reshaped_labels = labels.reshape((-1,) + labels.shape[2:])
logits, network_state = self._net.apply(
params, network_state, rng_per_example, reshaped_images,
is_training=True)
loss = self._loss(logits, reshaped_labels)
# We reshape back to [NK] and average across augmentations.
loss = loss.reshape(images.shape[:2])
loss = jnp.mean(loss, axis=1)
# Accuracy computation is performed with the first augmentation.
logits = logits.reshape(images.shape[:2] + logits.shape[1:])
selected_logits = logits[:, 0, :]
labels = jnp.mean(labels, axis=1)
metrics = typing.Metrics(
scalars_avg=self._train_metrics(selected_logits, labels),
per_example={'loss': loss},
)
return jnp.mean(loss), (network_state, metrics)
def eval_forward(
self,
params: hk.Params,
network_state: hk.State,
rng: chex.PRNGKey,
inputs: data.DataInputs,
) -> typing.Metrics:
"""Forward pass per example (evaluation time).
Args:
params: model parameters that should get updated during training.
network_state: model state.
rng: random number generation key.
      inputs: model inputs, where the labels are one-hot encoded. Images are
        expected to be of shape [NHWC], and labels of shape [NO].
Returns:
      per_example: metrics computed per-example on the mini-batch (logits only).
      scalars_avg: metrics computed and aggregated on the current mini-batch.
"""
logits, unused_network_state = self._net.apply(
params, network_state, rng, inputs.image)
loss = jnp.mean(self._loss(logits, inputs.label))
return typing.Metrics(
per_example={'logits': logits},
scalars_avg={'loss': loss, **self._eval_metrics(logits, inputs.label)},
)
def _loss(self, logits: chex.Array, labels: chex.Array) -> chex.Array:
"""Compute the loss per-example.
Args:
logits: logits vector of expected shape [...O].
labels: one-hot encoded labels of expected shape [...O].
Returns:
Cross-entropy loss computed per-example on leading dimensions.
"""
return optax.softmax_cross_entropy(logits, labels)
def _train_metrics(
self,
logits: chex.Array,
labels: chex.Array,
) -> Mapping[str, chex.Numeric]:
return self._topk_accuracy_metrics(logits, labels)
def _eval_metrics(
self,
logits: chex.Array,
labels: chex.Array,
) -> Mapping[str, chex.Numeric]:
return self._topk_accuracy_metrics(logits, labels)
def _topk_accuracy_metrics(
self,
logits: chex.Array,
labels: chex.Array,
) -> Mapping[str, chex.Numeric]:
"""Evaluates topk accuracy."""
# NB: labels are one-hot encoded.
acc1, acc5 = metrics_module.topk_accuracy(logits, labels, topk=(1, 5))
metrics = {'acc1': 100 * acc1, 'acc5': 100 * acc5}
return metrics
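# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Shows one way this forward function could be wired to a haiku model from
# this repository. The `MnistCNN` model comes from models/mnist.py; the
# `DataInputs` construction and the batch contents below are assumptions made
# purely for illustration.
if __name__ == '__main__':
  import jax
  from jax_privacy.experiments.image_classification.models import mnist

  def _model_fn(images, is_training=False):
    return mnist.MnistCNN(num_classes=10)(images, is_training)

  example_net = hk.transform_with_state(_model_fn)
  example_forward_fn = MultiClassForwardFn(example_net)
  # Fake augmult batch: images of shape [N, K, H, W, C], one-hot labels [N, K, O].
  example_batch = data.DataInputs(
      image=jnp.zeros((2, 1, 28, 28, 1)),
      label=jax.nn.one_hot(jnp.array([[3], [7]]), 10),
  )
  example_params, example_state = example_forward_fn.train_init(
      jax.random.PRNGKey(0), example_batch)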
|
jax_privacy-main
|
jax_privacy/experiments/image_classification/forward.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main script to run an experiment.
Usage example (run from this directory):
python run_experiment.py --config=configs/cifar10_wrn.py
"""
import functools
from absl import app
from absl import flags
from jax_privacy.experiments.image_classification import experiment
from jaxline import platform
if __name__ == '__main__':
flags.mark_flag_as_required('config')
app.run(functools.partial(platform.main, experiment.Experiment))
|
jax_privacy-main
|
jax_privacy/experiments/image_classification/run_experiment.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet Norm-Free Residual Networks as defined in (Brock et al., 2021).
Reference:
A. Brock, S. De, and S. L. Smith.
Characterizing signal propagation to close the performance gap
in unnormalized resnets.
International Conference on Learning Representations, 2021.
"""
from typing import Any, Optional
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jax_privacy.experiments.image_classification.models import common
class NFResNet(hk.Module):
"""Norm-Free preactivation ResNet."""
variant_dict = {
'ResNet50': {
'depth': [3, 4, 6, 3]
},
'ResNet101': {
'depth': [3, 4, 23, 3]
},
'ResNet152': {
'depth': [3, 8, 36, 3]
},
'ResNet200': {
'depth': [3, 24, 36, 3]
},
'ResNet288': {
'depth': [24, 24, 24, 24]
},
'ResNet600': {
'depth': [50, 50, 50, 50]
},
}
def __init__(
self,
num_classes: int,
*,
variant: str = 'ResNet50',
width: int = 4,
alpha: float = 0.2,
stochdepth_rate: float = 0.1,
drop_rate: Optional[float] = None,
activation: str = 'scaled_relu',
fc_init: Any = None,
skipinit_gain: hk.initializers.Initializer = jnp.zeros,
use_se: bool = False,
se_ratio: float = 0.25,
name: str = 'NF_ResNet',
):
super().__init__(name=name)
self.num_classes = num_classes
self.variant = variant
self.width = width
block_params = self.variant_dict[self.variant]
self.width_pattern = [item * self.width for item in [64, 128, 256, 512]]
self.depth_pattern = block_params['depth']
self.activation = common.activations_dict[activation]
if drop_rate is None:
self.drop_rate = block_params.get('drop_rate', 0.0)
else:
self.drop_rate = drop_rate
# Define the stem of the model.
ch = int(16 * self.width)
self.initial_conv = common.WSConv2D(
ch,
kernel_shape=7,
stride=2,
padding='SAME',
with_bias=False,
name='initial_conv')
# Define the body of the model.
self.blocks = []
expected_std = 1.0
num_blocks = sum(self.depth_pattern)
index = 0 # Overall block index
block_args = (self.width_pattern, self.depth_pattern, [1, 2, 2, 2])
for block_width, stage_depth, stride in zip(*block_args, strict=True):
for block_index in range(stage_depth):
# Scalar pre-multiplier so each block sees an N(0,1) input at init.
beta = 1. / expected_std
block_stochdepth_rate = stochdepth_rate * index / num_blocks
self.blocks += [
NFResBlock(
ch,
block_width,
stride=stride if block_index == 0 else 1,
beta=beta,
alpha=alpha,
activation=self.activation,
stochdepth_rate=block_stochdepth_rate,
skipinit_gain=skipinit_gain,
use_se=use_se,
se_ratio=se_ratio,
)
]
ch = block_width
index += 1
# Reset expected std but still give it 1 block of growth.
if block_index == 0:
expected_std = 1.0
expected_std = (expected_std**2 + alpha**2)**0.5
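        # For example, with the default alpha=0.2: after the reset,
        # expected_std grows as sqrt(1 + k * 0.2**2) over the next k blocks
        # (~1.02, ~1.04, ...), so beta = 1 / expected_std rescales each
        # block's input back towards unit variance at initialization.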
# Define the head: by default, initialize with N(0, 0.01).
if fc_init is None:
fc_init = hk.initializers.RandomNormal(0.01, 0)
self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)
def __call__(self, x: chex.Array, is_training: bool = True) -> chex.Array:
"""Return the output of the final layer without any [log-]softmax."""
# Forward through the stem.
out = self.initial_conv(x)
out = hk.max_pool(
out, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME')
# Forward through the blocks
for block in self.blocks:
out, unused_res_avg_var = block(out, is_training=is_training)
# Final-conv->activation, pool, dropout, classify
pool = jnp.mean(self.activation(out), [1, 2])
# Optionally apply dropout.
if self.drop_rate > 0.0 and is_training:
pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
logits = self.fc(pool)
return logits
class NFResBlock(hk.Module):
"""Normalizer-Free pre-activation ResNet Block."""
def __init__(
self,
in_ch: int,
out_ch: int,
*,
bottleneck_ratio: float = 0.25,
kernel_size: int = 3,
stride: int = 1,
beta: float = 1.0,
alpha: float = 0.2,
activation: common.Activation = jax.nn.relu,
skipinit_gain: hk.initializers.Initializer = jnp.zeros,
stochdepth_rate: Optional[float] = None,
use_se: bool = False,
se_ratio: float = 0.25,
name: Optional[str] = None,
):
super().__init__(name=name)
self.in_ch, self.out_ch = in_ch, out_ch
self.kernel_size = kernel_size
self.activation = activation
self.beta, self.alpha = beta, alpha
self.skipinit_gain = skipinit_gain
self.use_se, self.se_ratio = use_se, se_ratio
# Bottleneck width.
self.width = int(self.out_ch * bottleneck_ratio)
self.stride = stride
# Conv 0 (typically expansion conv).
self.conv0 = common.WSConv2D(
self.width, kernel_shape=1, padding='SAME', name='conv0')
# Grouped NxN conv.
self.conv1 = common.WSConv2D(
self.width,
kernel_shape=kernel_size,
stride=stride,
padding='SAME',
name='conv1',
)
# Conv 2, typically projection conv.
self.conv2 = common.WSConv2D(
self.out_ch, kernel_shape=1, padding='SAME', name='conv2')
# Use shortcut conv on channel change or downsample.
self.use_projection = stride > 1 or self.in_ch != self.out_ch
if self.use_projection:
self.conv_shortcut = common.WSConv2D(
self.out_ch,
kernel_shape=1,
stride=stride,
padding='SAME',
name='conv_shortcut')
# Are we using stochastic depth?
self._has_stochdepth = (
stochdepth_rate is not None and 0. < stochdepth_rate < 1.0)
if self._has_stochdepth:
self.stoch_depth = common.StochDepth(stochdepth_rate)
if self.use_se:
self.se = common.SqueezeExcite(self.out_ch, self.out_ch, self.se_ratio)
def __call__(
self,
x: chex.Array,
is_training: bool,
) -> tuple[chex.Array, chex.Array]:
"""Applies the forward pass."""
out = self.activation(x) * self.beta
shortcut = x
if self.use_projection: # Downsample with conv1x1.
shortcut = self.conv_shortcut(out)
out = self.conv0(out)
out = self.conv1(self.activation(out))
out = self.conv2(self.activation(out))
if self.use_se:
out = 2 * self.se(out) * out
# Get average residual standard deviation for reporting metrics.
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
# Apply stochdepth if applicable.
if self._has_stochdepth:
out = self.stoch_depth(out, is_training)
    # Apply the SkipInit gain.
out = out * hk.get_parameter(
'skip_gain', (), out.dtype, init=self.skipinit_gain)
return out * self.alpha + shortcut, res_avg_var
|
jax_privacy-main
|
jax_privacy/experiments/image_classification/models/imagenet.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to get a model given collection of models."""
import functools
from typing import Sequence
from absl import logging
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jax_privacy.experiments.image_classification.models import cifar
from jax_privacy.experiments.image_classification.models import imagenet
from jax_privacy.experiments.image_classification.models import mnist
from jax_privacy.experiments.image_classification.models.common import restore_from_path
import numpy as np
MODELS = {
'wideresnet': cifar.WideResNet,
'cnn': mnist.MnistCNN,
'nf_resnet': imagenet.NFResNet,
}
def get_model_instance(model_type: str, model_kwargs):
"""Instantiates the model with the model type and kwargs."""
assert model_type in MODELS, (
f'Model type not supported in this expt. Currently support only '
f'{list(MODELS.keys())}. Input model type is {model_type}')
classifier_module = MODELS[model_type]
return classifier_module(**model_kwargs)
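# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Builds a registered model inside a haiku transform; the model type and
# kwargs below are illustrative.
if __name__ == '__main__':
  def _model_fn(images, is_training=False):
    model = get_model_instance('cnn', {'num_classes': 10})
    return model(images, is_training)

  example_net = hk.transform_with_state(_model_fn)
  example_params, example_state = example_net.init(
      jax.random.PRNGKey(0), jnp.zeros((1, 28, 28, 1)), is_training=True)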
|
jax_privacy-main
|
jax_privacy/experiments/image_classification/models/__init__.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functions used to define model architectures."""
import os
from typing import Callable, Optional, Tuple
import urllib.request
import chex
import dill
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import utils
import numpy as np
Activation = Callable[[chex.Array], chex.Array]
# Activations with and without scaling.
activations_dict = {
# Regular activations.
'identity': lambda x: x,
'celu': jax.nn.celu,
'elu': jax.nn.elu,
'gelu': jax.nn.gelu,
'glu': jax.nn.glu,
'leaky_relu': jax.nn.leaky_relu,
'log_sigmoid': jax.nn.log_sigmoid,
'log_softmax': jax.nn.log_softmax,
'relu': jax.nn.relu,
'relu6': jax.nn.relu6,
'selu': jax.nn.selu,
'sigmoid': jax.nn.sigmoid,
'silu': jax.nn.silu,
'swish': jax.nn.silu,
'soft_sign': jax.nn.soft_sign,
'softplus': jax.nn.softplus,
'tanh': jnp.tanh,
# Scaled activations.
'scaled_celu': lambda x: jax.nn.celu(x) * 1.270926833152771,
'scaled_elu': lambda x: jax.nn.elu(x) * 1.2716004848480225,
'scaled_gelu': lambda x: jax.nn.gelu(x) * 1.7015043497085571,
'scaled_glu': lambda x: jax.nn.glu(x) * 1.8484294414520264,
'scaled_leaky_relu': lambda x: jax.nn.leaky_relu(x) * 1.70590341091156,
'scaled_log_sigmoid': lambda x: jax.nn.log_sigmoid(x) * 1.9193484783172607,
'scaled_log_softmax': lambda x: jax.nn.log_softmax(x) * 1.0002083778381348,
'scaled_relu': lambda x: jax.nn.relu(x) * 1.7139588594436646,
'scaled_relu6': lambda x: jax.nn.relu6(x) * 1.7131484746932983,
'scaled_selu': lambda x: jax.nn.selu(x) * 1.0008515119552612,
'scaled_sigmoid': lambda x: jax.nn.sigmoid(x) * 4.803835391998291,
'scaled_silu': lambda x: jax.nn.silu(x) * 1.7881293296813965,
'scaled_swish': lambda x: jax.nn.silu(x) * 1.7881293296813965,
'scaled_soft_sign': lambda x: jax.nn.soft_sign(x) * 2.338853120803833,
'scaled_softplus': lambda x: jax.nn.softplus(x) * 1.9203323125839233,
'scaled_tanh': lambda x: jnp.tanh(x) * 1.5939117670059204,
}
class WSConv2D(hk.Conv2D):
"""2D Convolution with Scaled Weight Standardization and affine gain+bias."""
@hk.transparent
def standardize_weight(self, weight, eps=1e-4):
"""Apply scaled WS with affine gain."""
mean = jnp.mean(weight, axis=(0, 1, 2), keepdims=True)
var = jnp.var(weight, axis=(0, 1, 2), keepdims=True)
fan_in = np.prod(weight.shape[:-1])
gain = hk.get_parameter('gain', shape=(weight.shape[-1],),
dtype=weight.dtype, init=jnp.ones)
# Manually fused normalization, eq. to (w - mean) * gain / sqrt(N * var).
scale = jax.lax.rsqrt(jnp.maximum(var * fan_in, eps)) * gain
shift = mean * scale
return weight * scale - shift
def __call__(self, inputs: chex.Array, eps: float = 1e-4) -> chex.Array:
w_shape = self.kernel_shape + (
inputs.shape[self.channel_index] // self.feature_group_count,
self.output_channels)
# Use fan-in scaled init, but WS is largely insensitive to this choice.
w_init = hk.initializers.VarianceScaling(1.0, 'fan_in', 'normal')
w = hk.get_parameter('w', w_shape, inputs.dtype, init=w_init)
weight = self.standardize_weight(w, eps)
out = jax.lax.conv_general_dilated(
inputs, weight, window_strides=self.stride, padding=self.padding,
lhs_dilation=self.lhs_dilation, rhs_dilation=self.kernel_dilation,
dimension_numbers=self.dimension_numbers,
feature_group_count=self.feature_group_count)
# Always add bias.
bias_shape = (self.output_channels,)
bias = hk.get_parameter('bias', bias_shape, inputs.dtype, init=jnp.zeros)
return out + bias
class StochDepth(hk.Module):
"""Batchwise Dropout used in EfficientNet, optionally sans rescaling."""
def __init__(
self,
drop_rate: float,
scale_by_keep: bool = False,
name: Optional[str] = None,
):
super().__init__(name=name)
self.drop_rate = drop_rate
self.scale_by_keep = scale_by_keep
def __call__(self, x: chex.Array, is_training: bool) -> chex.Array:
if not is_training:
return x
batch_size = x.shape[0]
r = jax.random.uniform(hk.next_rng_key(), [batch_size, 1, 1, 1],
dtype=x.dtype)
keep_prob = 1. - self.drop_rate
binary_tensor = jnp.floor(keep_prob + r)
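    # E.g. with drop_rate=0.1: keep_prob=0.9 and r ~ U[0, 1), so
    # floor(keep_prob + r) equals 1 with probability 0.9 and 0 otherwise,
    # keeping each example's branch with probability keep_prob.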
if self.scale_by_keep:
x = x / keep_prob
return x * binary_tensor
class SqueezeExcite(hk.Module):
"""Simple Squeeze+Excite module."""
def __init__(
self,
in_ch: int,
out_ch: int,
se_ratio: float = 0.5,
hidden_ch: Optional[int] = None,
activation: Activation = jax.nn.relu,
name: Optional[str] = None,
):
super().__init__(name=name)
self.in_ch, self.out_ch = in_ch, out_ch
if se_ratio is None:
if hidden_ch is None:
raise ValueError('Must provide one of se_ratio or hidden_ch')
self.hidden_ch = hidden_ch
else:
self.hidden_ch = max(1, int(self.in_ch * se_ratio))
self.activation = activation
self.fc0 = hk.Linear(self.hidden_ch, with_bias=True)
self.fc1 = hk.Linear(self.out_ch, with_bias=True)
def __call__(self, x):
# Average over HW dimensions.
h = jnp.mean(x, axis=[1, 2])
h = self.fc1(self.activation(self.fc0(h)))
# Broadcast along H, W dimensions.
h = jax.nn.sigmoid(h)[:, None, None]
return h
def download_and_read_file(name: str, root_dir: str = '/tmp/jax_privacy'):
"""Load file, downloading to /tmp/jax_privacy first if necessary."""
local_path = os.path.join(root_dir, name)
if not os.path.exists(os.path.dirname(local_path)):
os.makedirs(os.path.dirname(local_path))
if not os.path.exists(local_path):
gcp_bucket_url = 'https://storage.googleapis.com/dm_jax_privacy/models/'
download_url = gcp_bucket_url + name
urllib.request.urlretrieve(download_url, local_path)
return open(local_path, mode='rb')
def restore_from_path(
*,
restore_path: str,
params_key: str,
network_state_key: str,
layer_to_reset: Optional[str],
params_init: chex.ArrayTree,
network_state_init: chex.ArrayTree,
) -> Tuple[chex.ArrayTree, chex.ArrayTree]:
"""Restore parameters and model state from an existing checkpoint.
Args:
restore_path: path to model to restore. This should point to a dict that can
be loaded through dill.
params_key: key of the dict corresponding to the model parameters.
network_state_key: key of the dict corresponding to the model state.
layer_to_reset: name of the layer to reset (exact match required).
params_init: initial value for the model parameters (used only if
a layer matches with `layer_to_reset`).
network_state_init: initial value for the model state (used only if
a layer matches with `layer_to_reset`).
Returns:
params: model parameters loaded from the checkpoint (with the classifier
potentially reset).
network_state: model state loaded from the checkpoint (with state associated
to the classifier potentially reset).
"""
# Load pretrained experiment state.
with download_and_read_file(restore_path) as f:
ckpt_state = dill.load(f)
params_loaded = utils.bcast_local_devices(ckpt_state[params_key])
network_state_loaded = utils.bcast_local_devices(
ckpt_state[network_state_key])
def should_reset_layer(module_name, *_):
return module_name == layer_to_reset
if layer_to_reset:
_, params_loaded = hk.data_structures.partition(
should_reset_layer, params_loaded)
_, network_state_loaded = hk.data_structures.partition(
should_reset_layer, network_state_loaded)
# Note that the 'loaded' version must be last in the merge to get priority.
params = hk.data_structures.merge(params_init, params_loaded)
network_state = hk.data_structures.merge(
network_state_init, network_state_loaded)
return params, network_state
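# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Restores a pretrained checkpoint while re-initializing the classification
# head, mirroring the fine-tuning configs in this repository. The restore path
# below is a placeholder, and `params_init` / `network_state_init` are assumed
# to come from a fresh `net.init(...)` call.
#
#   params, network_state = restore_from_path(
#       restore_path='<checkpoint-name>.dill',
#       params_key='params',
#       network_state_key='network_state',
#       layer_to_reset='wide_res_net/Softmax',
#       params_init=params_init,
#       network_state_init=network_state_init,
#   )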
|
jax_privacy-main
|
jax_privacy/experiments/image_classification/models/common.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of the CIFAR Wide Residual Network."""
import functools
import logging
import haiku as hk
import haiku.initializers as hk_init
import jax.numpy as jnp
from jax_privacy.experiments.image_classification.models import common
class WideResNet(hk.Module):
"""A Module defining a Wide ResNet."""
CONV_MODULES = {
'WSConv2D': common.WSConv2D,
'Conv2D': hk.Conv2D,
}
def __init__(
self,
num_classes: int = 10,
depth: int = 16,
width: int = 4,
dropout_rate: float = 0.0,
use_skip_init: bool = False,
use_skip_paths: bool = True,
which_conv: str = 'WSConv2D', # Conv2D or WSConv2D.
which_norm: str = 'GroupNorm', # LayerNorm / GroupNorm / BatchNorm.
activation: str = 'relu',
groups: int = 16, # Only used for GroupNorm.
is_dp: bool = True,
):
super().__init__()
self.num_output_classes = num_classes
self.width = width
self.which_norm = which_norm
if which_norm is None:
self.norm_fn = lambda *args, **kwargs: (lambda array: array)
else:
self.norm_fn = getattr(hk, which_norm)
if which_norm == 'GroupNorm':
self.norm_fn = functools.partial(self.norm_fn, groups=groups)
elif which_norm == 'BatchNorm':
if is_dp:
raise ValueError('BatchNorm is not compatible with DP training. Set'
' `is_dp=False` if this is intended')
logging.warning('BatchNorm is not compatible with DP training.')
self.norm_fn = functools.partial(
self.norm_fn, create_scale=True, create_offset=True, decay_rate=0.9)
self.conv_fn = self.CONV_MODULES[which_conv]
if which_conv == 'WSConv2D':
self.conv_fn = functools.partial(
self.conv_fn, w_init=hk_init.VarianceScaling(1.0))
self.use_skip_init = use_skip_init
self.use_skip_paths = use_skip_paths
self.dropout_rate = dropout_rate
self.resnet_blocks = (depth - 4) // 6
self.activation = common.activations_dict[activation]
@hk.transparent
def apply_skip_init(self, net, name):
scale = hk.get_parameter(name, [1], init=jnp.zeros)
return net * scale
@hk.transparent
def residual_block(self, net, width, strides, name, is_training):
"""Creates a residual block."""
norm_kwargs = {}
if self.which_norm == 'BatchNorm':
norm_kwargs['is_training'] = is_training
for i in range(self.resnet_blocks):
if self.use_skip_paths:
# This is the 'skip' branch.
skip = net
if i == 0:
skip = self.activation(skip)
skip = self.norm_fn(name=name + '_skip_norm')(skip, **norm_kwargs)
skip = self.conv_fn(
width,
name=name + '_skip_conv',
stride=strides,
kernel_shape=(1, 1),
)(skip)
# This is the 'residual' branch.
for j in range(2):
name_suffix = str(i) + '_' + str(j)
strides = strides if name_suffix == '0_0' else (1, 1)
net = self.activation(net)
net = self.norm_fn(name=name + '_norm_' + name_suffix)(
net, **norm_kwargs)
net = self.conv_fn(
width,
name=name + 'Conv_' + name_suffix,
kernel_shape=(3, 3),
stride=strides,
)(net)
# Merge both branches.
if self.use_skip_init:
net = self.apply_skip_init(net, name=name + 'Scale_' + name_suffix)
if self.use_skip_paths:
net += skip
return net
def __call__(self, inputs, is_training):
norm_kwargs = {}
if self.which_norm == 'BatchNorm':
norm_kwargs['is_training'] = is_training
net = self.conv_fn(16, name='First_conv', kernel_shape=(3, 3))(inputs)
net = self.residual_block(
net, width=16 * self.width, strides=(1, 1), name='Block_1',
is_training=is_training)
net = self.residual_block(
net, width=32 * self.width, strides=(2, 2), name='Block_2',
is_training=is_training)
net = self.residual_block(
net, width=64 * self.width, strides=(2, 2), name='Block_3',
is_training=is_training)
net = self.activation(net)
net = self.norm_fn(name='Final_norm')(net, **norm_kwargs)
net = jnp.mean(net, axis=[1, 2], dtype=jnp.float32)
if self.dropout_rate > 0.0:
dropout_rate = self.dropout_rate if is_training else 0.0
net = hk.dropout(hk.next_rng_key(), dropout_rate, net)
return hk.Linear(
self.num_output_classes,
w_init=hk_init.VarianceScaling(1.0),
name='Softmax',
)(net)
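# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Instantiates the WRN-16-4 used by the CIFAR-10 config inside a haiku
# transform; the input shape below is illustrative.
if __name__ == '__main__':
  import jax

  def _model_fn(images, is_training=False):
    return WideResNet(num_classes=10, depth=16, width=4)(images, is_training)

  example_net = hk.transform_with_state(_model_fn)
  example_params, example_state = example_net.init(
      jax.random.PRNGKey(0), jnp.zeros((1, 32, 32, 3)), is_training=True)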
|
jax_privacy-main
|
jax_privacy/experiments/image_classification/models/cifar.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Two-layer CNN for MNIST (mainly for membership inference attacks)."""
import functools
import chex
import haiku as hk
import jax
from jax_privacy.experiments.image_classification.models import common
class MnistCNN(hk.Module):
"""Hard-coded two-layer CNN."""
def __init__(
self,
num_classes: int = 10,
activation: common.Activation = jax.nn.relu
):
super().__init__()
    # Both conv layers use a stride of 2, with kernel shapes of 8 and 4.
self._conv_1 = hk.Conv2D(
output_channels=16,
kernel_shape=8,
stride=2,
padding='SAME',
name='conv2d_1',
)
self._conv_2 = hk.Conv2D(
output_channels=32,
kernel_shape=4,
stride=2,
padding='VALID',
name='conv2d_2',
)
# First linear layer.
self._linear = hk.Linear(32, name='linear')
# Classification layer.
self._logits_module = hk.Linear(num_classes, name='linear_1')
self._pool = functools.partial(
hk.max_pool,
window_shape=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
)
self._activation = activation
def __call__(self, inputs: chex.Array, is_training: bool) -> chex.Array:
return hk.Sequential([
self._conv_1,
self._activation,
self._pool,
self._conv_2,
self._activation,
self._pool,
hk.Flatten(),
self._linear,
self._activation,
self._logits_module,
])(inputs)
|
jax_privacy-main
|
jax_privacy/experiments/image_classification/models/mnist.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training an NF-ResNet-50 on ImageNet with (8.0, 8e-7)-DP."""
import haiku.initializers as hk_init
import jax.numpy as jnp
from jax_privacy.experiments import image_data
from jax_privacy.experiments.image_classification import config_base
from jax_privacy.src.training import experiment_config
from jax_privacy.src.training import optimizer_config
import ml_collections
def get_config() -> ml_collections.ConfigDict:
"""Experiment config."""
config = config_base.ExperimentConfig(
num_updates=71589,
optimizer=optimizer_config.sgd_config(
lr=optimizer_config.constant_lr_config(4.0),
),
model=config_base.ModelConfig(
name='nf_resnet',
kwargs={
'variant': 'ResNet50',
'drop_rate': None, # dropout-rate
'fc_init': hk_init.RandomNormal(0.01, 0),
'skipinit_gain': jnp.ones,
},
),
training=experiment_config.TrainingConfig(
batch_size=experiment_config.BatchSizeTrainConfig(
total=16384,
per_device_per_step=32,
),
weight_decay=0.0, # L-2 regularization,
train_only_layer=None, # None
dp=experiment_config.DPConfig(
delta=8e-7,
clipping_norm=1.0,
stop_training_at_epsilon=8.0,
rescale_to_unit_norm=True,
noise_multiplier=2.5,
),
logging=experiment_config.LoggingConfig(
grad_clipping=True,
grad_alignment=False,
snr_global=True, # signal-to-noise ratio across layers
snr_per_layer=False, # signal-to-noise ratio per layer
),
),
averaging=experiment_config.AveragingConfig(ema_coefficient=0.99999,),
data_train=image_data.ImageNetLoader(
config=image_data.ImagenetTrainConfig(
preprocess_name='standardise',
image_size=(224, 224),
),
augmult_config=image_data.AugmultConfig(
augmult=4,
random_flip=True,
random_crop=True,
random_color=False,
),
),
data_eval=image_data.ImageNetLoader(
config=image_data.ImagenetValidConfig(
preprocess_name='standardise',
image_size=(224, 224),
),
),
evaluation=experiment_config.EvaluationConfig(
batch_size=100,
),
)
return config_base.build_jaxline_config(
experiment_config=config,
)
|
jax_privacy-main
|
jax_privacy/experiments/image_classification/configs/imagenet_nf_resnet_50_eps8.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training a WRN-16-4 on CIFAR-10 with (1.0, 1e-5)-DP."""
from jax_privacy.experiments import image_data
from jax_privacy.experiments.image_classification import config_base
from jax_privacy.src.training import experiment_config
from jax_privacy.src.training import optimizer_config
import ml_collections
def get_config() -> ml_collections.ConfigDict:
"""Experiment config."""
config = config_base.ExperimentConfig(
num_updates=875,
optimizer=optimizer_config.sgd_config(
lr=optimizer_config.constant_lr_config(2.0),
),
model=config_base.ModelConfig(
name='wideresnet',
kwargs={
'depth': 16,
'width': 4,
},
),
training=experiment_config.TrainingConfig(
batch_size=experiment_config.BatchSizeTrainConfig(
total=4096,
per_device_per_step=64,
),
weight_decay=0.0, # L-2 regularization,
train_only_layer=None,
dp=experiment_config.DPConfig(
delta=1e-5,
clipping_norm=1.0,
stop_training_at_epsilon=1.0,
rescale_to_unit_norm=True,
noise_multiplier=10.0,
auto_tune=None,
),
logging=experiment_config.LoggingConfig(
grad_clipping=True,
grad_alignment=False,
snr_global=True, # signal-to-noise ratio across layers
snr_per_layer=False, # signal-to-noise ratio per layer
),
),
averaging=experiment_config.AveragingConfig(ema_coefficient=0.999,),
data_train=image_data.Cifar10Loader(
config=image_data.Cifar10TrainValidConfig(
preprocess_name='standardise',
),
augmult_config=image_data.AugmultConfig(
augmult=16,
random_flip=True,
random_crop=True,
random_color=False,
),
),
data_eval=image_data.Cifar10Loader(
config=image_data.Cifar10TestConfig(
preprocess_name='standardise',
),
),
evaluation=experiment_config.EvaluationConfig(
batch_size=100,
),
)
return config_base.build_jaxline_config(
experiment_config=config,
)
|
jax_privacy-main
|
jax_privacy/experiments/image_classification/configs/cifar10_wrn_16_4_eps1.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fine-tuning a WRN-28-10 on CIFAR-100 with (1.0, 1e-5)-DP."""
from jax_privacy.experiments import image_data
from jax_privacy.experiments.image_classification import config_base
from jax_privacy.src.training import experiment_config
from jax_privacy.src.training import optimizer_config
import ml_collections
def get_config() -> ml_collections.ConfigDict:
"""Experiment config."""
config = config_base.ExperimentConfig(
num_updates=250,
optimizer=optimizer_config.sgd_config(
lr=optimizer_config.constant_lr_config(1.0),
),
model=config_base.ModelConfig(
name='wideresnet',
kwargs={
'depth': 28,
'width': 10,
},
restore=config_base.ModelRestoreConfig(
path=config_base.MODEL_CKPT.WRN_28_10_IMAGENET32,
params_key='params',
network_state_key='network_state',
layer_to_reset='wide_res_net/Softmax',
),
),
training=experiment_config.TrainingConfig(
batch_size=experiment_config.BatchSizeTrainConfig(
total=16384,
per_device_per_step=16,
),
weight_decay=0.0,
train_only_layer=None, # 'wide_res_net/Softmax',
dp=experiment_config.DPConfig(
delta=1e-5,
clipping_norm=1.0,
stop_training_at_epsilon=1.0,
rescale_to_unit_norm=True,
noise_multiplier=21.1,
auto_tune=None, # 'num_updates',
),
logging=experiment_config.LoggingConfig(
grad_clipping=True,
grad_alignment=False,
snr_global=True, # signal-to-noise ratio across layers
snr_per_layer=False, # signal-to-noise ratio per layer
),
),
averaging=experiment_config.AveragingConfig(ema_coefficient=0.9,),
data_train=image_data.Cifar100Loader(
config=image_data.Cifar100TrainValidConfig(
preprocess_name='standardise',
),
augmult_config=image_data.AugmultConfig(
augmult=16,
random_flip=True,
random_crop=True,
random_color=False,
),
),
data_eval=image_data.Cifar100Loader(
config=image_data.Cifar100TestConfig(
preprocess_name='standardise',
),
),
evaluation=experiment_config.EvaluationConfig(
batch_size=100,
),
)
return config_base.build_jaxline_config(
experiment_config=config,
)
|
jax_privacy-main
|
jax_privacy/experiments/image_classification/configs/cifar100_wrn_28_10_eps1_finetune.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
jax_privacy-main
|
jax_privacy/src/__init__.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute bounds on DP epsilon (given delta) for DP-SGD."""
import abc
import dataclasses
import numbers
from typing import Sequence, Tuple, Union
import warnings
import dp_accounting
import numpy as np
_DEFAULT_RDP_ORDERS = np.concatenate((
np.linspace(1.01, 8, num=50),
np.arange(8, 64),
np.linspace(65, 512, num=10, dtype=int),
))
_DEFAULT_PLD_DISCRETIZATION = 1e-4
class DpAccountantConfig(metaclass=abc.ABCMeta):
"""Configuration for the DP Accountant to use."""
@abc.abstractmethod
def create_accountant(self) -> dp_accounting.PrivacyAccountant:
"""Creates an accountant (with a new state)."""
@dataclasses.dataclass(kw_only=True, slots=True)
class RdpAccountantConfig(DpAccountantConfig):
orders: Sequence[int] = dataclasses.field(
default_factory=lambda: _DEFAULT_RDP_ORDERS)
def __post_init__(self):
self.orders = np.array(self.orders)
def create_accountant(self) -> dp_accounting.rdp.RdpAccountant:
return dp_accounting.rdp.RdpAccountant(
orders=self.orders,
neighboring_relation=(
dp_accounting.NeighboringRelation.ADD_OR_REMOVE_ONE),
)
@dataclasses.dataclass(kw_only=True, slots=True)
class PldAccountantConfig(DpAccountantConfig):
# Values smaller than 1e-5 can result in slower and less accurate accounting.
# b/251010738
value_discretization_interval: float = _DEFAULT_PLD_DISCRETIZATION
def create_accountant(self) -> dp_accounting.pld.PLDAccountant:
return dp_accounting.pld.PLDAccountant(
neighboring_relation=(
dp_accounting.NeighboringRelation.ADD_OR_REMOVE_ONE),
value_discretization_interval=self.value_discretization_interval,
)
def compute_epsilon(
noise_multipliers: Union[float, Sequence[Tuple[int, float]]],
batch_sizes: Union[int, Sequence[Tuple[int, int]]],
num_steps: int,
num_examples: int,
target_delta: float,
dp_accountant_config: DpAccountantConfig,
) -> float:
"""Computes epsilon for heterogeneous noise and mini-batch sizes.
Args:
noise_multipliers: Noise multiplier. Float or list of pairs
(t: int, nm: float) if the noise multiplier changes across steps.
't' indicates step where noise_multiplier is set to 'nm'.
    batch_sizes: Batch size. Integer or list of pairs (t: int, bs: int) if the
      batch size changes across steps. 't' indicates the step where batch_size
      is set to 'bs'.
num_steps: Total number of iterations.
num_examples: Number of training examples.
target_delta: Desired delta for the returned epsilon.
dp_accountant_config: Configuration for the DP accountant to use.
Returns:
epsilon: Privacy spent.
"""
if num_examples * target_delta > 1.:
warnings.warn('Your delta might be too high.')
# If noise_multipliers is a number, turn it into list format of (0, nm).
if isinstance(noise_multipliers, numbers.Number):
noise_multipliers = [(0, noise_multipliers)]
# If batch_sizes is a number, turn it into list format of (0, bs).
if isinstance(batch_sizes, int):
batch_sizes = [(0, batch_sizes)]
# Make sure the time steps of changes are increasing.
noise_multipliers = sorted(noise_multipliers, key=lambda t: t[0])
batch_sizes = sorted(batch_sizes, key=lambda x: x[0])
# Make sure the first time step is 0 in both sequences of hyper-parameters.
assert noise_multipliers[0][0] == 0
assert batch_sizes[0][0] == 0
# Remove any settings which occur later than the maximum number of steps.
noise_multipliers = [(t, x) for t, x in noise_multipliers if t <= num_steps]
batch_sizes = [x for x in batch_sizes if x[0] <= num_steps]
# Interleave both sequences of hyper-parameters into a single one.
nm_and_bs = _interleave(noise_multipliers, batch_sizes)
t_nm_and_bs = []
# Adjust time indices to count number of steps in each configuration.
for i in range(len(nm_and_bs) - 1):
t_nm_and_bs.append((nm_and_bs[i + 1][0] - nm_and_bs[i][0], nm_and_bs[i][1],
nm_and_bs[i][2]))
t_nm_and_bs.append(
(num_steps - nm_and_bs[-1][0], nm_and_bs[-1][1], nm_and_bs[-1][2]))
dp_accountant = dp_accountant_config.create_accountant()
for t, nm, bs in t_nm_and_bs:
q = bs / float(num_examples)
event = dp_accounting.PoissonSampledDpEvent(
q, dp_accounting.GaussianDpEvent(nm))
dp_accountant.compose(event, t)
eps = dp_accountant.get_epsilon(target_delta=target_delta)
return eps
def _interleave(t_a, t_b):
"""Helper function to pair two timed sequences."""
ts = [t for (t, _) in t_a] + [t for (t, _) in t_b]
ts = list(set(ts))
ts.sort()
def _find_pair(t):
a = [a for (s, a) in t_a if s <= t][-1]
b = [b for (s, b) in t_b if s <= t][-1]
return a, b
return [(t, *_find_pair(t)) for t in ts]
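# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Computes the epsilon spent by DP-SGD for a fixed noise multiplier and batch
# size under the RDP accountant; the numbers below are made up for
# illustration.
if __name__ == '__main__':
  example_epsilon = compute_epsilon(
      noise_multipliers=1.1,
      batch_sizes=1024,
      num_steps=10_000,
      num_examples=50_000,
      target_delta=1e-5,
      dp_accountant_config=RdpAccountantConfig(),
  )
  print(f'epsilon spent: {example_epsilon:.3f}')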
|
jax_privacy-main
|
jax_privacy/src/accounting/dp_bounds.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for calibration of DP hyper-parameters using privacy accountant."""
from absl.testing import absltest
from jax_privacy.src.accounting import calibrate
from jax_privacy.src.accounting import dp_bounds
import numpy as np
_BATCH_SIZE = 1024
_NOISE_MULTIPLIER = 4.0
_NUM_EXAMPLES = 50_000
_EPSILON = 2.27535
_DELTA = 1e-5
_NUM_STEPS = 10_000
class CalibrateTest(absltest.TestCase):
def test_calibrate_noise(self):
noise_multiplier = calibrate.calibrate_noise_multiplier(
target_epsilon=_EPSILON,
batch_sizes=_BATCH_SIZE,
num_steps=_NUM_STEPS,
num_examples=_NUM_EXAMPLES,
target_delta=_DELTA,
dp_accountant_config=dp_bounds.RdpAccountantConfig(),
tol=1e-4,
)
np.testing.assert_allclose(noise_multiplier, _NOISE_MULTIPLIER, rtol=1e-4)
epsilon = dp_bounds.compute_epsilon(
noise_multipliers=noise_multiplier,
batch_sizes=_BATCH_SIZE,
num_steps=_NUM_STEPS,
num_examples=_NUM_EXAMPLES,
target_delta=_DELTA,
dp_accountant_config=dp_bounds.RdpAccountantConfig(),
)
np.testing.assert_allclose(epsilon, _EPSILON, rtol=1e-4)
def test_calibrate_batch_size(self):
batch_size = calibrate.calibrate_batch_size(
noise_multipliers=_NOISE_MULTIPLIER,
target_epsilon=_EPSILON,
num_steps=_NUM_STEPS,
num_examples=_NUM_EXAMPLES,
target_delta=_DELTA,
dp_accountant_config=dp_bounds.RdpAccountantConfig(),
)
self.assertLessEqual(np.abs(batch_size - _BATCH_SIZE), 1)
epsilon = dp_bounds.compute_epsilon(
noise_multipliers=_NOISE_MULTIPLIER,
batch_sizes=batch_size,
num_steps=_NUM_STEPS,
num_examples=_NUM_EXAMPLES,
target_delta=_DELTA,
dp_accountant_config=dp_bounds.RdpAccountantConfig(),
)
np.testing.assert_allclose(epsilon, _EPSILON, rtol=1e-2)
def test_calibrate_num_steps(self):
num_steps = calibrate.calibrate_steps(
noise_multipliers=_NOISE_MULTIPLIER,
target_epsilon=_EPSILON,
batch_sizes=_BATCH_SIZE,
num_examples=_NUM_EXAMPLES,
target_delta=_DELTA,
dp_accountant_config=dp_bounds.RdpAccountantConfig(),
)
self.assertLessEqual(np.abs(num_steps - _NUM_STEPS), 1)
epsilon = dp_bounds.compute_epsilon(
noise_multipliers=_NOISE_MULTIPLIER,
batch_sizes=_BATCH_SIZE,
num_steps=num_steps,
num_examples=_NUM_EXAMPLES,
target_delta=_DELTA,
dp_accountant_config=dp_bounds.RdpAccountantConfig(),
)
np.testing.assert_allclose(epsilon, _EPSILON, rtol=1e-4)
if __name__ == '__main__':
absltest.main()
|
jax_privacy-main
|
jax_privacy/src/accounting/calibrate_test.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for computation of DP bounds."""
from absl.testing import absltest
from jax_privacy.src.accounting import dp_bounds
import numpy as np
_BATCH_SIZE = 1024
_NOISE_MULTIPLIER = 4.0
_NUM_EXAMPLES = 50_000
_EPSILON_RDP = 2.27535
_EPSILON_PLD = 2.09245
_DELTA = 1e-5
_NUM_STEPS = 10_000
class DPBoundsTest(absltest.TestCase):
def test_compute_epsilon_via_rdp(self):
epsilon = dp_bounds.compute_epsilon(
noise_multipliers=_NOISE_MULTIPLIER,
batch_sizes=_BATCH_SIZE,
num_steps=_NUM_STEPS,
num_examples=_NUM_EXAMPLES,
target_delta=_DELTA,
dp_accountant_config=dp_bounds.RdpAccountantConfig(),
)
np.testing.assert_allclose(epsilon, _EPSILON_RDP, rtol=1e-5)
def test_compute_epsilon_via_pld(self):
epsilon = dp_bounds.compute_epsilon(
noise_multipliers=_NOISE_MULTIPLIER,
batch_sizes=_BATCH_SIZE,
num_steps=_NUM_STEPS,
num_examples=_NUM_EXAMPLES,
target_delta=_DELTA,
dp_accountant_config=dp_bounds.PldAccountantConfig(),
)
np.testing.assert_allclose(epsilon, _EPSILON_PLD, rtol=1e-5)
if __name__ == '__main__':
absltest.main()
|
jax_privacy-main
|
jax_privacy/src/accounting/dp_bounds_test.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Privacy accounting."""
from jax_privacy.src.accounting.accountant import CachedExperimentAccountant
from jax_privacy.src.accounting.accountant import ExperimentAccountant
from jax_privacy.src.accounting.calibrate import calibrate_batch_size
from jax_privacy.src.accounting.calibrate import calibrate_noise_multiplier
from jax_privacy.src.accounting.calibrate import calibrate_steps
from jax_privacy.src.accounting.dp_bounds import compute_epsilon
from jax_privacy.src.accounting.dp_bounds import DpAccountantConfig
from jax_privacy.src.accounting.dp_bounds import PldAccountantConfig
from jax_privacy.src.accounting.dp_bounds import RdpAccountantConfig
|
jax_privacy-main
|
jax_privacy/src/accounting/__init__.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calibrating DP hyper-parameters using the RDP accountant."""
import math
from typing import Sequence, Tuple, Union
from jax_privacy.src.accounting import dp_bounds
import numpy as np
import scipy.optimize as sp_opt
def calibrate_steps(
target_epsilon: float,
noise_multipliers: Union[float, Sequence[Tuple[int, float]]],
batch_sizes: Union[int, Sequence[Tuple[int, int]]],
num_examples: int,
target_delta: float,
dp_accountant_config: dp_bounds.DpAccountantConfig,
initial_max_steps: int = 4,
initial_min_steps: int = 1,
tol: float = 0.1,
) -> int:
"""Computes the number of steps to achieve `target_epsilon`.
Args:
target_epsilon: The desired final epsilon.
noise_multipliers: Noise multiplier. Float or list of pairs
(t: int, nm: float) if the noise multiplier changes across steps.
't' indicates step where noise_multiplier is set to 'nm'.
    batch_sizes: Batch size. Integer or list of pairs (t: int, bs: int) if the
      batch size changes across steps. 't' indicates the step where batch_size
      is set to 'bs'.
num_examples: Number of training examples.
target_delta: Desired delta for the returned epsilon.
dp_accountant_config: Configuration for the DP accountant to use.
initial_max_steps: An initial estimate of the number of steps.
initial_min_steps: Minimum number of steps.
tol: tolerance of the optimizer for the calibration.
Returns:
steps: Number of steps.
"""
def get_epsilon(num_steps):
return dp_bounds.compute_epsilon(
noise_multipliers=noise_multipliers,
batch_sizes=batch_sizes,
num_steps=num_steps,
num_examples=num_examples,
target_delta=target_delta,
dp_accountant_config=dp_accountant_config,
)
if get_epsilon(initial_min_steps) > target_epsilon:
raise ValueError('Epsilon at initial_min_steps is too large. '
'Try increasing `target_epsilon`.')
max_steps = initial_max_steps
min_steps = initial_min_steps
while get_epsilon(max_steps) < target_epsilon:
min_steps, max_steps = max_steps, 2*max_steps
error_epsilon = lambda s: np.abs(get_epsilon(int(s)) - target_epsilon)
steps = int(
math.ceil(_solve_calibration(error_epsilon, min_steps, max_steps, tol)))
return steps
def calibrate_noise_multiplier(
target_epsilon: float,
batch_sizes: Union[int, Sequence[Tuple[int, int]]],
num_steps: int,
num_examples: int,
target_delta: float,
dp_accountant_config: dp_bounds.DpAccountantConfig,
initial_max_noise: float = 1.0,
initial_min_noise: float = 0.0,
tol: float = 0.01,
) -> float:
"""Computes the noise multiplier to achieve `target_epsilon`.
Args:
target_epsilon: The desired final epsilon.
    batch_sizes: Batch size. Integer or list of pairs (t: int, bs: int) if the
      batch size changes across steps. 't' indicates the step where batch_size
      is set to 'bs'.
num_steps: Total number of iterations.
num_examples: Number of training examples.
target_delta: Desired delta for the returned epsilon.
dp_accountant_config: Configuration for the DP accountant to use.
initial_max_noise: An initial estimate of the noise multiplier.
initial_min_noise: Minimum noise multiplier.
tol: tolerance of the optimizer for the calibration.
Returns:
noise_multiplier: Noise multiplier.
"""
def get_epsilon(noise_multiplier):
return dp_bounds.compute_epsilon(
noise_multipliers=noise_multiplier,
batch_sizes=batch_sizes,
num_steps=num_steps,
num_examples=num_examples,
target_delta=target_delta,
dp_accountant_config=dp_accountant_config,
)
max_noise = initial_max_noise
min_noise = initial_min_noise
while get_epsilon(max_noise) > target_epsilon:
min_noise, max_noise = max_noise, 2*max_noise
error_epsilon = lambda s: np.abs(get_epsilon(s) - target_epsilon)
noise_multiplier = _solve_calibration(error_epsilon, min_noise, max_noise,
tol)
return noise_multiplier
def calibrate_batch_size(
target_epsilon: float,
noise_multipliers: Union[float, Sequence[Tuple[int, float]]],
num_steps: int,
num_examples: int,
target_delta: float,
dp_accountant_config: dp_bounds.DpAccountantConfig,
initial_max_batch_size: int = 8,
initial_min_batch_size: int = 1,
tol: float = 0.01,
) -> int:
"""Computes the batch size required to achieve `target_epsilon`.
Args:
target_epsilon: The desired final epsilon.
noise_multipliers: Noise multiplier. Float or list of pairs
(t: int, nm: float) if the noise multiplier changes across steps.
't' indicates step where noise_multiplier is set to 'nm'.
num_steps: Total number of iterations.
num_examples: Number of training examples.
target_delta: Desired delta for the returned epsilon.
dp_accountant_config: Configuration for the DP accountant to use.
initial_max_batch_size: An initial estimate of the batch size.
initial_min_batch_size: Minimum batch size.
tol: tolerance of the optimizer for the calibration.
Returns:
batch_size: Batch size.
"""
def get_epsilon(batch_size):
return dp_bounds.compute_epsilon(
noise_multipliers=noise_multipliers,
batch_sizes=batch_size,
num_steps=num_steps,
num_examples=num_examples,
target_delta=target_delta,
dp_accountant_config=dp_accountant_config,
)
max_batch_size = initial_max_batch_size
min_batch_size = initial_min_batch_size
if get_epsilon(min_batch_size) > target_epsilon:
    raise ValueError('Epsilon at initial_min_batch_size is too large. '
                     'Try increasing `target_epsilon`.')
while get_epsilon(max_batch_size) < target_epsilon:
min_batch_size, max_batch_size = max_batch_size, 2*max_batch_size
error_epsilon = lambda s: np.abs(get_epsilon(int(s)) - target_epsilon)
batch_size = int(math.floor(
_solve_calibration(error_epsilon, min_batch_size, max_batch_size, tol)))
return batch_size
def _solve_calibration(fn, x_min, x_max, tol):
opt_result = sp_opt.minimize_scalar(
fn,
bounds=(x_min, x_max),
method='bounded',
options={'xatol': tol},
)
assert opt_result.success
return opt_result.x
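# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Calibrates the noise multiplier required to reach a target epsilon under the
# RDP accountant; the numbers below are made up for illustration.
if __name__ == '__main__':
  example_noise_multiplier = calibrate_noise_multiplier(
      target_epsilon=8.0,
      batch_sizes=1024,
      num_steps=10_000,
      num_examples=50_000,
      target_delta=1e-5,
      dp_accountant_config=dp_bounds.RdpAccountantConfig(),
  )
  print(f'calibrated noise multiplier: {example_noise_multiplier:.3f}')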
|
jax_privacy-main
|
jax_privacy/src/accounting/calibrate.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keeping track of the differential privacy guarantee."""
from typing import Optional
from jax_privacy.src.accounting import calibrate
from jax_privacy.src.accounting import dp_bounds
from jax_privacy.src.dp_sgd import batching as batching_py
import numpy as np
class ExperimentAccountant:
"""Tracks privacy spent and maximal number of updates for an experiment."""
def __init__(
self,
clipping_norm: Optional[float],
noise_multiplier: Optional[float],
dp_epsilon: float,
dp_delta: float,
num_samples: int,
batching: batching_py.VirtualBatching,
dp_accountant_config: dp_bounds.DpAccountantConfig,
):
"""Initializes the accountant for Differential Privacy.
This class wraps the functions defined in `calibrate.py` and `dp_bounds.py`
so that we can gracefully handle limit cases where either
`noise_multiplier=0` or the clipping norm is infinite, which both result in
infinite (vacuous) DP guarantees.
Args:
clipping_norm: clipping-norm for the per-example gradients (before
averaging across the examples of the mini-batch).
noise_multiplier: The noise multiplier, excluding the clipping norm and
the batch-size.
dp_epsilon: epsilon-value of DP guarantee.
dp_delta: delta-value of DP guarantee.
num_samples: number of examples in the training set.
batching: batch-size used during training.
dp_accountant_config: Configuration for the DP accountant to use.
"""
if clipping_norm is None:
self._clipping_norm = float('inf')
elif clipping_norm < 0:
raise ValueError('Clipping norm must be non-negative.')
else:
self._clipping_norm = clipping_norm
if noise_multiplier is None:
self._noise_multiplier = 0
elif noise_multiplier < 0:
raise ValueError('Noise multiplier must be non-negative.')
else:
self._noise_multiplier = noise_multiplier
self._batching = batching
self._num_samples = num_samples
self._dp_epsilon = dp_epsilon
self._dp_delta = dp_delta
self._batch_sizes = [(0, self._batching.batch_size_init)]
if self._batching.scale_schedule is not None:
self._batch_sizes.extend(
[(threshold, self._batching.batch_size(threshold+1))
for threshold in self._batching.scale_schedule]
)
self._dp_accountant_config = dp_accountant_config
def finite_dp_guarantee(self) -> bool:
"""Returns whether the DP guarantee (eps, delta) can be finite."""
# The privacy (eps, delta) can only be finite with non-zero noise
# and with a finite clipping-norm.
return bool(self._noise_multiplier and np.isfinite(self._clipping_norm))
def compute_max_num_updates(self) -> int:
"""Compute maximum number of updates given the DP parameters."""
if self.finite_dp_guarantee():
return calibrate.calibrate_steps(
target_epsilon=self._dp_epsilon,
noise_multipliers=self._noise_multiplier,
batch_sizes=self._batch_sizes,
num_examples=self._num_samples,
target_delta=self._dp_delta,
dp_accountant_config=self._dp_accountant_config,
)
else:
return 0
def compute_current_epsilon(self, num_updates: int) -> float:
"""Compute DP epsilon given the DP parameters and current `num_updates`."""
if num_updates == 0:
return 0.0
elif self.finite_dp_guarantee():
return dp_bounds.compute_epsilon(
num_steps=num_updates,
noise_multipliers=self._noise_multiplier,
batch_sizes=self._batch_sizes,
num_examples=self._num_samples,
target_delta=self._dp_delta,
dp_accountant_config=self._dp_accountant_config,
)
else:
return float('inf')
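# --- Hypothetical usage sketch (not part of the original class); the values
# below are made up, and `RdpAccountantConfig` is assumed to be exposed by
# `dp_bounds`, mirroring its use elsewhere in this repository. ---
#   accountant = ExperimentAccountant(
#       clipping_norm=1.0,
#       noise_multiplier=1.0,
#       dp_epsilon=8.0,
#       dp_delta=1e-5,
#       num_samples=50_000,
#       batching=batching_py.VirtualBatching(
#           batch_size_init=1024,
#           batch_size_per_device_per_step=8,
#           scale_schedule=None,
#       ),
#       dp_accountant_config=dp_bounds.RdpAccountantConfig(),
#   )
#   max_num_updates = accountant.compute_max_num_updates()
#   current_eps = accountant.compute_current_epsilon(num_updates=1_000)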
def _ceil_div(a: int, b: int) -> int:
return (a + b - 1) // b
class CachedExperimentAccountant:
"""Pre-computes and caches epsilon for different `num_updates` values."""
def __init__(
self,
accountant: ExperimentAccountant,
max_num_updates: int,
num_cached_points: int = 100,
):
"""Creates the cached accoutant and computes the cached points and values.
Args:
accountant: DP accountant to use for computing the results to be cached.
max_num_updates: Maximum value for `num_updates` to be requested.
num_cached_points: Number of points to pre-compute and cache.
"""
self._accountant = accountant
self._max_num_updates = max_num_updates
self._num_cached_points = num_cached_points
self._cached_points = [
_ceil_div(self._max_num_updates * j, self._num_cached_points)
for j in range(self._num_cached_points + 1)
]
self._cached_values = {
x: self._accountant.compute_current_epsilon(x)
for x in self._cached_points}
def compute_approximate_epsilon(self, num_updates: int) -> float:
"""Uses cached results to give an approximate (over-estimated) epsilon.
The value returned should always be an over-approximation of the true
epsilon: this method uses the closest `num_updates` in the cache that is
equal to or greater than the requested `num_updates`. If such a value cannot
be found, an indexing error will be raised.
Args:
num_updates: Number of updates for which to compute epsilon.
Returns:
Approximate (over-estimated) value of epsilon.
"""
closest_cached_point = self._cached_points[
_ceil_div(self._num_cached_points * num_updates, self._max_num_updates)]
return self._cached_values[closest_cached_point]
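# --- Hypothetical sketch (not part of the original module) of the caching
# scheme above: epsilon is pre-computed on a coarse grid of update counts and
# queries are rounded *up* to the nearest cached point, so the returned value
# over-estimates the true epsilon. A toy monotone epsilon replaces the real
# accountant.
def _toy_cached_epsilon_lookup(num_updates: int) -> float:
  toy_epsilon = lambda t: 1e-3 * t  # monotone stand-in for the accountant
  max_num_updates, num_cached_points = 1_000, 10
  cached_points = [
      _ceil_div(max_num_updates * j, num_cached_points)
      for j in range(num_cached_points + 1)
  ]
  cached_values = {t: toy_epsilon(t) for t in cached_points}
  closest = cached_points[
      _ceil_div(num_cached_points * num_updates, max_num_updates)]
  return cached_values[closest]
# For example, _toy_cached_epsilon_lookup(250) == 0.3 >= toy_epsilon(250).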
|
jax_privacy-main
|
jax_privacy/src/accounting/accountant.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experiment configuration."""
import dataclasses
from typing import Any, Mapping, Optional, Protocol, Union
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
from jax_privacy.src import accounting
from jax_privacy.src.dp_sgd import typing
class FilterFn(Protocol):
def __call__(
self,
module_name: str,
parameter_name: str,
parameter_value: Any,
) -> bool:
"""Predicate function compatible with `haiku.data_structures` functions.
Args:
module_name: name of the haiku layer.
parameter_name: name of the haiku parameter (within the layer).
parameter_value: value of the parameter.
"""
@dataclasses.dataclass(kw_only=True, slots=True)
class LoggingConfig:
"""Logging configuration.
Attributes:
grad_clipping: Whether to log the proportion of per-example gradients
that get clipped at each iteration.
grad_alignment: Whether to compute the gradient alignment: cosine
distance between the differentially private gradients and the
non-private gradients computed on the same data.
snr_global: Whether to log the Signal-to-Noise Ratio (SNR) globally
across layers, where the SNR is defined as:
||non_private_grads||_2 / ||noise||_2.
snr_per_layer: Whether to log the Signal-to-Noise Ratio (SNR) per
layer, where the SNR is defined as:
||non_private_grads||_2 / ||noise||_2.
prepend_split_name: Whether to prepend the name of the split to metrics
being logged (e.g. 'train/loss' instead of 'loss').
log_params_shapes: Whether to log parameter shapes (called during
compilation).
grad_sparsity: Whether to log the number of non-"approximately zero"
gradient coordinates.
grad_sparsity_threshold: threshold to determine that a coordinate is
approximately zero.
"""
grad_clipping: bool = False
grad_alignment: bool = False
snr_global: bool = False
snr_per_layer: bool = False
prepend_split_name: bool = False
log_params_shapes: bool = True
def maybe_log_param_shapes(self, params: hk.Params, prefix: str = ''):
"""Logs information about `params` if `log_params_shapes` is True."""
if self.log_params_shapes:
logging.info(
'%s total_size=%i', prefix, hk.data_structures.tree_size(params))
for layer_name in params:
layer = params[layer_name]
logging.info(
'%s%s size=%i shapes=%s',
prefix,
layer_name,
hk.data_structures.tree_size(layer),
jax.tree_map(jnp.shape, layer),
)
@dataclasses.dataclass(kw_only=True, slots=True)
class DPConfig:
"""Configuration to activate / deactivate DP training.
Attributes:
delta: DP delta to use to compute DP guarantees.
clipping_norm: maximal l2 norm to clip each gradient per sample. No clipping
is applied if it is set to either `None` or `float('inf')`.
rescale_to_unit_norm: whether to rescale to an l2 norm of 1 after each
gradient per sample has been clipped to `clipping_norm`.
stop_training_at_epsilon: DP epsilon to use to stop training.
noise_multiplier: noise multiplier to use in DP-SGD.
auto_tune: whether to automatically adapt a hyper-parameter to fit the
privacy budget. It should be set to one of 'batch_size',
'noise_multiplier', 'stop_training_at_epsilon', 'num_updates', or None.
vectorize_grad_clipping: whether the computation of gradients clipped
per sample is to be vectorized across the mini batch. Otherwise, a
(JAX) loop is used to iterate over the mini-batch. Using a `for` loop is
usually faster when the program requires a large amount of device memory
(e.g. large batch sizes per device), otherwise vectorization is faster.
accountant: Configuration for the DP accountant to use.
"""
delta: float
clipping_norm: Optional[float]
noise_multiplier: Optional[float]
rescale_to_unit_norm: bool = True
stop_training_at_epsilon: Optional[float] = None
auto_tune: typing.AutoTuneField = None
vectorize_grad_clipping: bool = False
accountant: accounting.DpAccountantConfig = dataclasses.field(
default_factory=accounting.PldAccountantConfig)
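# Hypothetical example (not part of the original module): a DP configuration
# that clips per-example gradients to norm 1, adds noise with multiplier 1,
# and stops training once epsilon reaches 8 under the default PLD accountant.
# All values are made up for illustration.
#   dp_config = DPConfig(
#       delta=1e-5,
#       clipping_norm=1.0,
#       noise_multiplier=1.0,
#       stop_training_at_epsilon=8.0,
#   )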
@dataclasses.dataclass(kw_only=True, slots=True)
class NoDPConfig(DPConfig):
"""Configuration to deactivate DP training."""
delta: float = 1.0
clipping_norm: Optional[float] = None
noise_multiplier: Optional[float] = None
rescale_to_unit_norm: bool = False
stop_training_at_epsilon: Optional[float] = None
auto_tune: typing.AutoTuneField = None
vectorize_grad_clipping: bool = False
@dataclasses.dataclass(kw_only=True, slots=True)
class BatchSizeTrainConfig:
"""Configuration for the batch-size at training time.
Attributes:
total: total batch-size to use.
per_device_per_step: batch-size to use on each device on each step. This
number should divide `total` * `jax.device_count()`.
scale_schedule: schedule for scaling the batch-size.
"""
total: int
per_device_per_step: int
scale_schedule: Optional[Mapping[int, Any]] = None
@dataclasses.dataclass(kw_only=True, slots=True)
class AveragingConfig:
"""Configuration for the parameter averaging.
Attributes:
ema_enabled: Whether to enable the Exponential Moving Average (EMA) of the
parameters.
ema_coefficient: coefficient for the Exponential Moving Average. The default
is 0.9999.
ema_start_step: first update step to start the Exponential Moving Average
(EMA).
polyak_enabled: Whether to enable Polyak averaging.
polyak_start_step: first update step to start performing Polyak averaging.
"""
ema_enabled: bool = True
ema_coefficient: float = 0.9999
ema_start_step: int = 0
polyak_enabled: bool = False
polyak_start_step: int = 0
@dataclasses.dataclass(kw_only=True, slots=True)
class TrainingConfig:
"""Configuration for training.
Attributes:
batch_size: batch size configuration.
dp: DP configuration.
logging: logging configuration.
weight_decay: weight-decay to apply to the model weights during training.
train_only_layer: if set to None, train all layers of the model. If
specified as a string, train only the layer whose name is an exact match
of this string. If specified as a filter function, it will be
called on each `(layer_name, parameter_name, parameter_value)` to
determine whether the parameter should be trained.
"""
batch_size: BatchSizeTrainConfig
dp: DPConfig
logging: LoggingConfig = dataclasses.field(default_factory=LoggingConfig)
weight_decay: float = 0.0
train_only_layer: Optional[Union[str, FilterFn]] = None
def is_trainable(self, module_name: str, param_name: str, param: Any) -> bool:
if self.train_only_layer is None:
return True
elif isinstance(self.train_only_layer, str):
return module_name == self.train_only_layer
else:
return self.train_only_layer(module_name, param_name, param)
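# --- Hypothetical usage sketch (not part of the original module). ---
# A `FilterFn` that only trains bias parameters, plugged into `TrainingConfig`
# through `train_only_layer`; the numbers below are made up for illustration.
def _train_biases_only(
    module_name: str, parameter_name: str, parameter_value: Any) -> bool:
  del module_name, parameter_value  # unused by this toy filter
  return parameter_name == 'b'
def _training_config_sketch() -> TrainingConfig:
  config = TrainingConfig(
      batch_size=BatchSizeTrainConfig(total=1024, per_device_per_step=8),
      dp=NoDPConfig(),
      train_only_layer=_train_biases_only,
  )
  assert config.is_trainable('mlp/~/linear_0', 'b', None)
  assert not config.is_trainable('mlp/~/linear_0', 'w', None)
  return config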
@dataclasses.dataclass(kw_only=True, slots=True)
class EvaluationConfig:
"""Configuration for the evaluation.
Attributes:
batch_size: Batch-size for evaluation.
max_num_batches: Maximum number of batches to use in an evaluation run.
"""
batch_size: int
max_num_batches: int | None = None
|
jax_privacy-main
|
jax_privacy/src/training/experiment_config.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""Metrics utils."""
from typing import Sequence
import chex
import jax
import jax.numpy as jnp
class Avg:
"""Simple class to iteratively compute a weighted average."""
def __init__(self):
self._avg = 0.0
self._n = 0
def update(self, val, n: int = 1):
self._avg = (self._avg * self._n + val * n) / (self._n + n)
self._n += n
@property
def avg(self):
return self._avg
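# Hypothetical usage sketch (not part of the original module): a weighted
# running average over two updates of different sizes.
#   avg = Avg()
#   avg.update(1.0)        # one value of 1.0
#   avg.update(3.0, n=3)   # three values with mean 3.0
#   avg.avg                # == (1.0 * 1 + 3.0 * 3) / 4 == 2.5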
def _logits_are_valid_per_row(logits: chex.Array) -> chex.Array:
"""Check per-row that no entry is set to NaN.
Args:
logits: array of expected shape [NO]
Returns:
Boolean indicator of shape [N] with the k-th entry set to True if the k-th
row of logits contains no NaN entries, and False otherwise.
"""
return jnp.logical_not(jnp.any(jnp.isnan(logits), axis=1))
def _labels_one_hot_are_valid_per_row(labels_one_hot: chex.Array) -> chex.Array:
"""Check per-row whether each row is a valid one-hot encoding.
Args:
labels_one_hot: array of expected shape [NO]
Returns:
Boolean indicator of shape [N] with the k-th entry set to True if the k-th
row of `labels_one_hot` is a valid encoding, and False otherwise.
"""
zero_or_one = jnp.all(
labels_one_hot * labels_one_hot == labels_one_hot, axis=1)
sum_to_one = jnp.sum(labels_one_hot, axis=1) == 1
return jnp.logical_and(zero_or_one, sum_to_one)
def topk_accuracy(
logits: chex.Array,
labels_one_hot: chex.Array,
topk: Sequence[int] = (1, 5),
) -> Sequence[jax.Array]:
"""Calculate (fast!) top-k error for multiple k values.
Args:
logits: array of expected shape [NO]
labels_one_hot: array of expected shape [NO]
topk: all k values for which top-k accuracy should be computed.
Returns:
Top-k accuracies for the given logits and labels.
"""
assert logits.shape == labels_one_hot.shape
label_scores = jnp.sum(logits * labels_one_hot, 1)
# Compute classes that are scored at least as high as the ground truth.
high_score_matrix = logits >= label_scores[:, jnp.newaxis]
num_high_scores_per_sample = jnp.sum(high_score_matrix, axis=1)
num_high_scores_per_sample = jnp.where(
jnp.logical_and(_logits_are_valid_per_row(logits),
_labels_one_hot_are_valid_per_row(labels_one_hot)),
num_high_scores_per_sample,
jnp.inf,
)
# Each sample is correct for top-k accuracy if it has <= k high scores.
return [jnp.mean(num_high_scores_per_sample <= k) for k in topk]
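# --- Hypothetical usage sketch (not part of the original module). ---
# Three samples, four classes; the second sample's true class only has the
# second-highest logit, so it counts towards top-2 but not top-1 accuracy.
def _topk_accuracy_sketch():
  logits = jnp.array([[2.0, 0.1, 0.1, 0.1],
                      [0.5, 1.0, 0.2, 0.1],
                      [0.1, 0.2, 0.3, 3.0]])
  labels_one_hot = jax.nn.one_hot(jnp.array([0, 0, 3]), 4)
  top1, top2 = topk_accuracy(logits, labels_one_hot, topk=(1, 2))
  return top1, top2  # approximately (2/3, 1.0) for this toy batch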
|
jax_privacy-main
|
jax_privacy/src/training/metrics.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for auto_tune."""
import copy
from absl.testing import absltest
from absl.testing import parameterized
from jax_privacy.experiments import image_data
from jax_privacy.experiments.image_classification import config_base
from jax_privacy.src import accounting
from jax_privacy.src.training import auto_tune
from jax_privacy.src.training import experiment_config
import ml_collections
import numpy as np
def get_config() -> ml_collections.ConfigDict:
"""Creates a dummy config for the test."""
config = config_base.ExperimentConfig( # pytype: disable=wrong-arg-types
num_updates=100,
optimizer=None,
model=None,
training=experiment_config.TrainingConfig(
batch_size=experiment_config.BatchSizeTrainConfig(
total=1024,
per_device_per_step=8,
),
dp=experiment_config.DPConfig(
delta=1e-5,
clipping_norm=None,
auto_tune=None,
stop_training_at_epsilon=2.0,
noise_multiplier=1.0,
accountant=accounting.RdpAccountantConfig(),
),
),
averaging=None,
random_seed=None,
data_train=image_data.Cifar10Loader(
config=image_data.Cifar10TrainValidConfig(
preprocess_name='standardise',
),
),
data_eval=image_data.Cifar10Loader(
config=image_data.Cifar10TestConfig(
preprocess_name='standardise',
),
),
evaluation=experiment_config.EvaluationConfig(
batch_size=100,
),
)
return config_base.build_jaxline_config(
experiment_config=config,
)
class AutoTuneTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.config = get_config()
self._accountant = {
'pld': accounting.PldAccountantConfig(
value_discretization_interval=1e-2,
),
'rdp': accounting.RdpAccountantConfig(),
}
def _assert_calibrated(self, config, target_eps):
"""Asserts that the config is calibrated wr.t. the target epsilon."""
config_xp = config.experiment_kwargs.config
eps = accounting.compute_epsilon(
noise_multipliers=config_xp.training.dp.noise_multiplier,
batch_sizes=config_xp.training.batch_size.total,
num_steps=config_xp.num_updates,
num_examples=config_xp.data_train.config.num_samples,
target_delta=config_xp.training.dp.delta,
dp_accountant_config=config_xp.training.dp.accountant,
)
np.testing.assert_allclose(eps, target_eps, atol=0.05)
def test_no_autotune(self):
config_dp = self.config.experiment_kwargs.config.training.dp
config_dp.auto_tune = None
config = auto_tune.dp_auto_tune_config(copy.deepcopy(self.config))
config_xp_after = config.experiment_kwargs.config
config_xp_before = self.config.experiment_kwargs.config
assert config_xp_after.num_updates == config_xp_before.num_updates
assert (
config_xp_after.training.dp.stop_training_at_epsilon
== config_xp_before.training.dp.stop_training_at_epsilon
)
assert (
config_xp_after.training.dp.noise_multiplier
== config_xp_before.training.dp.noise_multiplier
)
assert (
config_xp_after.training.batch_size.total
== config_xp_before.training.batch_size.total
)
@parameterized.parameters('rdp', 'pld')
def test_tune_noise_multiplier(self, accountant_name):
config_dp = self.config.experiment_kwargs.config.training.dp
config_dp.auto_tune = 'noise_multiplier'
config_dp.accountant = self._accountant[accountant_name]
target_eps = config_dp.stop_training_at_epsilon
config = auto_tune.dp_auto_tune_config(copy.deepcopy(self.config))
assert (
config.experiment_kwargs.config != self.config.experiment_kwargs.config
)
self._assert_calibrated(config, target_eps)
@parameterized.parameters('rdp', 'pld')
def test_tune_num_updates(self, accountant_name):
config_dp = self.config.experiment_kwargs.config.training.dp
config_dp.auto_tune = 'num_updates'
config_dp.accountant = self._accountant[accountant_name]
target_eps = config_dp.stop_training_at_epsilon
config = auto_tune.dp_auto_tune_config(copy.deepcopy(self.config))
assert (
config.experiment_kwargs.config != self.config.experiment_kwargs.config
)
self._assert_calibrated(config, target_eps)
@parameterized.parameters('rdp', 'pld')
def test_tune_epsilon(self, accountant_name):
config_dp = self.config.experiment_kwargs.config.training.dp
config_dp.auto_tune = 'stop_training_at_epsilon'
config_dp.accountant = self._accountant[accountant_name]
config = auto_tune.dp_auto_tune_config(copy.deepcopy(self.config))
assert (
config.experiment_kwargs.config != self.config.experiment_kwargs.config
)
self._assert_calibrated(
config,
config.experiment_kwargs.config.training.dp.stop_training_at_epsilon,
)
@parameterized.parameters('rdp', 'pld')
def test_tune_batch_size(self, accountant_name):
config_dp = self.config.experiment_kwargs.config.training.dp
config_dp.auto_tune = 'batch_size'
config_dp.accountant = self._accountant[accountant_name]
target_eps = config_dp.stop_training_at_epsilon
config = auto_tune.dp_auto_tune_config(copy.deepcopy(self.config))
assert (
config.experiment_kwargs.config != self.config.experiment_kwargs.config
)
self._assert_calibrated(config, target_eps)
if __name__ == '__main__':
absltest.main()
|
jax_privacy-main
|
jax_privacy/src/training/auto_tune_test.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The updater computes and applies the update.
Typical usage:
# The updater requires a forward function, specification of virtual batching,
# and a DP-SGD gradient computer:
updater = dp_updater.Updater(
batching=batching, # see `batching.py`
forward_fn=forward_fn, # see `forward.py`
grad_computer=grad_computer, # see `gradients.py`
)
...
# Initialize model and optimizer (pmapped).
params, network_state, opt_state, step_count = updater.init(
rng=rng,
inputs=inputs,
)
# Apply update (pmapped).
params, network_state, opt_state, step_count, stats = updater.update(
params=params,
network_state=network_state,
opt_state=opt_state,
step_count=step_count,
inputs=inputs,
)
"""
import functools
from typing import Mapping, Optional
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jax_privacy.src.dp_sgd import batching as batching_module
from jax_privacy.src.dp_sgd import devices
from jax_privacy.src.dp_sgd import gradients
from jax_privacy.src.dp_sgd import optim
from jax_privacy.src.dp_sgd import typing
from jax_privacy.src.training import averaging
from jax_privacy.src.training import experiment_config
from jax_privacy.src.training import forward
from jax_privacy.src.training import optimizer_config as opt_config
from jax_privacy.src.training import updater
from jaxline import utils as jaxline_utils
import optax
class Updater(updater.AbstractUpdater):
"""Defines and applies the update, potentially in parallel across devices."""
def __init__(
self,
*,
batching: batching_module.VirtualBatching,
forward_fn: forward.ForwardFn,
grad_computer: gradients.GradientComputer,
weight_decay: Optional[chex.Numeric],
is_trainable: experiment_config.FilterFn = lambda *args: True,
optimizer_config: opt_config.OptimizerConfig,
max_num_updates: int,
device_layout: devices.DeviceLayout = devices.DeviceLayout(),
logging_config: experiment_config.LoggingConfig,
rng_seed: int = 42,
):
"""Initializes the updater.
Args:
batching: virtual batching that allows to use 'virtual' batches across
devices and steps.
forward_fn: forward pass providing the loss function and metrics.
grad_computer: Computer of the gradient.
weight_decay: coefficient of the weight-decay applied to the parameters of
the model (mechanism not privatized since it is data-independent).
is_trainable: function to be called on each
`(layer_name, parameter_name, parameter_value)` to
determine whether the parameter should be updated during training.
optimizer_config: Optimizer configuration.
max_num_updates: Maximal number of updates to perform.
device_layout: Common args to `pmap` and `psum` for data parallelism.
logging_config: configuration of the logging options.
rng_seed: seed to use for the random key generator;
this will override the rng provided by jaxline.
"""
self._batching = batching
self._forward_fn = forward_fn
self._grad_computer = grad_computer
self._weight_decay = weight_decay
self._is_trainable = is_trainable
# Create an optimizer that will only apply the update every
# `k=batching.apply_update_every` steps, and accumulate gradients
# in-between so that we can use a large 'virtual' batch-size.
# For example, if `k` is 4, on the first three steps, the optimizer will
# store the gradients of the mini-batches and not perform any update.
# On the fourth step, the optimizer will average the gradients of the fourth
# mini-batch with those of the first three, perform the update, and reset
its memory, and so on. This allows the use of large virtual batch-sizes.
self._lr_decay_schedule_fn = optimizer_config.make_lr_schedule_fn(
max_num_updates)
self._optimizer = optax.MultiSteps(
optimizer_config.make_optimizer(max_num_updates),
batching.apply_update_every,
)
self._logging_config = logging_config
# This key will be used instead of the rng provided by jaxline
# since the latter is updated at every step.
self._rng_init = jax.random.PRNGKey(rng_seed)
self._device_layout = device_layout
self._pmapped_init = jax.pmap(
self._single_device_init,
in_axes=(None, 0), # rng, inputs
donate_argnums=(0,),
**device_layout.pmap_kwargs)
self._pmapped_update = jax.pmap(
self._single_device_update,
in_axes=(0, 0, 0, 0), # params, net_state, opt_state, inputs
donate_argnums=(0, 1, 2, 4),
**device_layout.pmap_kwargs)
self._pmapped_evaluate = jax.pmap(
self._single_device_evaluate,
in_axes=(0, 0, None, 0), # params, net_state, rng, inputs
donate_argnums=(2,),
**device_layout.pmap_kwargs)
self._init_average = jax.pmap(
lambda x: x,
**device_layout.pmap_kwargs)
self._average_ema = jax.pmap(
averaging.ema,
in_axes=(0, 0, None, None), # old_average, new, mu, start_step
donate_argnums=(0,),
**device_layout.pmap_kwargs)
self._average_polyak = jax.pmap(
averaging.polyak,
in_axes=(0, 0, None), # old_average, new, start_step
donate_argnums=(0,),
**device_layout.pmap_kwargs)
def step_count_from_opt_state(
self,
opt_state: optax.MultiStepsState,
) -> updater.StepCount:
"""Returns the hierarchical step number."""
assert isinstance(opt_state, optax.MultiStepsState)
return updater.StepCount(
update_step=int(jaxline_utils.get_first(opt_state.gradient_step)),
accumulation_step=int(jaxline_utils.get_first(opt_state.mini_step)),
)
def _regularization(
self,
params: typing.ParamsT,
) -> tuple[chex.Numeric, chex.Numeric]:
l2_loss = self._grad_computer.l2_loss(params)
return self._weight_decay * l2_loss, l2_loss
def init(
self,
rng: chex.PRNGKey,
inputs: typing.InputsT,
) -> tuple[typing.ParamsT, typing.ModelStateT, optax.MultiStepsState,
updater.StepCount]:
"""Initialises parameters."""
params, network_state, opt_state = self._pmapped_init(rng, inputs)
# Non-vectorised Python integers, so keep outside the pmap.
step_count = updater.StepCount(
update_step=0,
accumulation_step=0,
)
return params, network_state, opt_state, step_count
def update(
self,
params: typing.ParamsT,
network_state: typing.ModelStateT,
opt_state: optax.MultiStepsState,
step_count: updater.StepCount,
inputs: typing.InputsT,
) -> tuple[typing.ParamsT, typing.ModelStateT, optax.MultiStepsState,
updater.StepCount, typing.Metrics]:
"""Updates parameters."""
# The function below is p-mapped, so arguments must be passed positionally
# and in the right order.
params, network_state, opt_state, metrics = self._pmapped_update(
params, network_state, opt_state, inputs)
# Replicate the logic in optax.MultiSteps to determine the updated
# hierarchical step (full + inner) in Python integers. This makes it
# available to the caller without blocking on the device computation.
every_k = self._batching.apply_update_every(step_count.update_step)
step_count = step_count.next(every_k)
return params, network_state, opt_state, step_count, metrics
def evaluate(
self,
params: typing.ParamsT,
network_state: typing.ModelStateT,
rng: chex.PRNGKey,
inputs: typing.InputsT,
) -> typing.Metrics:
"""Evaluates model parameters."""
# The function below is p-mapped, so arguments must be passed positionally
# and in the right order.
return jaxline_utils.get_first(
self._pmapped_evaluate(params, network_state, rng, inputs))
def optimizer(self) -> optax.GradientTransformation:
return self._optimizer.gradient_transformation()
def init_average(
self,
params: typing.ParamsT,
) -> typing.ParamsT:
return self._init_average(params)
def update_ema(
self,
ema_params: typing.ParamsT,
params: typing.ParamsT,
opt_state: optax.MultiStepsState,
*,
mu: chex.Numeric,
start_step: chex.Numeric,
) -> typing.ParamsT:
# We only perform parameter averaging if the current step corresponds to an
# update step (and not a gradient accumulation step).
if self._optimizer.has_updated(jaxline_utils.get_first(opt_state)):
t = jaxline_utils.get_first(opt_state.gradient_step) - start_step
return self._average_ema(ema_params, params, mu, t)
else:
return ema_params
def update_polyak(
self,
polyak_params: typing.ParamsT,
params: typing.ParamsT,
opt_state: optax.MultiStepsState,
*,
start_step: chex.Numeric,
) -> typing.ParamsT:
# We only perform parameter averaging if the current step corresponds to an
# update step (and not a gradient accumulation step).
if self._optimizer.has_updated(jaxline_utils.get_first(opt_state)):
t = jaxline_utils.get_first(opt_state.gradient_step) - start_step
return self._average_polyak(polyak_params, params, t)
else:
return polyak_params
def _single_device_init(
self,
rng: chex.PRNGKey,
inputs: typing.InputsT,
) -> tuple[typing.ParamsT, typing.ModelStateT, optax.MultiStepsState]:
"""Initialization function (to be pmapped)."""
params, network_state = self._forward_fn.train_init(rng, inputs)
trainable_params, unused_frozen_params = hk.data_structures.partition(
self._is_trainable, params)
opt_state = self._optimizer.init(trainable_params)
return params, network_state, opt_state
def _single_device_update(
self,
params: typing.ParamsT,
network_state: typing.ModelStateT,
opt_state: optax.MultiStepsState,
inputs: typing.InputsT,
) -> tuple[
typing.ParamsT, typing.ModelStateT, optax.MultiStepsState,
typing.Metrics,
]:
"""Updates parameters (to be pmapped)."""
# Potentially split params between trainable parameters and frozen
# parameters. Trainable parameters get updated, while frozen parameters do
# not.
trainable_params, frozen_params = hk.data_structures.partition(
self._is_trainable, params)
def loss_fn(train_params, *args):
all_params = hk.data_structures.merge(train_params, frozen_params)
return self._forward_fn.train_forward(all_params, *args)
self._logging_config.maybe_log_param_shapes(
trainable_params, prefix='[Trainable params] ')
self._logging_config.maybe_log_param_shapes(
frozen_params, prefix='[Frozen params] ')
(
trainable_params,
network_state,
opt_state,
metrics,
) = self._update_with_stats(
loss_fn=loss_fn,
params=trainable_params,
network_state=network_state,
opt_state=opt_state,
inputs=inputs,
)
# Merge the updated parameters with the parameters that are supposed to
# remain frozen during training.
params = hk.data_structures.merge(trainable_params, frozen_params)
return params, network_state, opt_state, metrics
def _update_with_stats(
self,
loss_fn: typing.LossFn,
params: typing.ParamsT,
network_state: typing.ModelStateT,
opt_state: optax.MultiStepsState,
inputs: typing.InputsT,
) -> tuple[typing.ParamsT, typing.ModelStateT, optax.MultiStepsState,
typing.Metrics]:
"""Updates parameters and computes relevant training statistics."""
# `rng_per_batch` is common across replicas and accumulation steps.
# NOTE: folding an int (scalar) array into a random key is valid, but fails
# the type check, which is why pytype is disabled on the line below.
rng_per_batch = jax.random.fold_in(self._rng_init, opt_state.gradient_step)  # pytype: disable=wrong-arg-types
# Compute the regularization.
reg, l2_loss = self._regularization(params)
if self._logging_config.grad_alignment:
# Compute (non-clipped) gradients w.r.t. trainable parameters.
# Do so before `params` and `network_state` are updated.
clean_grads = self._grad_computer.clean_gradients(
loss_fn=loss_fn,
params=params,
network_state=network_state,
rng_per_batch=rng_per_batch,
accumulation_step=opt_state.mini_step,
inputs=inputs,
)
# Compute the clipped gradients (across all replicas).
(loss, (network_state, metrics)), avg_grads = (
self._grad_computer.loss_and_clipped_gradients(
loss_fn=loss_fn,
params=params,
network_state=network_state,
rng_per_batch=rng_per_batch,
accumulation_step=opt_state.mini_step,
inputs=inputs,
)
)
# Compute the noise scale based on `noise_multiplier`, the batch-size and
# the clipping-norm. Compute our 'final' gradients `grads`: add the clipped
# data-dependent gradients (`avg_grads`) and the noise to be added to
# achieved differential privacy.
grads, std = self._grad_computer.add_noise_to_grads(
total_batch_size=self._batching.batch_size(opt_state.gradient_step),
grads=avg_grads,
rng_per_batch=rng_per_batch,
)
# The update step is logged in the optimizer state (by optax.MultiSteps)
# under the name of 'gradient_step'.
# Note that the learning rate schedule evolves with `update_step`
# rather than `global_step`, since the former accounts for the fact that
# gradients may be accumulated over multiple global steps.
learning_rate = self._lr_decay_schedule_fn(opt_state.gradient_step)
params, opt_state = self._opt_update(
params, opt_state, grads, learning_rate)
# Log all relevant statistics in a dictionary.
loss_vector = metrics.per_example['loss']
scalars = {
'noise_std': std,
'loss': loss,
'loss_mean': jnp.mean(loss_vector),
'loss_min': jnp.min(loss_vector),
'loss_max': jnp.max(loss_vector),
'loss_std': jnp.std(loss_vector),
'loss_median': jnp.median(loss_vector),
'reg': reg,
'batch_size': self._batching.batch_size(opt_state.gradient_step),
'update_every': self._batching.apply_update_every(
opt_state.gradient_step),
'l2_loss': l2_loss,
'obj': (reg + loss),
'grads_norm': self._grad_computer.global_norm(grads),
'update_step': opt_state.gradient_step, # use value after opt update
'learning_rate': learning_rate,
}
scalars.update(metrics.scalars_avg)
# Possibly log additional statistics from the gradient.
scalars.update(self._compute_gradient_stats(
opt_state=opt_state,
rng_per_batch=rng_per_batch,
avg_grads=avg_grads,
grad_norms_per_sample=metrics.per_example.get('grad_norm'),
))
if self._logging_config.grad_alignment:
# TODO: This only computes alignment on the current shard.
scalars.update(grad_alignment=optim.cosine_distance(grads, clean_grads))
metrics = typing.Metrics(
scalars_avg=scalars,
per_example=metrics.per_example,
scalars_sum=metrics.scalars_sum,
)
return params, network_state, opt_state, metrics
def _opt_update(
self,
params: typing.ParamsT,
opt_state: optax.MultiStepsState,
grads: typing.ParamsT,
learning_rate: chex.Array,
) -> tuple[typing.ParamsT, optax.MultiStepsState]:
"""Returns `params` and `opt_state` updated with `grads`."""
# Perform the update on the model parameters (no-op if this step
# is meant to accumulate gradients rather than performing the model update).
updates, opt_state = self._optimizer.update(grads, opt_state, params)
params = optax.apply_updates(params, updates)
# Manually apply weight decay with the current learning-rate.
if self._weight_decay:
params = jax.lax.cond(
self._optimizer.has_updated(opt_state),
functools.partial( # decay parameters if this is an update step
optim.apply_weight_decay,
learning_rate=learning_rate,
weight_decay=self._weight_decay,
),
lambda p: p, # do not decay if this is an accumulation step
params,
)
return params, opt_state
def _compute_gradient_stats(
self,
*,
opt_state: optax.MultiStepsState,
rng_per_batch: chex.PRNGKey,
avg_grads: typing.ParamsT,
grad_norms_per_sample: chex.ArrayBatched,
) -> Mapping[str, chex.Numeric]:
"""Compute various gradient statistics for logging."""
stats = {}
# Log Signal-to-Noise Ratio.
if self._logging_config.snr_global or self._logging_config.snr_per_layer:
noise = self._recompute_noise(
opt_state=opt_state,
grads_like=avg_grads,
rng_per_batch=rng_per_batch,
)
def snr(s, n):
return (self._grad_computer.global_norm(s) /
self._grad_computer.global_norm(n))
if self._logging_config.snr_global:
stats['snr_global'] = snr(avg_grads, noise)
if self._logging_config.snr_per_layer:
if noise is None:
noise = self._recompute_noise(
opt_state=opt_state,
grads_like=avg_grads,
rng_per_batch=rng_per_batch,
)
signal_to_noise_per_layer = jax.tree_map(snr, avg_grads, noise)
stats.update({
f'snr_{mod_name}_{name}': value
for mod_name, name, value in hk.data_structures.traverse(
signal_to_noise_per_layer)})
if self._logging_config.grad_clipping:
if not self._grad_computer.using_clipped_grads:
stats.update(grads_clipped=0.0)
else:
grads_clipped = jnp.mean(
jnp.greater(grad_norms_per_sample,
self._grad_computer.clipping_norm))
stats.update(
grads_clipped=grads_clipped,
grad_norms_before_clipping_mean=jnp.mean(grad_norms_per_sample),
grad_norms_before_clipping_median=jnp.median(grad_norms_per_sample),
grad_norms_before_clipping_min=jnp.min(grad_norms_per_sample),
grad_norms_before_clipping_max=jnp.max(grad_norms_per_sample),
grad_norms_before_clipping_std=jnp.std(grad_norms_per_sample),
)
return stats
def _recompute_noise(self, opt_state, grads_like, rng_per_batch):
"""Re-create the noise with the same RNG and add it to zeros."""
noise, unused_std = self._grad_computer.add_noise_to_grads(
total_batch_size=self._batching.batch_size(opt_state.gradient_step),
grads=jax.tree_map(jnp.zeros_like, grads_like),
rng_per_batch=rng_per_batch,
)
return noise
def _single_device_evaluate(
self,
params: typing.ParamsT,
network_state: typing.ModelStateT,
rng: chex.PRNGKey,
inputs: typing.InputsT,
) -> typing.Metrics:
"""Evaluates model parameters (to be pmapped)."""
# Note on rngs:
# - rng is common across replicas.
# - rng_per_example is specialised per sample (for independent randomness).
rng_per_example = jax.random.fold_in(rng, self._device_layout.replica_index)
metrics = self._forward_fn.eval_forward(
params, network_state, rng_per_example, inputs)
per_example = jax.lax.all_gather(
metrics.per_example, **self._device_layout.data_psum_kwargs)
scalars_avg = jax.lax.pmean(
metrics.scalars_avg, **self._device_layout.data_psum_kwargs)
scalars_sum = jax.lax.psum(
metrics.scalars_sum, **self._device_layout.data_psum_kwargs)
return typing.Metrics(
scalars_avg=scalars_avg,
scalars_sum=scalars_sum,
per_example=per_example,
)
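# --- Hypothetical sketch (not part of the original module). ---
# Standalone illustration of the gradient-accumulation behaviour that
# `Updater.__init__` sets up via `optax.MultiSteps`: with `every_k_schedule=4`,
# the first three `update` calls return zero updates and only the fourth
# applies the averaged gradient.
def _multi_steps_accumulation_sketch():
  opt = optax.MultiSteps(optax.sgd(learning_rate=1.0), every_k_schedule=4)
  params = jnp.zeros(3)
  state = opt.init(params)
  for _ in range(4):
    grads = jnp.ones(3)  # pretend mini-batch gradient
    updates, state = opt.update(grads, state, params)
    params = optax.apply_updates(params, updates)
  return params  # == [-1., -1., -1.]: exactly one averaged update was applied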
|
jax_privacy-main
|
jax_privacy/src/training/dp_updater.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auto-tune DP parameters of config so that they fit the privacy budget."""
from absl import logging
from jax_privacy.src import accounting as dp_accounting
from jax_privacy.src.dp_sgd import typing
import ml_collections
def dp_auto_tune(
*,
auto_tune: typing.AutoTuneField,
num_examples: int,
dp_epsilon: float,
dp_delta: float,
noise_multiplier: float,
batch_sizes: int,
num_updates: int,
dp_accountant_config: dp_accounting.DpAccountantConfig,
) -> tuple[float, int, float, int]:
"""Auto-tune DP parameters so that we can obtain the desired DP guarantees.
Args:
auto_tune: which hyper-parameter to adapt.
num_examples: number of examples in the training set.
dp_epsilon: epsilon-value of DP guarantee.
dp_delta: delta-value of DP guarantee.
noise_multiplier: standard deviation of the noise (relative to the
clipping-norm).
batch_sizes: batch-size used during training.
num_updates: number of updates to be performed.
dp_accountant_config: Configuration for the DP accountant to use.
Returns:
Potentially updated values for dp_epsilon, num_updates, noise_multiplier,
and batch_sizes.
"""
if not auto_tune:
pass
elif auto_tune == 'stop_training_at_epsilon':
dp_epsilon: float = dp_accounting.compute_epsilon(
noise_multipliers=noise_multiplier,
batch_sizes=batch_sizes,
num_steps=num_updates,
num_examples=num_examples,
target_delta=dp_delta,
dp_accountant_config=dp_accountant_config,
)
elif auto_tune == 'num_updates':
num_updates: int = dp_accounting.calibrate_steps(
target_epsilon=dp_epsilon,
noise_multipliers=noise_multiplier,
batch_sizes=batch_sizes,
num_examples=num_examples,
target_delta=dp_delta,
dp_accountant_config=dp_accountant_config,
)
elif auto_tune == 'noise_multiplier':
noise_multiplier: float = dp_accounting.calibrate_noise_multiplier(
target_epsilon=dp_epsilon,
num_steps=num_updates,
batch_sizes=batch_sizes,
num_examples=num_examples,
target_delta=dp_delta,
dp_accountant_config=dp_accountant_config,
)
elif auto_tune == 'batch_size':
batch_sizes: int = dp_accounting.calibrate_batch_size(
target_epsilon=dp_epsilon,
noise_multipliers=noise_multiplier,
num_steps=num_updates,
num_examples=num_examples,
target_delta=dp_delta,
dp_accountant_config=dp_accountant_config,
)
else:
raise ValueError(f'Unsupported auto-tuning option: {auto_tune}.')
return dp_epsilon, num_updates, noise_multiplier, batch_sizes
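# --- Hypothetical usage sketch (not part of the original module); all values
# below are made up, and the RDP accountant config mirrors the one used in the
# tests. Calibrates the noise multiplier for a fixed privacy budget: ---
#   _, _, calibrated_noise, _ = dp_auto_tune(
#       auto_tune='noise_multiplier',
#       num_examples=50_000,
#       dp_epsilon=8.0,
#       dp_delta=1e-5,
#       noise_multiplier=1.0,  # initial value, overwritten by the calibration
#       batch_sizes=1024,
#       num_updates=2_500,
#       dp_accountant_config=dp_accounting.RdpAccountantConfig(),
#   )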
def dp_auto_tune_config(
config: ml_collections.ConfigDict,
) -> ml_collections.ConfigDict:
"""Apply DP auto-tuning to the config (modified in-place)."""
config_xp = config.experiment_kwargs.config
if config_xp.training.batch_size.scale_schedule is not None:
raise ValueError('Batch-size schedules are not supported.')
dp_accountant_config = config_xp.training.dp.accountant
if isinstance(dp_accountant_config, dp_accounting.PldAccountantConfig):
logging.warning(
'Auto tuning with PLD accountant can be slow. Be patient...'
)
epsilon, num_updates, noise_multiplier, batch_size = dp_auto_tune(
batch_sizes=config_xp.training.batch_size.total,
noise_multiplier=config_xp.training.dp.noise_multiplier,
dp_epsilon=config_xp.training.dp.stop_training_at_epsilon,
num_updates=config_xp.num_updates,
auto_tune=config_xp.training.dp.auto_tune,
num_examples=config_xp.data_train.config.num_samples,
dp_delta=config_xp.training.dp.delta,
dp_accountant_config=dp_accountant_config,
)
config_xp.num_updates = num_updates
config_xp.training.dp.stop_training_at_epsilon = epsilon
config_xp.training.dp.noise_multiplier = noise_multiplier
config_xp.training.batch_size.total = batch_size
return config
|
jax_privacy-main
|
jax_privacy/src/training/auto_tune.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the updater."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jax_privacy.experiments import image_data
from jax_privacy.experiments.image_classification import forward
from jax_privacy.src.dp_sgd import batching as batching_module
from jax_privacy.src.dp_sgd import gradients
from jax_privacy.src.training import dp_updater
from jax_privacy.src.training import experiment_config
from jax_privacy.src.training import optimizer_config
from jaxline import utils
import numpy as np
import optax
import scipy.stats as spst
INPUT_SIZE = 3
LOCAL_BATCH_SIZE = 2
NUM_CLASSES = 7
NUM_DEVICES = 4
AUGMULT = 5
NUM_TEST_SAMPLES = 18
def _standard_updater_kwargs(
*,
learning_rate: float = 1.0,
weight_decay: float = 0.0,
):
return {
'weight_decay': weight_decay,
'optimizer_config': optimizer_config.sgd_config(
lr=optimizer_config.constant_lr_config(learning_rate),
),
'logging_config': experiment_config.LoggingConfig(),
'max_num_updates': 10,
}
def _flatten_tree(tree):
return jnp.concatenate(
[jnp.ravel(x) for x in jax.tree_util.tree_leaves(tree)])
def model_fn(inputs, is_training=False):
del is_training # unused
return hk.nets.MLP(output_sizes=[INPUT_SIZE, 10, NUM_CLASSES])(inputs)
# Adapt the forward function to echo the per-example random key.
class _ForwardFnWithRng(forward.MultiClassForwardFn):
def eval_forward(self, params, network_state, rng, inputs):
metrics = super().eval_forward(params, network_state, rng, inputs)
metrics.scalars_avg = {'rng': rng, **metrics.scalars_avg}
return metrics
def assert_close_calibrated(value, ref, expected, rtol=2, atol=1e-5):
"""Check that |value - expected| <= rtol * max(|ref - expected|) + atol."""
delta = jnp.abs(value - expected)
max_delta_allowed = rtol * jnp.max(jnp.abs(ref - expected)) + atol
np.testing.assert_array_less(delta, max_delta_allowed)
def assert_trees_all_close(tree_1, tree_2, rtol=2e-2, atol=1e-5):
"""Check closeness up to *both* absolute and relative tolerance values."""
chex.assert_trees_all_close(tree_1, tree_2, atol=atol, rtol=0)
chex.assert_trees_all_close(tree_1, tree_2, rtol=rtol, atol=0)
def _test_data(num_batches, local_batch_size, seed=9273):
"""Generates dummy data for testing purposes."""
prng_seq = hk.PRNGSequence(seed)
batches = []
for _ in range(num_batches):
rng = next(prng_seq)
images = jax.random.normal(
rng,
[NUM_DEVICES, local_batch_size, AUGMULT, INPUT_SIZE],
)
rng = next(prng_seq)
labels = jax.random.randint(
rng,
[NUM_DEVICES, local_batch_size, AUGMULT],
minval=0,
maxval=NUM_CLASSES,
)
labels = jax.nn.one_hot(labels, NUM_CLASSES)
batches.append(image_data.DataInputs(image=images, label=labels))
return batches
class UpdaterTest(parameterized.TestCase):
def setUp(self):
super().setUp()
chex.set_n_cpu_devices(NUM_DEVICES)
rng = jax.random.PRNGKey(84452)
self.rng, self.rng_init = jax.random.split(rng, 2)
self.net = hk.transform_with_state(model_fn)
self.forward_fn = forward.MultiClassForwardFn(self.net)
def init_with_updater(self, updater):
inputs = _test_data(num_batches=1, local_batch_size=LOCAL_BATCH_SIZE)[0]
(
self.initial_params,
self.initial_network_state,
self.initial_opt_state,
self.initial_step_count,
) = updater.init(rng=self.rng_init, inputs=inputs)
def run_updater(self, updater, data, return_all_params=False):
"""Runs the updater on the data given in argument."""
params = self.initial_params
network_state = self.initial_network_state
opt_state = self.initial_opt_state
step_count = self.initial_step_count
all_params = [utils.get_first(params)]
for inputs in data:
# Args are donated. Take copies so that we can reuse them.
(
params,
network_state,
opt_state,
step_count,
unused_scalars,
) = updater.update(
params=jax.tree_map(jnp.copy, params),
network_state=jax.tree_map(jnp.copy, network_state),
opt_state=jax.tree_map(jnp.copy, opt_state),
step_count=step_count,
inputs=inputs,
)
all_params.append(utils.get_first(params))
return all_params if return_all_params else all_params[-1]
@parameterized.named_parameters(
('no_accumulation_no_weight_decay', 1, 0.0),
('no_accumulation_with_weight_decay', 1, 1000.0),
('with_accumulation_no_weight_decay', 3, 0.0),
('with_accumulation_with_weight_decay', 3, 1000.0),
)
def test_accumulation(self, num_accumulations, weight_decay):
batch_size = LOCAL_BATCH_SIZE * NUM_DEVICES * num_accumulations
batching = batching_module.VirtualBatching(
batch_size_init=batch_size,
batch_size_per_device_per_step=LOCAL_BATCH_SIZE,
scale_schedule=None,
)
# When using weight-decay, ensure that it is the dominant term in the update
# by using a small learning-rate (downweighs the importance of gradients),
# so that we can detect it.
learning_rate = 0.1 if not weight_decay else 0.1 / weight_decay
updater_no_noise = dp_updater.Updater(
forward_fn=self.forward_fn,
batching=batching,
grad_computer=gradients.GradientComputer(
clipping_norm=1.0,
rescale_to_unit_norm=False,
noise_multiplier=0.0,
vectorize_grad_clipping=True,
),
**_standard_updater_kwargs(
learning_rate=learning_rate,
weight_decay=weight_decay,
),
)
self.init_with_updater(updater_no_noise)
data = _test_data(
num_batches=num_accumulations,
local_batch_size=LOCAL_BATCH_SIZE,
)
output_params = self.run_updater(
updater_no_noise, data, return_all_params=True)
# Check that parameters are unchanged during accumulation steps.
for params in output_params[1:-1]:
chex.assert_trees_all_equal(params, output_params[0])
# Check that parameters have changed during the update step.
with self.assertRaises(AssertionError):
chex.assert_trees_all_close(
output_params[-1],
output_params[0],
rtol=0.1,
)
@parameterized.named_parameters(
('no_accumulation', 1),
('with_accumulation', 5),
)
def test_noise(self, num_accumulations):
std = 0.3
clipping_norm = 0.1
batch_size = LOCAL_BATCH_SIZE * NUM_DEVICES * num_accumulations
batching = batching_module.VirtualBatching(
batch_size_init=batch_size,
batch_size_per_device_per_step=LOCAL_BATCH_SIZE,
scale_schedule=None,
)
updater_no_noise = dp_updater.Updater(
forward_fn=self.forward_fn,
batching=batching,
grad_computer=gradients.GradientComputer(
clipping_norm=clipping_norm,
rescale_to_unit_norm=False,
noise_multiplier=0.0,
vectorize_grad_clipping=True,
),
**_standard_updater_kwargs(),
)
self.init_with_updater(updater_no_noise)
data = _test_data(
num_batches=num_accumulations,
local_batch_size=LOCAL_BATCH_SIZE,
)
# Run one pass of the updater over the data with no noise.
params_no_noise = self.run_updater(updater_no_noise, data)
# Multiple realizations for different rngs.
noise_samples = []
for i in range(NUM_TEST_SAMPLES):
updater_noise = dp_updater.Updater(
forward_fn=self.forward_fn,
batching=batching,
grad_computer=gradients.GradientComputer(
clipping_norm=clipping_norm,
rescale_to_unit_norm=False,
noise_multiplier=std,
vectorize_grad_clipping=True,
),
rng_seed=i,
**_standard_updater_kwargs(),
)
# Run one pass of the updater over the data with noise, using rng seed `i`.
params_noise = self.run_updater(updater_noise, data)
# The difference with params_no_noise should only contain the noise.
noise_samples.append(
_flatten_tree(params_noise) - _flatten_tree(params_no_noise))
noise_samples = jnp.stack(noise_samples)
std_expected = std * clipping_norm / batch_size
# Use synthetic noise as a reference to calibrate the precision required
# to pass the test.
synthetic_noise = std_expected * jax.random.normal(self.rng,
noise_samples.shape)
# Sanity check: synthetic noise passes KS goodness-of-fit test.
_, p_synthetic = spst.kstest(
jnp.ravel(synthetic_noise) / std_expected, 'norm')
self.assertGreater(p_synthetic, 0.05)
# Run KS goodness-of-fit test on noise introduced by DP-SGD.
_, p_dpsgd = spst.kstest(
jnp.ravel(noise_samples) / std_expected, 'norm')
# Reject null hypothesis "implementation is correct" if p-value <= 0.05.
self.assertGreater(p_dpsgd, 0.05)
# Statistics per coordinate, across samples (rng instances)
# (to test that the noise is independent across rng instances).
mean_per_coordinate = jnp.mean(noise_samples, axis=0)
std_per_coordinate = jnp.std(noise_samples, axis=0)
mean_per_coordinate_ref = jnp.mean(synthetic_noise, axis=0)
std_per_coordinate_ref = jnp.std(synthetic_noise, axis=0)
# Statistics per sample (rng instance), across coordinates
# (to test that the noise is independent across coordinates).
mean_per_sample = jnp.mean(noise_samples, axis=1)
std_per_sample = jnp.std(noise_samples, axis=1)
mean_per_sample_ref = jnp.mean(synthetic_noise, axis=1)
std_per_sample_ref = jnp.std(synthetic_noise, axis=1)
# Statistics across both samples and coordinates.
total_mean = jnp.mean(noise_samples)
total_mean_ref = jnp.mean(synthetic_noise)
total_std = jnp.std(noise_samples)
total_std_ref = jnp.std(synthetic_noise)
assert_close_calibrated(
value=mean_per_coordinate,
ref=mean_per_coordinate_ref,
expected=0.0,
)
assert_close_calibrated(
value=std_per_coordinate,
ref=std_per_coordinate_ref,
expected=std_expected,
)
assert_close_calibrated(
value=mean_per_sample,
ref=mean_per_sample_ref,
expected=0.0,
)
assert_close_calibrated(
value=std_per_sample,
ref=std_per_sample_ref,
expected=std_expected,
)
assert_close_calibrated(
value=total_mean,
ref=total_mean_ref,
expected=0.0,
)
assert_close_calibrated(
value=total_std,
ref=total_std_ref,
expected=std_expected,
)
@parameterized.parameters(0.01, 0.1, 1.0, 10.0)
def test_clipping(self, clipping_norm):
batching = batching_module.VirtualBatching(
batch_size_init=LOCAL_BATCH_SIZE * NUM_DEVICES,
batch_size_per_device_per_step=LOCAL_BATCH_SIZE,
scale_schedule=None,
)
updater = dp_updater.Updater(
forward_fn=self.forward_fn,
batching=batching,
grad_computer=gradients.GradientComputer(
clipping_norm=clipping_norm,
rescale_to_unit_norm=False,
noise_multiplier=0.0,
vectorize_grad_clipping=True,
),
**_standard_updater_kwargs(),
)
self.init_with_updater(updater)
data = _test_data(num_batches=1, local_batch_size=LOCAL_BATCH_SIZE)
params = self.run_updater(updater, data)
initial_params = utils.get_first(self.initial_params)
# Invert SGD equation (with lr=1) to find gradients.
grads_updater = _flatten_tree(initial_params) - _flatten_tree(params)
# Only one mini-batch in this test.
inputs = data[0]
# Merge two leading dimensions to put all data on a single device.
inputs_single_device = jax.tree_util.tree_map(
lambda x: jnp.reshape(x, (x.shape[0] * x.shape[1],) + x.shape[2:]),
inputs,
)
def forward_per_sample(p):
logits, unused_network_state = self.net.apply(
p,
self.initial_network_state,
self.rng,
inputs_single_device.image,
is_training=True,
)
loss_per_sample_per_augmentation = optax.softmax_cross_entropy(
logits, inputs_single_device.label)
# Average over the augmult dimension.
loss_per_sample = jnp.mean(loss_per_sample_per_augmentation, axis=1)
# Check that the batch dimension is correct.
chex.assert_shape(loss_per_sample, [LOCAL_BATCH_SIZE * NUM_DEVICES])
return loss_per_sample
def clip_global_norm(tree):
l2_norm = optax.global_norm(tree)
coeff = jnp.minimum(clipping_norm / l2_norm, 1.0)
return jax.tree_util.tree_map(lambda x: x * coeff, tree)
# Compute Jacobian of the loss function.
jacobian = jax.jacobian(forward_per_sample)(initial_params)
# Clip Jacobian per sample.
jacobian_clipped = jax.vmap(clip_global_norm)(jacobian)
# Average over samples.
grads_manual = jax.tree_util.tree_map(
lambda x: jnp.mean(x, axis=0),
jacobian_clipped,
)
# Flatten to compare with grads_updater.
grads_manual = _flatten_tree(grads_manual)
assert_trees_all_close(grads_updater, grads_manual)
def test_frozen_params(self):
batching = batching_module.VirtualBatching(
batch_size_init=LOCAL_BATCH_SIZE * NUM_DEVICES,
batch_size_per_device_per_step=LOCAL_BATCH_SIZE,
scale_schedule=None,
)
train_only_layer = 'mlp/~/linear_1'
updater = dp_updater.Updater(
forward_fn=self.forward_fn,
batching=batching,
grad_computer=gradients.GradientComputer(
clipping_norm=0.1,
rescale_to_unit_norm=False,
noise_multiplier=0.1,
vectorize_grad_clipping=True,
),
is_trainable=(
lambda module_name, *args: module_name == train_only_layer),
**_standard_updater_kwargs(),
)
self.init_with_updater(updater)
data = _test_data(num_batches=1, local_batch_size=LOCAL_BATCH_SIZE)
params = self.run_updater(updater, data)
initial_params = utils.get_first(self.initial_params)
count_trainable, count_frozen = 0, 0
for layer_name in params:
params_layer = params[layer_name]
initial_params_layer = initial_params[layer_name]
if layer_name != train_only_layer:
# This layer should be frozen.
count_frozen += 1
assert_trees_all_close(params_layer, initial_params_layer)
else:
# This layer should be updated.
count_trainable += 1
chex.assert_trees_all_equal_comparator(
lambda x1, x2: jnp.linalg.norm(x1 - x2) > 1e-2,
lambda x1, x2: 'Failed',
params_layer,
initial_params_layer,
)
self.assertEqual(count_trainable, 1)
self.assertEqual(count_frozen, 2)
@parameterized.parameters(0.01, 0.1, 1.0, 10.0)
# TODO: explore why 0.01 and 0.1 clipping norms require higher rtol
def test_rescaling(self, clipping_norm):
noise_std = 0.1
batching = batching_module.VirtualBatching(
batch_size_init=LOCAL_BATCH_SIZE * NUM_DEVICES,
batch_size_per_device_per_step=LOCAL_BATCH_SIZE,
scale_schedule=None,
)
updater_no_rescaling = dp_updater.Updater(
forward_fn=self.forward_fn,
batching=batching,
grad_computer=gradients.GradientComputer(
clipping_norm=clipping_norm,
noise_multiplier=noise_std,
rescale_to_unit_norm=False,
vectorize_grad_clipping=True,
),
**_standard_updater_kwargs(),
)
updater_with_rescaling = dp_updater.Updater(
forward_fn=self.forward_fn,
batching=batching,
grad_computer=gradients.GradientComputer(
clipping_norm=clipping_norm,
noise_multiplier=noise_std,
rescale_to_unit_norm=True,
vectorize_grad_clipping=True,
),
**_standard_updater_kwargs(),
)
self.init_with_updater(updater_no_rescaling)
data = _test_data(num_batches=1, local_batch_size=LOCAL_BATCH_SIZE)
params_with_rescaling = self.run_updater(updater_with_rescaling, data)
params_no_rescaling = self.run_updater(updater_no_rescaling, data)
initial_params = utils.get_first(self.initial_params)
# Invert SGD equation (with lr=1) to find gradients.
grads_with_rescaling = (
_flatten_tree(initial_params) - _flatten_tree(params_with_rescaling))
grads_no_rescaling = (
_flatten_tree(initial_params) - _flatten_tree(params_no_rescaling))
grads_manual_rescaling = grads_no_rescaling / clipping_norm
assert_trees_all_close(grads_with_rescaling, grads_manual_rescaling)
def test_evaluation(self):
forward_fn = _ForwardFnWithRng(self.net)
batching = batching_module.VirtualBatching(
batch_size_init=LOCAL_BATCH_SIZE * NUM_DEVICES,
batch_size_per_device_per_step=LOCAL_BATCH_SIZE,
scale_schedule=None,
)
updater = dp_updater.Updater(
forward_fn=forward_fn,
batching=batching,
grad_computer=gradients.GradientComputer(
clipping_norm=None,
noise_multiplier=None,
rescale_to_unit_norm=False,
vectorize_grad_clipping=True,
),
**_standard_updater_kwargs(),
)
self.init_with_updater(updater)
inputs = _test_data(num_batches=1, local_batch_size=LOCAL_BATCH_SIZE)
metrics = updater.evaluate(
self.initial_params,
self.initial_network_state,
self.rng,
inputs[0])
# The different devices' outputs should arise from different random keys.
for j in range(1, NUM_DEVICES):
self.assertNotAlmostEqual(
metrics.scalars_avg['rng'][0],
metrics.scalars_avg['rng'][j])
def test_average_init_takes_copy(self):
batching = batching_module.VirtualBatching(
batch_size_init=LOCAL_BATCH_SIZE * NUM_DEVICES,
batch_size_per_device_per_step=LOCAL_BATCH_SIZE,
scale_schedule=None,
)
updater = dp_updater.Updater(
forward_fn=self.forward_fn,
batching=batching,
grad_computer=gradients.GradientComputer(
clipping_norm=None,
noise_multiplier=None,
rescale_to_unit_norm=False,
vectorize_grad_clipping=True,
),
**_standard_updater_kwargs(),
)
params = jnp.array([[3., 4., 5.]] * NUM_DEVICES)
avg_init = updater.init_average(params)
chex.assert_trees_all_close(params, avg_init)
# Assert that the average survives even when the original is donated.
params.delete()
jax.device_get(avg_init)
def test_no_averaging_on_accumulation_steps(self):
# 3 accumulation steps per full batch.
batch_size = 3 * LOCAL_BATCH_SIZE * NUM_DEVICES
batching = batching_module.VirtualBatching(
batch_size_init=batch_size,
batch_size_per_device_per_step=LOCAL_BATCH_SIZE,
scale_schedule=None,
)
updater = dp_updater.Updater(
forward_fn=self.forward_fn,
batching=batching,
grad_computer=gradients.GradientComputer(
clipping_norm=None,
noise_multiplier=None,
rescale_to_unit_norm=False,
vectorize_grad_clipping=True,
),
**_standard_updater_kwargs(),
)
inputs = _test_data(num_batches=6, local_batch_size=LOCAL_BATCH_SIZE)
params, network_state, opt_state, step_count = updater.init(
rng=self.rng_init, inputs=inputs[0])
for step in range(5):
if step in (1, 2, 4):
# This is an accumulation-only step.
# Average update should be a no-op.
avg_params = jnp.array([[6., 7., 8.]] * NUM_DEVICES)
new_params = jnp.array([[3., 4., 5.]] * NUM_DEVICES)
new_avg_params = updater.update_polyak(
avg_params, new_params, opt_state, start_step=2)
chex.assert_trees_all_close(avg_params, new_avg_params)
# Run an update step to ensure that the multi-step optimiser's
# accumulation step count is updated. This is how the average updater
# determines whether this is an update step or an accumulation-only step.
params, network_state, opt_state, step_count, _ = updater.update(
params, network_state, opt_state, step_count, inputs[1+step])
def test_ema_on_update_steps(self):
# 3 accumulation steps per full batch.
batch_size = 3 * LOCAL_BATCH_SIZE * NUM_DEVICES
batching = batching_module.VirtualBatching(
batch_size_init=batch_size,
batch_size_per_device_per_step=LOCAL_BATCH_SIZE,
scale_schedule=None,
)
updater = dp_updater.Updater(
forward_fn=self.forward_fn,
batching=batching,
grad_computer=gradients.GradientComputer(
clipping_norm=None,
noise_multiplier=None,
rescale_to_unit_norm=False,
vectorize_grad_clipping=True,
),
**_standard_updater_kwargs(),
)
inputs = _test_data(num_batches=8, local_batch_size=LOCAL_BATCH_SIZE)
params, network_state, opt_state, step_count = updater.init(
rng=self.rng_init, inputs=inputs[0])
for step in range(7):
if step in (3, 6):
# This is an update step.
avg_params = jnp.array([[6., 9., 4.]] * NUM_DEVICES)
new_params = jnp.array([[3., 4., 5.]] * NUM_DEVICES)
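        # Expected EMA with mu=0.01:
        # 0.01 * [6, 9, 4] + 0.99 * [3, 4, 5] = [3.03, 4.05, 4.99].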
expected_new_avg_params = jnp.array([[3.03, 4.05, 4.99]] * NUM_DEVICES)
new_avg_params = updater.update_ema(
avg_params, new_params, opt_state, mu=.01, start_step=-50)
chex.assert_trees_all_close(expected_new_avg_params, new_avg_params)
# Run an update step to ensure that the multi-step optimiser's
# accumulation step count is updated. This is how the average updater
# determines whether this is an update step or an accumulation-only step.
params, network_state, opt_state, step_count, _ = updater.update(
params, network_state, opt_state, step_count, inputs[1+step])
if __name__ == '__main__':
absltest.main()
|
jax_privacy-main
|
jax_privacy/src/training/dp_updater_test.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optim utils."""
import dataclasses
from typing import Any, Mapping, Optional, Sequence, Union
import haiku as hk
import jax
from jax_privacy.src.training import experiment_config
import optax
@dataclasses.dataclass(kw_only=True, slots=True)
class LearningRateConfig:
"""Configuration for the learning-rate.
Attributes:
name: name of the optax decay schedule to use.
kwargs: keyword arguments for the optax decay schedule.
    relative_kwargs: names of keyword arguments provided in `kwargs` whose
      values are defined relative to the total number of model updates. Each
      value will later be multiplied by the number of model updates so that
      it can be correctly interpreted by optax.
"""
name: str
kwargs: Mapping[str, Any]
relative_kwargs: Optional[Sequence[str]] = None
def constant_lr_config(
value: float,
) -> LearningRateConfig:
return LearningRateConfig(
name='constant_schedule',
kwargs={'value': value},
)
def cosine_decay_lr_config(
*,
init_value: float,
alpha: float = 0.0,
) -> LearningRateConfig:
return LearningRateConfig(
name='cosine_decay_schedule',
kwargs={
'init_value': init_value,
'alpha': alpha,
'decay_steps': 1.0,
},
relative_kwargs=['decay_steps'],
)
@dataclasses.dataclass(kw_only=True, slots=True)
class OptimizerConfig:
"""Configuration for the optimizer.
Attributes:
name: Name of the optax optimizer to use.
kwargs: Keyword arguments for the optax optimizer.
lr: Learning-rate configuration.
"""
name: str
lr: LearningRateConfig
kwargs: Mapping[str, Any] = dataclasses.field(default_factory=dict)
def make_lr_schedule_fn(self, max_num_updates: int) -> optax.Schedule:
"""Creates the learning-rate schedule based on the number of updates."""
if isinstance(self.lr, float):
return optax.constant_schedule(self.lr)
else:
kwargs = {**self.lr.kwargs}
if self.lr.relative_kwargs is not None:
# Adapt relative arguments by multiplying them by `max_num_updates`.
for kwarg_name in self.lr.relative_kwargs:
rel_val = kwargs[kwarg_name]
abs_val = rel_val * max_num_updates
kwargs[kwarg_name] = abs_val
return getattr(
optax,
self.lr.name,
)(**kwargs)
def make_optimizer(
self,
max_num_updates: int,
) -> optax.GradientTransformation:
optimizer = getattr(optax, self.name)
return optimizer(
self.make_lr_schedule_fn(max_num_updates),
**self.kwargs,
)
@dataclasses.dataclass(kw_only=True, slots=True)
class AgcOptimizerConfig(OptimizerConfig):
"""Configuration for Adaptive Gradient Clipping optimizer.
  This is particularly useful to stabilize the training of NF-ResNets and
  NFNets at large batch-sizes.
References:
[Brock, De, Smith, Simonyan 2021] High-Performance Large-Scale Image
Recognition Without Normalization. (https://arxiv.org/abs/2102.06171)
Attributes:
filter_fn: On which parameters to enable AGC. If set to None, this
corresponds to enabling AGC on all parameters.
name: Name of the optax optimizer to use.
kwargs: Keyword arguments for the optax optimizer.
lr: Learning-rate configuration.
clipping: The maximum allowed ratio of update norm to parameter norm.
eps: An epsilon term to prevent clipping of zero-initialized params. Usually
      significantly larger than the epsilon used for Adam (default: 1e-3).
"""
name: str
kwargs: Mapping[str, Any] = dataclasses.field(default_factory=dict)
lr: Union[LearningRateConfig, float]
clipping: float = 0.01
eps: float = 1e-3
filter_fn: experiment_config.FilterFn | None = None
def make_optimizer(
self,
max_num_updates: int,
) -> optax.GradientTransformation:
# TODO: investigate use of super() here.
base_optimizer = getattr(optax, self.name)(
self.make_lr_schedule_fn(max_num_updates),
**self.kwargs,
)
# The AGC optimizer clips the gradient with the AGC rule before applying
# the transformation of `base_optimizer`.
agc_optimizer = optax.chain(
optax.adaptive_grad_clip(self.clipping, self.eps),
base_optimizer,
)
def label_parameters(tree: hk.Params):
if self.filter_fn is None:
# If no filter_fn is provided, all leaves of the tree are tagged as
# 'agc'.
return jax.tree_map(lambda x: 'agc', tree)
else:
# Leaves for which `self.filter_fn` returns True are tagged as 'agc',
# and other leaves are tagged as 'no_agc'.
label_map = {True: 'agc', False: 'no_agc'}
return hk.data_structures.map(
lambda *args: label_map[self.filter_fn(*args)], tree)
return optax.multi_transform(
{'agc': agc_optimizer, 'no_agc': base_optimizer},
param_labels=label_parameters,
)
def adam_config(
*,
lr: LearningRateConfig,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
) -> OptimizerConfig:
return OptimizerConfig(
name='adam',
lr=lr,
kwargs={'eps': eps, 'b1': b1, 'b2': b2},
)
def sgd_config(
*,
lr: LearningRateConfig,
momentum: Optional[float] = None,
nesterov: bool = False,
) -> OptimizerConfig:
return OptimizerConfig(
name='sgd',
lr=lr,
kwargs={'momentum': momentum, 'nesterov': nesterov},
)
def agc_config(
*,
lr: LearningRateConfig,
filter_fn: experiment_config.FilterFn | None = None,
momentum: float | None = None,
nesterov: bool = False,
clipping: float = 0.01,
eps: float = 1e-3,
) -> AgcOptimizerConfig:
return AgcOptimizerConfig(
name='sgd',
lr=lr,
kwargs={'momentum': momentum, 'nesterov': nesterov},
clipping=clipping,
eps=eps,
filter_fn=filter_fn,
)
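# Illustrative sketch (not part of the library): builds an SGD optimizer whose
# learning-rate follows a cosine decay; the numbers are assumed for the example.
def _example_sgd_with_cosine_decay() -> optax.GradientTransformation:
  config = sgd_config(
      lr=cosine_decay_lr_config(init_value=4.0),
      momentum=0.9,
  )
  # `decay_steps` is specified relatively (as 1.0) by `cosine_decay_lr_config`,
  # so `make_lr_schedule_fn` resolves it to 1.0 * max_num_updates = 1000.
  return config.make_optimizer(max_num_updates=1000)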
|
jax_privacy-main
|
jax_privacy/src/training/optimizer_config.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
jax_privacy-main
|
jax_privacy/src/training/__init__.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for metrics."""
from absl.testing import absltest
import chex
import jax
import jax.numpy as jnp
from jax_privacy.src.training import metrics
class AccuracyTest(absltest.TestCase):
def test_distinct(self):
logits = jnp.array([
[0.1, 0.5, 0.2],
[-0.3, -0.2, 0.0],
])
labels = jnp.array([1, 2])
labels_one_hot = jax.nn.one_hot(labels, 3)
acc1, acc2, acc3 = metrics.topk_accuracy(
logits, labels_one_hot, topk=[1, 2, 3])
chex.assert_equal(acc1, 1.0) # all correct
chex.assert_equal(acc2, 1.0) # all correct
chex.assert_equal(acc3, 1.0) # all correct
def test_with_ties(self):
logits = jnp.array([
[0.1, 0.5, 0.5],
[-0.2, -0.2, -0.2],
])
labels = jnp.array([1, 2])
labels_one_hot = jax.nn.one_hot(labels, 3)
acc1, acc2, acc3 = metrics.topk_accuracy(
logits, labels_one_hot, topk=[1, 2, 3])
chex.assert_equal(acc1, 0.0) # all incorrect
chex.assert_equal(acc2, 0.5) # first sample correct, second one incorrect
chex.assert_equal(acc3, 1.0) # all correct
def test_with_nan(self):
logits = jnp.array([
[0.1, 0.5, jnp.nan],
[-0.2, -0.2, -0.2],
[-0.3, -0.2, -0.5],
])
labels = jnp.array([1, jnp.nan, 0])
labels_one_hot = jax.nn.one_hot(labels, 3)
acc1, acc2, acc3 = metrics.topk_accuracy(
logits, labels_one_hot, topk=[1, 2, 3])
chex.assert_equal(acc1, 0.0) # all incorrect
chex.assert_equal(acc2, 1 / 3) # third sample correct
chex.assert_equal(acc3, 1 / 3) # third sample correct
if __name__ == '__main__':
absltest.main()
|
jax_privacy-main
|
jax_privacy/src/training/metrics_test.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The updater computes and applies the update.
Typical usage:
# Initialize model and optimizer (pmapped).
params, network_state, opt_state, step_count = updater.init(
rng=rng,
inputs=inputs,
)
# Apply update (pmapped).
params, network_state, opt_state, step_count, stats = updater.update(
params=params,
network_state=network_state,
opt_state=opt_state,
step_count=step_count,
inputs=inputs,
)
"""
import abc
from typing import Generic, NamedTuple
import chex
from jax_privacy.src.dp_sgd import typing
import optax
class StepCount(NamedTuple):
"""Hierarchical step - a full batch count plus inner accumulation step."""
update_step: int
accumulation_step: int
def next(self, every_k: int) -> 'StepCount':
"""Returns updated with by accumulation step, rolling over every k."""
new_accumulation_step = self.accumulation_step + 1
return StepCount(
update_step=(self.update_step + new_accumulation_step // every_k),
accumulation_step=(new_accumulation_step % every_k),
)
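# Illustrative sketch (not part of the library): with `every_k=3`, three
# accumulation steps roll over into exactly one full update step.
def _example_step_count_rollover() -> None:
  step = StepCount(update_step=0, accumulation_step=0)
  for _ in range(3):
    step = step.next(every_k=3)
  assert step == StepCount(update_step=1, accumulation_step=0)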
class AbstractUpdater(
# False positive error with pytype failing to use a `TypeVar` imported
# from elsewhere.
# pytype: disable=invalid-annotation
Generic[typing.InputsT, typing.ParamsT, typing.ModelStateT],
metaclass=abc.ABCMeta,
# pytype: enable=invalid-annotation
):
"""Defines and applies the update, potentially in parallel across devices."""
@abc.abstractmethod
def init(
self,
rng: chex.PRNGKey,
inputs: typing.InputsT,
) -> tuple[
typing.ParamsT, typing.ModelStateT, optax.MultiStepsState, StepCount]:
"""Provides initial training state.
Args:
rng: Random key.
inputs: Training inputs.
Returns:
params: Initial model parameters (both trainable and frozen).
network_state: Initial network state.
opt_state: Initial optimiser state.
step_count: Initial number of full steps and inner accumulation steps.
"""
raise NotImplementedError('init method is not implemented')
@abc.abstractmethod
def update(
self,
params: typing.ParamsT,
network_state: typing.ModelStateT,
opt_state: optax.MultiStepsState,
step_count: StepCount,
inputs: typing.InputsT,
) -> tuple[typing.ParamsT, typing.ModelStateT, optax.MultiStepsState,
StepCount, typing.Metrics]:
"""Computes updated training state (to be pmapped).
Args:
params: Model parameters (both trainable and frozen).
network_state: Network state.
opt_state: Optimiser state.
step_count: Number of full steps and inner accumulation steps.
inputs: Training inputs.
Returns:
params: Updated model parameters (both trainable and frozen).
network_state: Updated network state.
opt_state: Updated optimiser state.
step_count: Updated number of full steps and inner accumulation steps.
scalars: Scalar outputs to log.
"""
raise NotImplementedError('update method is not implemented')
@abc.abstractmethod
def step_count_from_opt_state(
self,
opt_state: optax.MultiStepsState,
) -> StepCount:
"""Returns the hierarchical step number."""
@abc.abstractmethod
def evaluate(
self,
params: typing.ParamsT,
network_state: typing.ModelStateT,
rng: chex.PRNGKey,
inputs: typing.InputsT,
) -> typing.Metrics:
"""Evaluates the model with the current state.
Args:
params: Model parameters (both trainable and frozen).
network_state: Network state.
rng: Random key.
inputs: Evaluation inputs, consisting of tensors of shape
(num_local_replicas, batch_size, ...).
Returns:
Evaluation results for the mini-batch, as a pair of the form
(per-example outputs over all hosts, aggregated metrics).
The per-example outputs have shape (num_replicas, batch_size, ...).
"""
@abc.abstractmethod
def optimizer(self) -> optax.GradientTransformation:
"""Returns optimiser giving rise to `opt_state`."""
@abc.abstractmethod
def init_average(
self,
params: typing.ParamsT,
) -> typing.ParamsT:
"""Initialises a copy of the params for moving averages.
Taking a copy is important because `params` may subsequently be donated.
Args:
params: Model parameters (both trainable and frozen).
Returns:
Initial averages of model parameters (both trainable and frozen).
"""
@abc.abstractmethod
def update_ema(
self,
ema_params: typing.ParamsT,
params: typing.ParamsT,
opt_state: optax.MultiStepsState,
*,
mu: chex.Numeric,
start_step: chex.Numeric,
) -> typing.ParamsT:
"""Initialises a copy of the params for exponential moving averages.
Taking a copy is important because `params` may subsequently be donated.
Args:
ema_params: Existing averages of parameters (both trainable and frozen).
params: Model parameters (both trainable and frozen).
opt_state: Optimiser state.
mu: Decay factor.
start_step: Update step number at which to start applying averaging.
Returns:
Updated averages of model parameters (both trainable and frozen).
"""
@abc.abstractmethod
def update_polyak(
self,
polyak_params: typing.ParamsT,
params: typing.ParamsT,
opt_state: optax.MultiStepsState,
*,
start_step: chex.Numeric,
) -> typing.ParamsT:
"""Initialises a copy of the params for Polyak moving averages.
Taking a copy is important because `params` may subsequently be donated.
Args:
polyak_params: Existing averages of parameters (both trainable and
frozen).
params: Model parameters (both trainable and frozen).
opt_state: Optimiser state.
start_step: Update step number at which to start applying averaging.
Returns:
Updated averages of model parameters (both trainable and frozen).
"""
|
jax_privacy-main
|
jax_privacy/src/training/updater.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jaxline experiment to define training and eval loops."""
import abc
import functools
from typing import Any, Union
from absl import logging
import chex
import jax
import jax.numpy as jnp
from jax_privacy.src import accounting
from jax_privacy.src.dp_sgd import batching as batching_module
from jax_privacy.src.dp_sgd import devices
from jax_privacy.src.dp_sgd import gradients
from jax_privacy.src.training import dp_updater
from jax_privacy.src.training import experiment_config
from jax_privacy.src.training import forward
from jax_privacy.src.training import optimizer_config as opt_config
from jax_privacy.src.training import updater as updater_py
from jaxline import experiment
from jaxline import utils as jaxline_utils
import ml_collections
import numpy as np
def _to_scalar(
x: Union[chex.Numeric, chex.ArrayNumpy],
) -> Union[chex.Numeric, chex.ArrayNumpy]:
"""Casts the input to a scalar if it is an array with a single element."""
if isinstance(x, (chex.Array, chex.ArrayNumpy)) and x.size == 1:
return x.reshape(())
else:
return x
class AbstractExperiment(experiment.AbstractExperiment, metaclass=abc.ABCMeta):
"""Jaxline Experiment performing DP-SGD training."""
# Holds a map from object properties that will be checkpointed to their name
# within a checkpoint. Currently it is assumed that these are all sharded
# device arrays.
CHECKPOINT_ATTRS = {
'_params': 'params',
'_opt_state': 'opt_state',
'_network_state': 'network_state',
'_params_ema': 'params_ema',
'_params_polyak': 'params_polyak',
}
def __init__(
self,
mode: str,
random_seed: int,
training_config: experiment_config.TrainingConfig,
averaging_config: experiment_config.AveragingConfig,
optimizer_config: opt_config.OptimizerConfig,
num_training_samples: int,
num_updates: int,
*,
device_layout: devices.DeviceLayout = devices.DeviceLayout(),
):
"""Initializes experiment."""
self.mode = mode
self.random_seed = random_seed
self._training_config = training_config
self._averaging_config = averaging_config
self._optimizer_config = optimizer_config
self._device_layout = device_layout
self._params = None
self._network_state = None
self._opt_state = None
self._step_count = updater_py.StepCount(update_step=0, accumulation_step=0)
# The ema coefficient may be a scalar or a list of scalars.
self._params_ema = jax.tree_map(lambda _: None,
self._averaging_config.ema_coefficient)
self._params_polyak = None
self._train_input = None
self._eval_input = None
self.num_training_samples = num_training_samples
self.batching = batching_module.VirtualBatching(
batch_size_init=self._training_config.batch_size.total,
batch_size_per_device_per_step=(
self._training_config.batch_size.per_device_per_step),
scale_schedule=self._training_config.batch_size.scale_schedule,
)
self.accountant = accounting.ExperimentAccountant(
clipping_norm=self._training_config.dp.clipping_norm,
noise_multiplier=self._training_config.dp.noise_multiplier,
dp_epsilon=self._training_config.dp.stop_training_at_epsilon,
dp_delta=self._training_config.dp.delta,
batching=self.batching,
num_samples=self.num_training_samples,
dp_accountant_config=self._training_config.dp.accountant,
)
if self._training_config.dp.stop_training_at_epsilon:
self._max_num_updates = self.accountant.compute_max_num_updates()
else:
self._max_num_updates = num_updates
self._cached_accountant = accounting.CachedExperimentAccountant(
max_num_updates=self._max_num_updates,
accountant=self.accountant,
)
self._updater = None
@property
def updater(self) -> updater_py.AbstractUpdater:
if self._updater is None:
self._updater = self._build_updater()
return self._updater
@property
@abc.abstractmethod
def forward_fn(self) -> forward.ForwardFn:
"""Forward function."""
def _build_updater(self) -> updater_py.AbstractUpdater:
"""Builds a 'standard' Updater from the config."""
grad_computer = gradients.GradientComputer(
clipping_norm=self._training_config.dp.clipping_norm,
noise_multiplier=self._training_config.dp.noise_multiplier,
rescale_to_unit_norm=self._training_config.dp.rescale_to_unit_norm,
vectorize_grad_clipping=(
self._training_config.dp.vectorize_grad_clipping),
device_layout=self._device_layout,
)
return dp_updater.Updater(
batching=self.batching,
forward_fn=self.forward_fn,
grad_computer=grad_computer,
weight_decay=self._training_config.weight_decay,
optimizer_config=self._optimizer_config,
max_num_updates=self._max_num_updates,
is_trainable=self._training_config.is_trainable,
logging_config=self._training_config.logging,
rng_seed=self.random_seed,
device_layout=self._device_layout,
)
def _compute_epsilon(
self,
num_updates: chex.Numeric,
use_approximate_cache: bool = False,
) -> float:
"""Computes DP-epsilon either on-the-fly or reusing cached results."""
if jnp.size(num_updates) > 0:
num_updates = jnp.reshape(num_updates, [-1])[0]
num_updates = int(num_updates)
if use_approximate_cache:
return self._cached_accountant.compute_approximate_epsilon(num_updates)
else:
return self.accountant.compute_current_epsilon(num_updates)
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(
self,
*,
global_step: chex.Array,
rng: chex.Array,
writer: Any,
) -> dict[str, np.ndarray]:
"""Perform a single step of training."""
del writer # unused
if self._train_input is None:
self._initialize_train()
(
self._params,
self._network_state,
self._opt_state,
self._step_count,
metrics,
) = (
self.updater.update(
params=self._params,
network_state=self._network_state,
opt_state=self._opt_state,
step_count=self._step_count,
inputs=next(self._train_input),
))
# Just return the tracking metrics on the first device for logging.
scalars = jaxline_utils.get_first(metrics.scalars)
if self._averaging_config.ema_enabled:
def single_ema(ema_coefficient, params_ema):
return self.updater.update_ema(
params_ema, self._params, self._opt_state,
mu=ema_coefficient,
start_step=self._averaging_config.ema_start_step,
)
self._params_ema = jax.tree_map(
single_ema, self._averaging_config.ema_coefficient, self._params_ema)
if self._averaging_config.polyak_enabled:
self._params_polyak = self.updater.update_polyak(
self._params_polyak, self._params, self._opt_state,
start_step=self._averaging_config.polyak_start_step,
)
# Convert the number of samples seen into epochs.
scalars['data_seen'] = self.batching.data_seen(global_step[0])
scalars['epochs'] = scalars['data_seen'] / self.num_training_samples
# Log dp_epsilon (outside the pmapped _update_func method).
scalars.update(dp_epsilon=self._compute_epsilon(
num_updates=self._step_count.update_step,
use_approximate_cache=True,
))
if self._training_config.logging.prepend_split_name:
scalars = {f'train/{k}': v for k, v in scalars.items()}
# Convert arrays to scalars for logging and storing.
return jax.tree_util.tree_map(_to_scalar, scalars)
def should_run_step(
self,
unused_global_step: int,
unused_config: ml_collections.ConfigDict,
) -> bool:
"""Returns whether to run the step function, given the current update_step.
We ignore the global_step and config given by jaxline, because model updates
are not applied at every global_step (due to gradient accumulation to use
    large batch-sizes), so we instead use our own `update_step`, which
    correctly accounts for that.
"""
return self._step_count.update_step < self._max_num_updates
def _initialize_train(self):
"""Initializes the training data and the model parameters."""
self._train_input = jaxline_utils.py_prefetch(self._build_train_input)
# Check we haven't already restored params
if self._params is None:
rng_init = jax.random.PRNGKey(self.random_seed)
(
self._params,
self._network_state,
self._opt_state,
self._step_count,
) = self.updater.init(
rng=rng_init,
inputs=next(self._train_input),
)
if self._should_restore_model():
# Update self._params and self._network_state
self._restore_model()
logging.info('Initialized parameters from a checkpoint.')
else:
logging.info('Initialized parameters randomly rather than restoring '
'from checkpoint.')
# Take physical copies of the initial params, so that they remain intact
# after the first update step when `params` is donated.
if self._averaging_config.ema_enabled:
self._params_ema = jax.tree_map(
lambda _: self.updater.init_average(self._params),
self._averaging_config.ema_coefficient)
if self._averaging_config.polyak_enabled:
self._params_polyak = self.updater.init_average(self._params)
else:
# We have restored from a checkpoint. The step count is not in the
# checkpoint, but is derived as needed from the optimiser state.
self._step_count = self.updater.step_count_from_opt_state(
self._opt_state)
@abc.abstractmethod
def _should_restore_model(self) -> bool:
"""Whether the model should be restored (or randomly initialized)."""
@abc.abstractmethod
def _restore_model(self):
"""Restore model from pre-trained checkpoint."""
@abc.abstractmethod
def _build_train_input(self):
"""Builds the training input pipeline."""
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(
self,
*,
global_step: chex.Array,
rng: chex.Array,
writer: Any,
) -> dict[str, np.ndarray]:
"""Run the complete evaluation with the current model parameters."""
del writer # unused
if self._opt_state is not None:
# We have restored from a checkpoint. The step count is not in the
# checkpoint, but is derived as needed from the optimiser state.
self._step_count = self.updater.step_count_from_opt_state(
self._opt_state)
# Ensure that the random key is the same across all hosts.
rng = jax.pmap(
functools.partial(jax.lax.all_gather, axis_name='i'),
axis_name='i')(rng)[:, 0]
dp_epsilon = self._compute_epsilon(self._step_count.update_step)
metrics = jax.tree_util.tree_map(
np.asarray,
self._eval_epoch(
jaxline_utils.get_first(rng),
jaxline_utils.get_first(global_step),
),
)
metrics.update(
update_step=self._step_count.update_step,
dp_epsilon=dp_epsilon,
)
if self._training_config.logging.prepend_split_name:
metrics = {f'eval/{k}': v for k, v in metrics.items()}
# Convert arrays to scalars for logging and storing.
metrics = jax.tree_util.tree_map(_to_scalar, metrics)
logging.info(metrics)
# Make sure all hosts stay up until the end of the evaluation.
jaxline_utils.rendezvous()
return metrics
@abc.abstractmethod
def _build_eval_input(self):
"""Builds the evaluation input pipeline."""
@abc.abstractmethod
def _eval_epoch(self, rng, global_step):
"""Evaluates an epoch."""
|
jax_privacy-main
|
jax_privacy/src/training/experiment.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines train and evaluation functions that compute losses and metrics."""
import abc
from typing import Generic
import chex
from jax_privacy.src.dp_sgd import typing
class ForwardFn(
# False positive error with pytype failing to use a `TypeVar` imported
# from elsewhere.
# pytype: disable=invalid-annotation
Generic[typing.InputsT, typing.ParamsT, typing.ModelStateT],
# pytype: enable=invalid-annotation
metaclass=abc.ABCMeta,
):
"""Defines forward passes for learning tasks."""
@abc.abstractmethod
def train_init(
self,
rng_key: chex.PRNGKey,
inputs: typing.InputsT,
) -> tuple[typing.ParamsT, typing.ModelStateT]:
"""Initializes the model.
Args:
rng_key: random number generation key used for the random initialization.
inputs: model inputs.
Returns:
Initialized model parameters and state.
"""
@abc.abstractmethod
def train_forward(
self,
params: typing.ParamsT,
network_state: typing.ModelStateT,
rng_per_example: chex.PRNGKey,
inputs: typing.InputsT,
) -> tuple[typing.Loss, tuple[typing.ModelStateT, typing.Metrics]]:
"""Forward pass per example (training time).
Args:
params: model parameters that should get updated during training.
network_state: model state.
      rng_per_example: a random number generation key specific to a device
        and accumulation step. The user can use it to create a unique seed
        per individual example.
inputs: model inputs.
Returns:
loss: loss function averaged on the mini-batch.
aux:
network_state: new model state
metrics: metrics computed on the current mini-batch
"""
@abc.abstractmethod
def eval_forward(
self,
params: typing.ParamsT,
network_state: typing.ModelStateT,
rng: chex.PRNGKey,
inputs: typing.InputsT,
) -> typing.Metrics:
"""Forward pass per example (evaluation time).
Args:
params: model parameters that should get updated during training.
network_state: model state.
rng: random number generation key.
inputs: model inputs.
Returns:
evaluation results for the mini-batch, as a pair of the form
(per-example outputs, aggregated metrics)
"""
|
jax_privacy-main
|
jax_privacy/src/training/forward.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parameter averaging functions."""
import chex
import jax
import jax.numpy as jnp
from jax_privacy.src.dp_sgd import typing
def polyak(
tree_old: typing.ParamsT,
tree_new: typing.ParamsT,
t: chex.Numeric,
) -> typing.ParamsT:
"""Polyak averaging if t >= 0, return tree_new otherwise."""
t = jnp.maximum(t, 0)
return jax.tree_util.tree_map(
lambda old, new: (t * old + new) / (t + 1),
tree_old,
tree_new,
)
def ema(
tree_old: typing.ParamsT,
tree_new: typing.ParamsT,
mu: chex.Numeric,
t: chex.Numeric,
use_warmup: bool = True,
) -> typing.ParamsT:
"""Exponential Moving Averaging if t >= 0, return tree_new otherwise."""
# Do not average until t >= 0.
mu *= (t >= 0)
if use_warmup:
mu = jnp.minimum(mu, (1.0 + t) / (10.0 + t))
return jax.tree_util.tree_map(
lambda old, new: mu * old + (1 - mu) * new,
tree_old,
tree_new,
)
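# Illustrative sketch (not part of the library): numeric example of the two
# averaging rules above on scalar parameter "trees".
def _example_averaging() -> None:
  old, new = jnp.array(2.0), jnp.array(4.0)
  # Polyak running mean after t=1 previous iterates: (1 * 2 + 4) / 2 = 3.
  assert jnp.allclose(polyak(old, new, t=1), 3.0)
  # EMA warmup at t=0 caps mu at (1 + 0) / (10 + 0) = 0.1, so the result is
  # 0.1 * 2 + 0.9 * 4 = 3.8.
  assert jnp.allclose(ema(old, new, mu=0.9, t=0), 3.8)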
|
jax_privacy-main
|
jax_privacy/src/training/averaging.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Virtual batching to allow easy accumulation across devices and steps.
Typical usage:
batching = VirtualBatching(
batch_size_init=128,
batch_size_per_device_per_step=32,
num_replicas=2,
)
update_step = 0
# At each full step, the batch-size accumulated across devices is 32*2=64.
batching.batch_size_per_step
>>> 64
# Thus in order to get a total batch-size of 128, we must accumulate over 2
# steps before actually applying a model update.
batching.apply_update_every(update_step)
>>> 2
"""
from typing import Optional
import chex
import jax
import optax
class VirtualBatching:
"""Batching across devices and steps with a potential schedule."""
def __init__(
self,
batch_size_init: int,
batch_size_per_device_per_step: int,
scale_schedule: Optional[dict[int, int]] = None,
num_replicas: Optional[int] = None,
):
"""Init function.
Args:
batch_size_init: Initial value for the total batch-size.
batch_size_per_device_per_step: Batch-size to fit on each device at
every step.
scale_schedule: Schedule to adapt the total batch-size across iterations.
num_replicas: Number of replicas to use for data parallelization.
"""
self.batch_size_init = batch_size_init
self.batch_size_per_device_per_step = batch_size_per_device_per_step
self.scale_schedule = scale_schedule
self.num_replicas = (
num_replicas if num_replicas is not None else jax.device_count())
if self.batch_size_init % self.batch_size_per_step:
raise ValueError(
f'Batch-size {self.batch_size_init} not divisible by '
f'{self.batch_size_per_device_per_step} * {self.num_replicas}'
)
self._batch_size_fn = optax.piecewise_constant_schedule(
init_value=self.batch_size_init,
boundaries_and_scales=self.scale_schedule,
)
def batch_size(self, update_step: chex.Numeric) -> chex.Numeric:
"""Total batch-size at a given full update step."""
return self._batch_size_fn(update_step)
@property
def batch_size_per_step(self) -> chex.Numeric:
"""Batch-size per step (accumulated over devices) at any step."""
return self.batch_size_per_device_per_step * self.num_replicas
def apply_update_every(self, update_step: chex.Numeric) -> chex.Numeric:
"""Number of accumulation steps required before performing an update."""
return self.batch_size(update_step) // self.batch_size_per_step
def data_seen(self, global_step: chex.Numeric) -> chex.Numeric:
"""Total number of data points seen from beginning until global_step."""
return global_step * self.batch_size_per_step
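# Illustrative sketch (not part of the library): a batch-size schedule changes
# how many accumulation steps are needed per model update.
def _example_virtual_batching() -> None:
  batching = VirtualBatching(
      batch_size_init=128,
      batch_size_per_device_per_step=32,
      scale_schedule={100: 2},  # double the total batch-size after step 100
      num_replicas=2,
  )
  assert batching.apply_update_every(0) == 2    # 128 / (32 * 2)
  assert batching.apply_update_every(200) == 4  # 256 / (32 * 2)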
|
jax_privacy-main
|
jax_privacy/src/dp_sgd/batching.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for grad_clipping.py."""
import functools
import jax
import jax.numpy as jnp
from jax_privacy.src.dp_sgd import typing
_ValueAndGrad = tuple[
tuple[typing.Loss, tuple[typing.ModelStateT, typing.Metrics]],
typing.ParamsT,
]
class LoopAccumulator:
"""Accumulate or stack values and grads over a loop."""
def __init__(self, value_and_grad_fn: typing.ValueAndGradFn):
self._value_and_grad_fn = value_and_grad_fn
def initialize(
self,
batch_size: int,
*arg_shapes,
) -> _ValueAndGrad:
"""Initializes the scan loop."""
loss_and_grad = jax.eval_shape(self._value_and_grad_fn, *arg_shapes)
(loss, (network_state, metrics)), grads = jax.tree_util.tree_map(
jnp.zeros_like, loss_and_grad)
metrics = metrics.replace(
per_example={
'grad_norm': jnp.zeros((batch_size,), dtype=loss.dtype),
**metrics.per_example,
})
return (loss, (network_state, metrics)), grads
def accumulate(
self,
value_and_grad: _ValueAndGrad,
value_and_grad_i: _ValueAndGrad,
i: int,
batch_size: int,
) -> _ValueAndGrad:
"""Running average or stack of `value_and_grad_i` into `value_and_grad`."""
def accumulate_mean(array: jax.Array, array_i: jax.Array) -> jax.Array:
return array + array_i / batch_size
def accumulate_sum(array: jax.Array, array_i: jax.Array) -> jax.Array:
return array + array_i
def update_at_i(array: jax.Array, array_i: jax.Array) -> jax.Array:
array_i = jnp.reshape(array_i, array[i].shape)
return array.at[i].set(array_i)
(loss, (unused_state, metrics)), grads = value_and_grad
(loss_i, (state_i, metrics_i)), grads_i = value_and_grad_i
loss = accumulate_mean(loss, loss_i)
metrics_avg = jax.tree_map(
accumulate_mean, metrics.scalars_avg, metrics_i.scalars_avg)
metrics_sum = jax.tree_map(
accumulate_sum, metrics.scalars_sum, metrics_i.scalars_sum)
grads = jax.tree_map(accumulate_mean, grads, grads_i)
metrics_per_example = jax.tree_map(
update_at_i, metrics.per_example, metrics_i.per_example)
metrics = typing.Metrics(
scalars_avg=metrics_avg,
scalars_sum=metrics_sum,
per_example=metrics_per_example,
)
return (loss, (state_i, metrics)), grads
def reduce_vmap(
value_and_grads: _ValueAndGrad,
) -> tuple[
tuple[typing.Loss, tuple[typing.ModelStateT, typing.Metrics]],
typing.ParamsT,
]:
"""Reduces the vmapped outputs."""
(loss, (network_state, metrics)), grads = value_and_grads
tree_mean = (
lambda tree: jax.tree_map(functools.partial(jnp.mean, axis=0), tree))
tree_sum = (
lambda tree: jax.tree_map(functools.partial(jnp.sum, axis=0), tree))
tree_squeeze = lambda tree: jax.tree_map(jnp.squeeze, tree)
loss = tree_mean(loss)
grads = tree_mean(grads)
metrics = metrics.replace(
scalars_avg=tree_mean(metrics.scalars_avg),
per_example=tree_squeeze(metrics.per_example),
scalars_sum=tree_sum(metrics.scalars_sum),
)
return (loss, (network_state, metrics)), grads
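# Note: `LoopAccumulator` and `reduce_vmap` are two ways of combining
# per-example values and gradients: the former averages/stacks them one example
# at a time inside a loop, while the latter reduces the extra leading axis
# produced by `jax.vmap`.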
|
jax_privacy-main
|
jax_privacy/src/dp_sgd/grad_clipping_utils.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Differentially private gradient computation."""
from typing import Callable, Generic, Optional
import chex
import jax
import jax.numpy as jnp
from jax_privacy.src.dp_sgd import devices
from jax_privacy.src.dp_sgd import grad_clipping
from jax_privacy.src.dp_sgd import optim
from jax_privacy.src.dp_sgd import typing
import optax
class GradientComputer(
# False positive error with pytype failing to use a `TypeVar` imported
# from elsewhere.
# pytype: disable=invalid-annotation
Generic[typing.InputsT, typing.ParamsT, typing.ModelStateT]
# pytype: enable=invalid-annotation
):
"""Computes (potentially) clipped and noisy gradients."""
def __init__(
self,
*,
clipping_norm: Optional[float],
noise_multiplier: Optional[float],
rescale_to_unit_norm: bool,
vectorize_grad_clipping: bool,
device_layout: devices.DeviceLayout = devices.DeviceLayout(),
rng_per_param_fn: Callable[[chex.PRNGKey], chex.PRNGKey] = lambda x: x,
global_norm_fn: typing.NormFn = optax.global_norm,
):
"""Initialises the gradient computation.
Args:
clipping_norm: maximum L2 norm to which the input tree should be clipped.
noise_multiplier: standard deviation of the noise to add to the average
of the clipped gradient to make it differentially private. It will be
        multiplied by `clipping_norm / total_batch_size` before the noise is
        actually added.
rescale_to_unit_norm: whether the tree should be rescaled to have an L2
norm of one once it got clipped.
vectorize_grad_clipping: Whether to use the `vmap` version of gradient
clipping (as opposed to an unrolled loop). This is faster, but uses
more memory.
device_layout: Common args to `pmap` and `psum` for data parallelism.
rng_per_param_fn: Optional callable to allow gradient noise random keys
to be specialised for different param slices.
global_norm_fn: function to compute the L2 norm of an ArrayTree.
"""
self._clipping_norm = clipping_norm
self._noise_multiplier = noise_multiplier
self._rescale_to_unit_norm = rescale_to_unit_norm
self._vectorize_grad_clipping = vectorize_grad_clipping
self._device_layout = device_layout
self._rng_per_param_fn = rng_per_param_fn
self._global_norm_fn = global_norm_fn
@property
def clipping_norm(self):
return self._clipping_norm
@property
def using_clipped_grads(self):
return self.clipping_norm not in (float('inf'), None)
def global_norm(self, x: chex.ArrayTree) -> chex.ArrayTree:
return self._global_norm_fn(x)
def clean_gradients(
self,
loss_fn: typing.LossFn,
params: typing.ParamsT,
network_state: typing.ModelStateT,
rng_per_batch: chex.PRNGKey,
accumulation_step: chex.Array,
inputs: typing.InputsT,
) -> typing.ParamsT:
"""Computes unclipped gradients of the given loss function.
Args:
loss_fn: Loss function whose gradients are required.
params: Trainable parameters.
network_state: Network state input to `loss_fn`.
rng_per_batch: Random number key, expected to be common across devices
and across micro-batches constituting the same logical batch.
accumulation_step: Micro-batch number within a logical batch.
inputs: Inputs to `loss_fn`.
Returns:
Unclipped gradients.
"""
rng_per_example = self._rng_per_example(rng_per_batch, accumulation_step)
# Compute gradients of the loss function w.r.t. the parameters.
device_grads, unused_aux = jax.grad(loss_fn, has_aux=True)(
params, network_state, rng_per_example, inputs)
avg_grads = jax.lax.pmean(
device_grads, **self._device_layout.data_psum_kwargs)
return avg_grads
def loss_and_clipped_gradients(
self,
loss_fn: typing.LossFn,
params: typing.ParamsT,
network_state: typing.ModelStateT,
rng_per_batch: chex.PRNGKey,
accumulation_step: chex.Array,
inputs: typing.InputsT,
) -> tuple[
tuple[typing.Loss, tuple[typing.ModelStateT, typing.Metrics]],
typing.ParamsT,
]:
"""Computes (potentially) clipped gradients of the given loss function.
Args:
loss_fn: Loss function whose clipped gradients are required.
params: Trainable parameters.
network_state: Network state input to `loss_fn`.
rng_per_batch: Random number key, expected to be common across devices
and across micro-batches constituting the same logical batch.
accumulation_step: Micro-batch number within a logical batch.
inputs: Inputs to `loss_fn`.
Returns:
Tuple consisting of (loss-and-aux, clipped_grads)
      where `loss-and-aux` is as returned by `loss_fn` (with the addition
of the grad norm per example in the metrics).
"""
rng_per_example = self._rng_per_example(rng_per_batch, accumulation_step)
# Compute clipped-per-example gradients of the loss function w.r.t. the
# parameters.
(loss, (network_state, metrics)), device_grads = (
self.value_and_clipped_grad(jax.value_and_grad(loss_fn, has_aux=True))(
params, network_state, rng_per_example, inputs))
# Synchronize metrics and gradients across devices.
loss, metrics_avg, avg_grads = jax.lax.pmean(
(loss, metrics.scalars_avg, device_grads),
**self._device_layout.data_psum_kwargs,
)
metrics_sum = jax.lax.psum(
metrics.scalars_sum,
**self._device_layout.data_psum_kwargs,
)
metrics_per_example = jax.lax.all_gather(
metrics.per_example,
**self._device_layout.data_psum_kwargs,
tiled=True,
)
metrics = typing.Metrics(
scalars_avg=metrics_avg,
scalars_sum=metrics_sum,
per_example=metrics_per_example,
)
return (loss, (network_state, metrics)), avg_grads
def value_and_clipped_grad(
self,
value_and_grad_fn: typing.ValueAndGradFn,
) -> typing.ValueAndGradFn:
"""Creates the function commputing (potentially) clipped gradients.
Args:
value_and_grad_fn: Function that produces unclipped gradients.
It is expected to have the following signature:
`(loss, aux), grad = grad_fn(params, inputs, network_state, rng_key)`.
Returns:
A function computing gradients that are potentially clipped per sample.
"""
if not self.using_clipped_grads:
if self._rescale_to_unit_norm:
raise ValueError('Cannot rescale to unit norm without clipping.')
return value_and_grad_fn
clipping_fn = grad_clipping.global_clipping(
global_norm_fn=self._global_norm_fn,
clipping_norm=self._clipping_norm,
rescale_to_unit_norm=self._rescale_to_unit_norm,
)
if self._vectorize_grad_clipping:
# Compute gradients clipped per sample using vectorization.
return grad_clipping.value_and_clipped_grad_vectorized(
value_and_grad_fn,
clipping_fn=clipping_fn)
else:
# Compute gradients clipped per sample using a (JAX) loop.
return grad_clipping.value_and_clipped_grad_loop(
value_and_grad_fn,
clipping_fn=clipping_fn)
def _rng_per_example(
self,
rng_per_batch: chex.PRNGKey,
accumulation_step: chex.Array,
) -> chex.PRNGKey:
"""Returns a random key specialised per sample."""
# Note on rngs:
# - rng_per_batch is common across replicas and accumulation steps.
# - rng_per_microbatch is common across devices for one accumulation step.
    # - rng_per_example is specialised per sample (for independent randomness).
rng_per_microbatch = jax.random.fold_in(rng_per_batch, accumulation_step)
rng_per_example = jax.random.fold_in(
rng_per_microbatch, self._device_layout.replica_index)
return rng_per_example
def add_noise_to_grads(
self,
grads: typing.ParamsT,
rng_per_batch: chex.PRNGKey,
total_batch_size: int,
) -> tuple[typing.ParamsT, chex.Numeric]:
"""Adds noise to gradients.
Args:
grads: gradients to privatize.
rng_per_batch: random number generation key.
total_batch_size: total batch-size once accumulated over devices and steps
(i.e. as seen by the optimizer performing the update).
Returns:
noisy_grads: gradients with the added noise.
std: standard deviation used for the noise (for monitoring purposes).
"""
return optim.add_noise_to_grads(
grads=grads,
rng_per_batch=self._rng_per_param_fn(rng_per_batch),
total_batch_size=total_batch_size,
clipping_norm=self._clipping_norm,
rescale_to_unit_norm=self._rescale_to_unit_norm,
noise_multiplier=self._noise_multiplier,
)
def l2_loss(self, params: typing.ParamsT) -> chex.Numeric:
"""Computes the squared L2 loss.
Args:
params: model parameters for which the loss should be computed, assumed to
be in haiku-like format.
Returns:
Squared L2 loss.
"""
return 0.5 * jnp.square(self._global_norm_fn(params))
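# Illustrative sketch (not part of the library): a typical DP-SGD gradient
# computer with assumed values. Per the `noise_multiplier` docstring above, the
# added noise has standard deviation
# noise_multiplier * clipping_norm / total_batch_size.
def _example_gradient_computer() -> GradientComputer:
  return GradientComputer(
      clipping_norm=1.0,
      noise_multiplier=1.1,
      rescale_to_unit_norm=False,
      vectorize_grad_clipping=True,
  )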
|
jax_privacy-main
|
jax_privacy/src/dp_sgd/gradients.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
jax_privacy-main
|
jax_privacy/src/dp_sgd/__init__.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for testing pmapped operations."""
import contextlib
import functools
from unittest import mock
import chex
import jax
import jax.numpy as jnp
from jax_privacy.src.dp_sgd import devices
class PmapTestCase(chex.TestCase):
"""Test case that simulates a multi-device pmapped environment."""
def setUp(self):
super().setUp()
self._device_layout = devices.DeviceLayout(pmap_axis_name='test')
@contextlib.contextmanager
def patch_collectives(self, axis_index=3):
mock_axis_index = functools.partial(self._axis_index, axis_index=axis_index)
with mock.patch('jax.lax.axis_index', new=mock_axis_index), \
mock.patch('jax.lax.pmean', new=self._pmean), \
mock.patch('jax.lax.psum', new=self._psum), \
mock.patch('jax.lax.all_gather', new=self._all_gather):
yield
def _axis_index(self, axis_name, *, axis_index):
self.assertEqual(axis_name, 'test')
return axis_index
def _psum(self, x, axis_name, *, axis_index_groups):
"""Patch to psum."""
self.assertEqual(axis_name, 'test')
self.assertIsNone(axis_index_groups)
# Assume four devices, two of which have zeros.
return jax.tree_map(lambda t: 2. * t, x)
def _pmean(self, x, axis_name, *, axis_index_groups):
"""Patch to pmean."""
self.assertEqual(axis_name, 'test')
self.assertIsNone(axis_index_groups)
# Assume four devices, two of which have zeros.
return jax.tree_map(lambda t: t / 2., x)
def _all_gather(self, x, axis_name, *, axis_index_groups, tiled=False):
"""Patch to all_gather."""
self.assertEqual(axis_name, 'test')
self.assertIsNone(axis_index_groups)
# Assume four devices, two of which have zeros.
mask = jnp.array([1., 0., 0., 1.])
result = jax.tree_map(
lambda t: t * jnp.expand_dims(mask, axis=list(range(1, 1 + t.ndim))), x)
if tiled:
# Merge parallelization and batching dimensions if tiled.
result = jax.tree_map(
lambda t: jnp.reshape(t, [-1, *t.shape[2:]]), result)
return result
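# Note: the patched collectives above emulate four devices of which two hold
# zeros, so `psum` doubles a replica's contribution, `pmean` halves it, and
# `all_gather` zeroes out the two middle replicas.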
|
jax_privacy-main
|
jax_privacy/src/dp_sgd/pmap_testing.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `gradients.py`."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
from jax_privacy.src.dp_sgd import gradients
from jax_privacy.src.dp_sgd import pmap_testing
from jax_privacy.src.dp_sgd import typing
import numpy as np
import optax
def _tree_dot(w: chex.ArrayTree, x: chex.ArrayTree) -> chex.Array:
"""Returns scalar (dot) product of two compatible array trees."""
per_node_products = jax.tree_map(lambda a, b: jnp.sum(a * b), w, x)
flat_products, _ = jax.tree_util.tree_flatten(per_node_products)
return sum(flat_products)
_RNG_SCALE = 1.e7
class GradientsTest(pmap_testing.PmapTestCase):
def test_clean_gradients(self):
gradient_computer = gradients.GradientComputer(
clipping_norm=None,
noise_multiplier=None,
rescale_to_unit_norm=False,
vectorize_grad_clipping=False,
device_layout=self._device_layout,
)
inputs = jnp.array([3., 4.])
network_state = {'k': jnp.array(5.)}
params = self._params_for_testing_loss(inputs, network_state)
rng_per_batch = jax.random.PRNGKey(54)
step = jnp.array(6)
testing_loss = functools.partial(self._testing_loss, include_rngs=True)
with self.patch_collectives(axis_index=3):
avg_grads = gradient_computer.clean_gradients(
testing_loss, params, network_state, rng_per_batch, step, inputs)
# Gradients from next accumulation step.
avg_grads_next = gradient_computer.clean_gradients(
testing_loss, params, network_state, rng_per_batch, step+1, inputs)
with self.patch_collectives(axis_index=2):
# Gradients from different device.
avg_grads_remote = gradient_computer.clean_gradients(
testing_loss, params, network_state, rng_per_batch, step, inputs)
# Gradients are expected to be 0.5 * inputs, as arranged by
# `self._testing_loss`. The factor of 0.5 arises from taking `pmean`.
chex.assert_trees_all_close(
jax.tree_map(lambda t: jnp.mean(t, axis=0) / 2., inputs),
avg_grads['w_inputs'])
chex.assert_trees_all_close(
jax.tree_map(lambda t: t / 2., network_state),
avg_grads['w_network_state'])
# rng_per_example should vary across devices and accumulation steps.
self.assertNotEqual(
jnp.sum(avg_grads['w_rng_per_example']),
jnp.sum(avg_grads_next['w_rng_per_example']),
)
self.assertNotEqual(
jnp.sum(avg_grads['w_rng_per_example']),
jnp.sum(avg_grads_remote['w_rng_per_example']),
)
@parameterized.named_parameters(
('no_clipping', None, False),
('vacuous_clipping_looped', 1.e+10, False),
('vacuous_clipping_vectorised', 1.e+10, True),
)
def test_non_clipped_gradients(self, clipping_norm, vectorize):
gradient_computer = gradients.GradientComputer(
clipping_norm=clipping_norm,
noise_multiplier=None,
rescale_to_unit_norm=False,
vectorize_grad_clipping=vectorize,
device_layout=self._device_layout,
)
inputs = jnp.array([[3., 4.], [5., 7.]])
network_state = {'k': jnp.array(5.)}
params = self._params_for_testing_loss(inputs, network_state)
rng_per_batch = jax.random.PRNGKey(54)
step = jnp.array(6)
testing_loss = functools.partial(self._testing_loss, include_rngs=True)
with self.patch_collectives(axis_index=3):
_, avg_grads = gradient_computer.loss_and_clipped_gradients(
testing_loss, params, network_state, rng_per_batch, step, inputs)
# Gradients from next accumulation step.
_, avg_grads_next = gradient_computer.loss_and_clipped_gradients(
testing_loss, params, network_state, rng_per_batch, step+1, inputs)
with self.patch_collectives(axis_index=2):
# Gradients from different device.
_, avg_grads_remote = gradient_computer.loss_and_clipped_gradients(
testing_loss, params, network_state, rng_per_batch, step, inputs)
# Gradients are expected to be 0.5 * inputs, as arranged by
# `self._testing_loss`. The factor of 0.5 arises from taking `pmean`.
chex.assert_trees_all_close(
jax.tree_map(lambda t: jnp.mean(t, axis=0) / 2., inputs),
avg_grads['w_inputs'])
chex.assert_trees_all_close(
jax.tree_map(lambda t: t / 2., network_state),
avg_grads['w_network_state'])
# rng_per_example should vary across devices and accumulation steps.
self.assertNotEqual(
jnp.sum(avg_grads['w_rng_per_example']),
jnp.sum(avg_grads_next['w_rng_per_example']),
)
self.assertNotEqual(
jnp.sum(avg_grads['w_rng_per_example']),
jnp.sum(avg_grads_remote['w_rng_per_example']),
)
@parameterized.parameters(
(1.e-5, True),
(3.e-2, False),
(1., True),
(20., False),
)
def test_clipped_gradients_looped_equal_vectorised(
self, clipping_norm, rescale_to_unit_norm):
gradient_computer = gradients.GradientComputer(
clipping_norm=clipping_norm,
noise_multiplier=None,
rescale_to_unit_norm=rescale_to_unit_norm,
vectorize_grad_clipping=False,
device_layout=self._device_layout,
)
gradient_computer_v = gradients.GradientComputer(
clipping_norm=clipping_norm,
noise_multiplier=None,
rescale_to_unit_norm=rescale_to_unit_norm,
vectorize_grad_clipping=True,
device_layout=self._device_layout,
)
inputs = jnp.array([[3., 4.], [5., 7.]])
network_state = {'k': jnp.array(5.)}
params = self._params_for_testing_loss(inputs, network_state)
rng_per_batch = jax.random.PRNGKey(54)
step = jnp.array(6)
testing_loss = functools.partial(self._testing_loss, include_rngs=True)
with self.patch_collectives(axis_index=3):
_, avg_grads = gradient_computer.loss_and_clipped_gradients(
testing_loss, params, network_state, rng_per_batch, step, inputs)
_, avg_grads_v = gradient_computer_v.loss_and_clipped_gradients(
testing_loss, params, network_state, rng_per_batch, step, inputs)
chex.assert_trees_all_close(avg_grads, avg_grads_v)
@parameterized.named_parameters(
('noscale_looped', False, False),
('noscale_vectorised', False, True),
('rescale_looped', True, False),
('rescale_vectorised', True, True),
)
def test_tightly_clipped_correctly_normalised(
self, rescale_to_unit_norm, vectorize):
clipping_norm = 1.e-2
gradient_computer = gradients.GradientComputer(
clipping_norm=clipping_norm,
noise_multiplier=None,
rescale_to_unit_norm=rescale_to_unit_norm,
vectorize_grad_clipping=vectorize,
device_layout=self._device_layout,
)
inputs = jnp.array([[3., 4., 1.], [5., 7., 2.]])
network_state = {'k': jnp.array(5.)}
params = self._params_for_testing_loss(inputs, network_state)
rng_per_batch = jax.random.PRNGKey(54)
step = jnp.array(6)
batch_size = inputs.shape[0]
with self.patch_collectives(axis_index=3):
clean_grads_per_example = [
gradient_computer.clean_gradients(
self._testing_loss, params, network_state, rng_per_batch, step,
inputs[i:i+1]) for i in range(batch_size)]
_, avg_grads = gradient_computer.loss_and_clipped_gradients(
self._testing_loss, params, network_state, rng_per_batch, step,
inputs)
# Assuming that the clipping will be effective for each example,
# we expect each example's tree of gradients to be normalised to
# `clipping_norm`. These are then averaged across examples.
clean_grad_norms = [
optax.global_norm(clean_grads)
for clean_grads in clean_grads_per_example]
normalised_grads = [
jax.tree_map(
lambda x, i=i: x / clean_grad_norms[i],
clean_grads_per_example[i]
) for i in range(batch_size)]
# The factor of 0.5 arises from taking `pmean`.
expected_avg_grads = jax.tree_map(
lambda *x: sum(x) / batch_size / 2, *normalised_grads)
if not rescale_to_unit_norm:
expected_avg_grads = jax.tree_map(
lambda x: x * clipping_norm, expected_avg_grads)
chex.assert_trees_all_close(expected_avg_grads, avg_grads)
@parameterized.named_parameters(
('no_clipping', None, False),
('clipping', 3., False),
('clipping_vectorised', 3., True),
)
def test_batch_size_1(self, clipping_norm, vectorize):
gradient_computer = gradients.GradientComputer(
clipping_norm=clipping_norm,
noise_multiplier=None,
rescale_to_unit_norm=False,
vectorize_grad_clipping=vectorize,
device_layout=self._device_layout,
)
# Test that a single example gives the same (averaged) gradients as
# a batch of several identical copies of it.
inputs = jnp.array([[3., 8., 5.]])
inputs_dup = jnp.array([inputs] * 3)
network_state = {'k': jnp.array(5.)}
params = self._params_for_testing_loss(inputs, network_state)
rng_per_batch = jax.random.PRNGKey(54)
step = jnp.array(6)
with self.patch_collectives(axis_index=3):
_, avg_grads = gradient_computer.loss_and_clipped_gradients(
self._testing_loss, params, network_state, rng_per_batch, step,
inputs)
_, avg_grads_dup = gradient_computer.loss_and_clipped_gradients(
self._testing_loss,
params, network_state, rng_per_batch, step, inputs_dup)
for key in ('w_inputs', 'w_network_state'):
chex.assert_trees_all_close(
avg_grads[key], avg_grads_dup[key], atol=1.e-6)
@parameterized.named_parameters(
('no_clipping', None, False),
('vacuous_clipping_looped', 1., False),
('vacuous_clipping_vectorised', 1., True),
)
def test_aux_aggregation(self, clipping_norm, vectorize):
gradient_computer = gradients.GradientComputer(
clipping_norm=clipping_norm,
noise_multiplier=None,
rescale_to_unit_norm=False,
vectorize_grad_clipping=vectorize,
device_layout=self._device_layout,
)
inputs = jnp.array([[3., 4.], [5., 7.], [2., -1.], [1., 0.], [3., 1.]])
network_state = {'k': jnp.array(5.)}
params = self._params_for_testing_loss(inputs, network_state)
rng_per_batch = jax.random.PRNGKey(54)
step = jnp.array(6)
batch_size = inputs.shape[0]
with self.patch_collectives(axis_index=3):
(
(loss, (new_network_state, metrics)), unused_grads
) = gradient_computer.loss_and_clipped_gradients(
self._testing_loss, params, network_state, rng_per_batch, step,
inputs)
chex.assert_trees_all_close(network_state, new_network_state)
# Averaged.
chex.assert_shape(loss, ())
chex.assert_shape(metrics.scalars_avg.get('aggregate'), (3,))
# Stacked, over all devices.
if clipping_norm:
chex.assert_shape(metrics.per_example.get('grad_norm'), (4 * batch_size,))
chex.assert_shape(metrics.per_example.get('loss'), (4 * batch_size,))
chex.assert_shape(metrics.per_example.get('other'), (4 * batch_size, 3, 2))
@parameterized.parameters((None,), (3.,))
def test_adding_zero_noise(self, clipping_norm):
gradient_computer = gradients.GradientComputer(
clipping_norm=clipping_norm,
noise_multiplier=0.,
rescale_to_unit_norm=False,
vectorize_grad_clipping=False,
)
grads = {'a': jnp.array(3.), 'b': [jnp.array([4., 5.]), jnp.array([6.])]}
rng_per_batch = jax.random.PRNGKey(54)
noisy_grads, std = gradient_computer.add_noise_to_grads(
grads, rng_per_batch, total_batch_size=8)
chex.assert_trees_all_close(grads, noisy_grads)
self.assertEqual(std, 0.)
@parameterized.parameters((False,), (True,))
def test_cannot_add_noise_without_clipping(self, rescale_to_unit_norm):
gradient_computer = gradients.GradientComputer(
clipping_norm=None,
noise_multiplier=.2,
rescale_to_unit_norm=rescale_to_unit_norm,
vectorize_grad_clipping=False,
)
grads = {'a': jnp.array(3.), 'b': [jnp.array([4., 5.]), jnp.array([6.])]}
rng_per_batch = jax.random.PRNGKey(54)
with self.assertRaises(ValueError):
gradient_computer.add_noise_to_grads(
grads, rng_per_batch, total_batch_size=8)
@parameterized.parameters(
(0., False, 0., 4, 0.),
(.1, False, 0., 4, 0.),
(.1, False, 3., 4, .075),
(.1, True, 3., 4, .75),
(10., False, 5., 4, 12.5),
(10., True, 5., 4, 1.25),
)
def test_adding_noise(
self,
clipping_norm, rescale_to_unit_norm, noise_multiplier, total_batch_size,
expected_std):
gradient_computer = gradients.GradientComputer(
clipping_norm=clipping_norm,
noise_multiplier=noise_multiplier,
rescale_to_unit_norm=rescale_to_unit_norm,
vectorize_grad_clipping=False,
)
grads = jnp.zeros((1_000_000,))
rng_per_batch = jax.random.PRNGKey(54)
noisy_grads, std = gradient_computer.add_noise_to_grads(
grads, rng_per_batch, total_batch_size=total_batch_size)
np.testing.assert_approx_equal(expected_std, std)
np.testing.assert_approx_equal(np.mean(noisy_grads**2), std**2,
significant=2)
def _testing_loss(
self, params, network_state, rng_per_example, inputs, include_rngs=False):
"""Simulates the loss function."""
# Ensure that random keys have been passed in correctly.
self.assertEqual((2,), rng_per_example.shape)
self.assertEqual(jnp.uint32, rng_per_example.dtype)
# Loss functions MUST be mean-additive.
batch_size = jax.tree_util.tree_leaves(inputs)[0].shape[0]
sum_to_mean = lambda x: x / batch_size
# Take dot product of params with other inputs, so that the gradients
# reflect the inputs provided.
loss = sum([
jax.tree_map(sum_to_mean, _tree_dot(params['w_inputs'], inputs)),
_tree_dot(params['w_network_state'], network_state),
include_rngs * _tree_dot(
params['w_rng_per_example'], rng_per_example / _RNG_SCALE),
])
metrics = typing.Metrics(
scalars_avg={'aggregate': jnp.array([1., 2., 3.])},
per_example={
'loss': loss * jnp.ones((batch_size,)),
'other': jnp.ones((batch_size, 3, 2)),
},
)
return loss, (network_state, metrics)
def _params_for_testing_loss(self, inputs, network_state):
return {
'w_inputs': jax.tree_map(lambda x: jnp.zeros_like(x[0]), inputs),
'w_network_state': jax.tree_map(jnp.zeros_like, network_state),
'w_rng_per_example': jnp.zeros(jax.random.PRNGKey(0).shape),
}
if __name__ == '__main__':
absltest.main()
|
jax_privacy-main
|
jax_privacy/src/dp_sgd/gradients_test.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for grad_clipping."""
import functools
from absl.testing import absltest
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jax_privacy.src.dp_sgd import grad_clipping
from jax_privacy.src.dp_sgd import typing
import optax
NUM_SAMPLES = 4
NUM_CLASSES = 7
INPUT_SHAPE = (12, 12, 3)
MAX_NORM = 1e-4
def model_fn(inputs, num_classes):
"""Mini ConvNet."""
out = hk.Conv2D(4, 3)(inputs)
out = jax.nn.relu(out)
out = hk.Conv2D(4, 3)(out)
out = jax.nn.relu(out)
out = jnp.mean(out, axis=[1, 2])
out = hk.Linear(num_classes)(out)
return out
def grad_clipped_per_sample_naive(forward_fn, clipping_norm):
"""Naive implementation for computing gradients clipped per-example."""
grad_fn = jax.grad(forward_fn, has_aux=True)
def accumulate(tree_acc, tree_new, coeff):
return jax.tree_util.tree_map(
lambda leaf_acc, leaf_new: leaf_acc + leaf_new * coeff,
tree_acc,
tree_new,
)
def clipped_grad_fn(params, network_state, rng_per_example, inputs):
loss, (network_state, metrics) = forward_fn(
params, network_state, rng_per_example, inputs)
images, labels = inputs
batch_size = len(images)
grads = jax.tree_util.tree_map(jnp.zeros_like, params)
grad_norms = []
# compute one clipped gradient at a time
for i in range(batch_size):
# expand image: function expects a batch dimension
input_i = (jnp.expand_dims(images[i], 0), labels[i])
grad_i, unused_aux = grad_fn(
params, network_state, rng_per_example, input_i)
norm_grad_i = jnp.sqrt(
sum(jnp.sum(x**2) for x in jax.tree_util.tree_leaves(grad_i)))
# multiplicative factor equivalent to clipping norm
coeff = jnp.minimum(1, clipping_norm / norm_grad_i) / batch_size
# normalize by batch_size and accumulate
grads = accumulate(grads, grad_i, coeff)
grad_norms.append(optax.global_norm(grad_i))
metrics = metrics.replace(
per_example={
'grad_norm': jnp.array(grad_norms),
**metrics.per_example,
}
)
return (loss, (network_state, metrics)), grads
return clipped_grad_fn
class TestClippedGradients(chex.TestCase):
"""Check numerically that gradients are correctly clipped."""
def setUp(self):
super().setUp()
rng_seq = hk.PRNGSequence(0)
images = jax.random.normal(
next(rng_seq), shape=(NUM_SAMPLES,) + INPUT_SHAPE)
labels = jax.random.randint(
next(rng_seq), shape=[NUM_SAMPLES], minval=0, maxval=NUM_CLASSES)
labels_one_hot = hk.one_hot(labels, NUM_CLASSES)
self.net = hk.transform_with_state(
functools.partial(model_fn, num_classes=NUM_CLASSES))
self.params, self.network_state = self.net.init(next(rng_seq), images)
self.inputs = (images, labels_one_hot)
self.rng_per_batch = next(rng_seq)
self.rng_per_example = jax.random.fold_in(self.rng_per_batch, 1)
self.tol = {'rtol': 1e-6, 'atol': 1e-6}
def forward(self, params, state, rng_per_example, inputs):
images, labels = inputs
logits, state = self.net.apply(params, state, rng_per_example, images)
loss_vector = optax.softmax_cross_entropy(logits, labels)
metrics = typing.Metrics(
per_example={'loss': loss_vector, 'a': jnp.ones([images.shape[0]])},
scalars_avg={'loss': jnp.mean(loss_vector), 'b': jnp.ones([])},
)
return jnp.mean(loss_vector), (state, metrics)
def forward_per_sample(self, params, state, rng_per_example, inputs):
images, labels = inputs
logits, state = self.net.apply(params, state, rng_per_example, images)
loss_vector = optax.softmax_cross_entropy(logits, labels)
metrics = typing.Metrics(
per_example={'loss': loss_vector, 'a': jnp.ones([images.shape[0]])},
scalars_avg={'loss': jnp.mean(loss_vector), 'b': jnp.ones([])},
)
return jnp.mean(loss_vector), (state, metrics)
@chex.variants(with_jit=True, without_jit=True)
def test_clipped_gradients(self):
value_and_grad_fn = jax.value_and_grad(self.forward, has_aux=True)
clipping_fn = grad_clipping.global_clipping(clipping_norm=MAX_NORM)
grad_fn_1 = grad_clipping.value_and_clipped_grad_vectorized(
value_and_grad_fn,
clipping_fn,
)
grad_fn_2 = grad_clipping.value_and_clipped_grad_loop(
value_and_grad_fn,
clipping_fn,
)
grad_fn_3 = grad_clipped_per_sample_naive(
self.forward,
clipping_norm=MAX_NORM,
)
grad_fn_args = (
self.params, self.network_state, self.rng_per_example, self.inputs)
(loss_1, aux_1), grad_1 = self.variant(grad_fn_1)(*grad_fn_args)
(loss_2, aux_2), grad_2 = self.variant(grad_fn_2)(*grad_fn_args)
(loss_3, aux_3), grad_3 = self.variant(grad_fn_3)(*grad_fn_args)
chex.assert_trees_all_close(loss_1, loss_2, loss_3, **self.tol)
chex.assert_trees_all_close(aux_1, aux_2, aux_3, **self.tol)
chex.assert_trees_all_close(grad_1, grad_2, grad_3, **self.tol)
@chex.variants(with_jit=True, without_jit=True)
def test_gradients_vectorized_and_loop_match_using_batch_rng(self):
value_and_grad_fn = jax.value_and_grad(self.forward, has_aux=True)
clipping_fn = lambda grads: (grads, optax.global_norm(grads))
grad_fn_1 = grad_clipping.value_and_clipped_grad_vectorized(
value_and_grad_fn,
clipping_fn=clipping_fn,
)
grad_fn_2 = grad_clipping.value_and_clipped_grad_loop(
value_and_grad_fn,
clipping_fn=clipping_fn,
)
grad_fn_3 = grad_clipped_per_sample_naive(
self.forward,
clipping_norm=float('inf'),
)
grad_fn_args = (
self.params, self.network_state, self.rng_per_example, self.inputs)
(loss_1, aux_1), grad_1 = self.variant(grad_fn_1)(*grad_fn_args)
(loss_2, aux_2), grad_2 = self.variant(grad_fn_2)(*grad_fn_args)
(loss_3, aux_3), grad_3 = self.variant(grad_fn_3)(*grad_fn_args)
chex.assert_trees_all_close(loss_1, loss_2, loss_3, **self.tol)
chex.assert_trees_all_close(aux_1, aux_2, aux_3, **self.tol)
chex.assert_trees_all_close(grad_1, grad_2, grad_3, **self.tol)
@chex.variants(with_jit=True, without_jit=True)
def test_gradients_vectorized_and_loop_match_using_per_sample_rng(self):
clipping_fn = lambda grads: (grads, optax.global_norm(grads))
grad_fn_1 = grad_clipping.value_and_clipped_grad_vectorized(
jax.value_and_grad(self.forward_per_sample, has_aux=True),
clipping_fn=clipping_fn,
)
grad_fn_2 = grad_clipping.value_and_clipped_grad_loop(
jax.value_and_grad(self.forward_per_sample, has_aux=True),
clipping_fn=clipping_fn,
)
grad_fn_args = (
self.params, self.network_state, self.rng_per_example, self.inputs)
(loss_1, aux_1), grad_1 = self.variant(grad_fn_1)(*grad_fn_args)
(loss_2, aux_2), grad_2 = self.variant(grad_fn_2)(*grad_fn_args)
chex.assert_trees_all_close(loss_1, loss_2, **self.tol)
chex.assert_trees_all_close(aux_1, aux_2, **self.tol)
chex.assert_trees_all_close(grad_1, grad_2, **self.tol)
@hk.transform_with_state
def simple_net(x):
"""A simple function that computes L = 3 * x + random noise."""
key = hk.next_rng_key()
noise = jax.random.normal(key, shape=x.shape)
return jnp.mean(3 * x + noise), noise
class TestClippedGradientsPerBatchPerSampleRNG(chex.TestCase):
"""Check per-batch rng and per-sample rng are handled correctly."""
def setUp(self):
super().setUp()
rng_seq = hk.PRNGSequence(0)
self.inputs = jax.random.normal(next(rng_seq), shape=(NUM_SAMPLES, 10))
self.params, self.state = simple_net.init(next(rng_seq), self.inputs)
self.rng_per_batch = next(rng_seq)
self.rng_per_example = jax.random.fold_in(self.rng_per_batch, 1)
self.grad_fn_args = (self.params, self.state,
self.rng_per_example, self.inputs)
self.no_clip = lambda grads: (grads, optax.global_norm(grads))
@chex.variants(with_jit=True, without_jit=True)
def test_per_sample_rng_produces_different_random_numbers(self):
@functools.partial(jax.value_and_grad, has_aux=True)
def value_and_grad_fn(params, state, rng_per_example, inputs):
(loss, noise), state = simple_net.apply(
params, state, rng_per_example, inputs)
metrics = typing.Metrics(
per_example={'noise': noise},
)
return loss, (state, metrics)
with self.subTest('vectorized'):
grad_fn = grad_clipping.value_and_clipped_grad_vectorized(
value_and_grad_fn, clipping_fn=self.no_clip)
(_, (_, metrics)), _ = self.variant(grad_fn)(*self.grad_fn_args)
# Noise should be different across all samples.
for i in range(1, NUM_SAMPLES):
self.assertTrue(
jnp.any(metrics.per_example['noise'][0]
!= metrics.per_example['noise'][i]))
with self.subTest('loop'):
grad_fn = grad_clipping.value_and_clipped_grad_loop(
value_and_grad_fn, clipping_fn=self.no_clip)
(_, (_, metrics)), _ = self.variant(grad_fn)(*self.grad_fn_args)
# Noise should be different across all samples.
for i in range(1, NUM_SAMPLES):
self.assertTrue(
jnp.any(metrics.per_example['noise'][0]
!= metrics.per_example['noise'][i]))
if __name__ == '__main__':
absltest.main()
|
jax_privacy-main
|
jax_privacy/src/dp_sgd/grad_clipping_test.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General types re-used across the codebase.
Sub-directories may tighten the typing by using more restrictive types than the
general Inputs, ModelState and Params defined here.
"""
import dataclasses
from typing import Callable, Generic, Literal, Mapping, Protocol, TypeVar
import chex
import jax
GradNorm = jax.Array
GradNormPerExample = jax.Array
Loss = jax.Array
InputsT = TypeVar('InputsT', bound=chex.ArrayTree)
ModelStateT = TypeVar('ModelStateT', bound=chex.ArrayTree)
ParamsT = TypeVar('ParamsT', bound=chex.ArrayTree)
AutoTuneField = Literal[
'batch_size',
'noise_multiplier',
'num_updates',
'stop_training_at_epsilon',
None,
]
@chex.dataclass
class Metrics:
"""Container for various metrics."""
scalars_avg: Mapping[str, chex.Numeric] = dataclasses.field(
default_factory=dict)
scalars_sum: Mapping[str, chex.Numeric] = dataclasses.field(
default_factory=dict)
per_example: Mapping[str, jax.Array] = dataclasses.field(
default_factory=dict)
@property
def scalars(self) -> Mapping[str, chex.Numeric]:
return {**self.scalars_avg, **self.scalars_sum}
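# Illustrative sketch (not part of the original module): `Metrics.scalars`
# merges the averaged and summed scalar metrics into a single mapping. The
# metric names and values below are assumed purely for demonstration.
def _example_metrics_scalars() -> Mapping[str, chex.Numeric]:
  metrics = Metrics(
      scalars_avg={'loss': 0.5},
      scalars_sum={'num_examples': 128},
  )
  # Returns {'loss': 0.5, 'num_examples': 128}.
  return metrics.scalars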
NormFn = Callable[[ParamsT], jax.Array]
GradClippingFn = Callable[[ParamsT], tuple[ParamsT, jax.Array]]
class LossFn(Protocol, Generic[InputsT, ParamsT, ModelStateT]):
def __call__(
self,
params: ParamsT,
network_state: ModelStateT,
rng_per_example: chex.PRNGKey,
inputs: InputsT,
) -> tuple[Loss, tuple[ModelStateT, Metrics]]:
"""Computes the loss function.
Args:
params: Trainable parameters.
network_state: Network state.
rng_per_example: a random number generation key specific for a device and
accumulation step. It can be used to create a unique seed per
individual example by the user.
inputs: Model inputs.
Returns:
Tuple consisting of (loss, aux).
"""
class ValueAndGradFn(Protocol, Generic[InputsT, ParamsT, ModelStateT]):
def __call__(
self,
params: ParamsT,
network_state: ModelStateT,
rng_per_example: chex.PRNGKey,
inputs: InputsT,
) -> tuple[tuple[Loss, tuple[ModelStateT, Metrics]], ParamsT]:
"""Computes (potentially clipped) gradients.
Args:
params: Trainable parameters.
network_state: Network state.
rng_per_example: a random number generation key specific for a device and
accumulation step. It can be used to create a unique seed per
individual example by the user.
inputs: Model inputs.
Returns:
Value, auxiliary outputs, and (potentially clipped) gradients.
"""
|
jax_privacy-main
|
jax_privacy/src/dp_sgd/typing.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for noise."""
from absl.testing import absltest
import chex
import jax
from jax_privacy.src.dp_sgd import optim
class TestTreeMapAddNormalNoise(chex.TestCase):
"""Test whether noisy inputs differ from inputs with and without noise."""
def setUp(self):
super().setUp()
self.rng_key = jax.random.PRNGKey(0)
self.noise_std = 0.1
key1, key2, key3 = jax.random.split(jax.random.PRNGKey(1), 3)
self.tree = {'a': jax.random.normal(key1, (2, 2)),
'b': [jax.random.normal(key2, (1, 2)),
jax.random.normal(key3, ())]}
def test_with_noise(self):
noisy_tree = optim.tree_map_add_normal_noise(
self.tree, self.noise_std, self.rng_key)
self.assertEqual(jax.tree_util.tree_structure(noisy_tree),
jax.tree_util.tree_structure(self.tree))
with self.assertRaises(AssertionError):
      chex.assert_trees_all_close(self.tree, noisy_tree)
def test_without_noise(self):
tree_with_zero_noise = optim.tree_map_add_normal_noise(
self.tree, 0.0, self.rng_key)
self.assertEqual(jax.tree_util.tree_structure(tree_with_zero_noise),
jax.tree_util.tree_structure(self.tree))
    chex.assert_trees_all_close(self.tree, tree_with_zero_noise)
if __name__ == '__main__':
absltest.main()
|
jax_privacy-main
|
jax_privacy/src/dp_sgd/optim_test.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computing gradients that are clipped per sample."""
import chex
import jax
import jax.numpy as jnp
from jax_privacy.src.dp_sgd import grad_clipping_utils
from jax_privacy.src.dp_sgd import typing
import optax
def safe_div(
numerator: chex.Array,
denominator: chex.Array,
eps: chex.Numeric = 1e-10,
) -> chex.Array:
"""Numerically safe division."""
return numerator / (denominator + eps)
def _placeholder_like(*args):
return jax.tree_util.tree_map(
lambda x: jax.ShapeDtypeStruct(x.shape, x.dtype), args)
def global_clipping(
clipping_norm: chex.Numeric,
global_norm_fn: typing.NormFn = optax.global_norm,
rescale_to_unit_norm: bool = False,
eps: chex.Numeric = 1e-10,
) -> typing.GradClippingFn:
"""Create a function that clips its input tree to have a maximum L2 norm.
The L2 norm is computed across leaves of the tree. If the input tree has an L2
norm that is less or equal to `clipping_norm`, it is left untouched by the
clipping operation. Otherwise it is scaled down by a positive factor so that
its new L2 norm is exactly `clipping_norm`.
Note that the clipping function will return NaN entries if the numerical
constant `eps` is not small enough. This is to loudly detect loss of
numerical precision that could lead to invalid results.
Args:
clipping_norm: maximum L2 norm to which the input tree should be clipped.
global_norm_fn: function to compute the L2 norm of an ArrayTree.
rescale_to_unit_norm: whether the tree should be rescaled to have an L2
norm of one once it got clipped.
eps: small numerical constant for numerical stability.
Returns:
Function that clips its input tree to have a maximum L2 norm of
`clipping_norm`.
"""
def coeff_fn(tree_norm: chex.Array) -> chex.Array:
one = jnp.ones((), dtype=tree_norm.dtype)
if rescale_to_unit_norm:
# coeff = min(1, clipping_norm / tree_norm) / clipping_norm
return jnp.minimum(
safe_div(one, clipping_norm, eps),
safe_div(one, tree_norm, eps)
)
else:
# coeff = min(1, clipping_norm / tree_norm)
return jnp.minimum(one, safe_div(clipping_norm, tree_norm, eps))
def clipping_fn(
grad: typing.ParamsT,
) -> tuple[typing.ParamsT, jax.Array]:
grad_norm = global_norm_fn(grad)
# If the value of `eps` is invalid because it is too large compared to
# `clipping_norm`, propagate NaNs to show that the computation is invalid.
# Note: this has the side effect of always back-propagating NaNs if we
# differentiate through this function, but this function is not meant to
# be differentiated, since it post-processes gradients in order to
# privatize them.
coeff = jnp.where(clipping_norm > eps, coeff_fn(grad_norm), jnp.nan)
return jax.tree_util.tree_map(lambda x: x * coeff, grad), grad_norm
return clipping_fn
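# Illustrative sketch (not part of the original module): behaviour of
# `global_clipping` on a toy gradient tree. The tree and clipping norm below
# are assumed purely for demonstration.
def _example_global_clipping():
  clip_fn = global_clipping(clipping_norm=1.0)
  grads = {'w': jnp.array([3.0, 4.0])}  # Global L2 norm is 5.0 > 1.0.
  clipped, grad_norm = clip_fn(grads)
  # `grad_norm` is the pre-clipping norm (5.0); `clipped` is the input tree
  # scaled by min(1, clipping_norm / grad_norm) = 0.2, i.e. its L2 norm is 1.0.
  return clipped, grad_norm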
def _value_and_clipped_grad_single_sample(
grad_fn: typing.ValueAndGradFn,
clipping_fn: typing.GradClippingFn,
) -> typing.ValueAndGradFn:
"""Creates a function that computes a clipped gradient for a single sample.
Args:
grad_fn: Function that produces unclipped gradients. It is expected to have
the following signature:
`(loss, (network_state, metrics)), grads = grad_fn(
params, network_state, rng_key, inputs)`,
where `inputs` has a batch dimension. The network state is assumed to be
independent of the `inputs`, and the metrics are accumulated according to
      their key (`per_example` / `scalars_avg` / `scalars_sum`).
clipping_fn: clipping function to apply to the gradient.
Returns:
Function that computes the gradient for a single (unbatched) sample and
clips it.
"""
def clipped_grad_fn(
params: typing.ParamsT,
network_state: typing.ModelStateT,
rng_per_example: chex.PRNGKey,
inputs: typing.InputsT,
) -> tuple[
tuple[typing.Loss, tuple[typing.ModelStateT, typing.Metrics]],
typing.ParamsT,
]:
# Add a batch-size dimension.
inputs_expanded = jax.tree_util.tree_map(
lambda x: jnp.expand_dims(x, axis=0),
inputs,
)
# Compute the gradient.
(loss, (network_state, metrics)), grad = grad_fn(
params, network_state, rng_per_example, inputs_expanded)
clipped_grad, grad_norm = clipping_fn(grad)
# Log the gradient norm per example.
metrics = metrics.replace(
per_example={'grad_norm': grad_norm, **metrics.per_example},
)
# Apply the clipping function
return (loss, (network_state, metrics)), clipped_grad
return clipped_grad_fn
def value_and_clipped_grad_loop(
grad_fn: typing.ValueAndGradFn,
clipping_fn: typing.GradClippingFn,
) -> typing.ValueAndGradFn:
"""Create a function that computes grads clipped per example using a loop.
Args:
grad_fn: Function that produces unclipped gradients. It is expected to have
the following signature:
`(loss, (network_state, metrics)), grads = grad_fn(
params, network_state, rng_key, inputs)`,
where `inputs` has a batch dimension. The network state is assumed to be
independent of the `inputs`, and the metrics are accumulated according to
      their key (`per_example` / `scalars_avg` / `scalars_sum`).
clipping_fn: clipping function to apply to every per-example gradient before
those get averaged.
Returns:
    Function that clips gradients per example and averages them.
"""
grad_fn_single_sample = _value_and_clipped_grad_single_sample(
grad_fn=grad_fn,
clipping_fn=clipping_fn,
)
accumulator = grad_clipping_utils.LoopAccumulator(grad_fn)
def clipped_grad_fn(
params: typing.ParamsT,
network_state: typing.ModelStateT,
rng_per_example: chex.PRNGKey,
inputs: typing.InputsT,
) -> tuple[
tuple[typing.Loss, tuple[typing.ModelStateT, typing.Metrics]],
typing.ParamsT,
]:
batch_size = jax.tree_util.tree_leaves(inputs)[0].shape[0]
rng_per_example = jax.random.split(rng_per_example, num=batch_size)
if batch_size == 1:
inputs_0 = jax.tree_util.tree_map(lambda x: x[0], inputs)
return grad_fn_single_sample(
params, network_state, rng_per_example[0], inputs_0)
def body(value_and_grad, i):
inputs_i = jax.tree_util.tree_map(lambda x: x[i], inputs)
value_and_grad_i = grad_fn_single_sample(
params, network_state, rng_per_example[i], inputs_i)
value_and_grad = accumulator.accumulate(
value_and_grad, value_and_grad_i, i, batch_size)
return value_and_grad, None
# We only need to know the shape and dtype for the initialization, so we
# pass the arguments through `_placeholder_like` to make that clear.
placeholder_args = _placeholder_like(params, network_state,
rng_per_example[0], inputs)
value_and_grad = accumulator.initialize(batch_size, *placeholder_args)
# Actually perform the loop.
value_and_grad, _ = jax.lax.scan(
body, value_and_grad, jnp.arange(batch_size))
return value_and_grad
return clipped_grad_fn
def value_and_clipped_grad_vectorized(
grad_fn: typing.ValueAndGradFn,
clipping_fn: typing.GradClippingFn,
) -> typing.ValueAndGradFn:
"""Create a function that computes grads clipped per example using vmapping.
Args:
grad_fn: Function that produces unclipped gradients. It is expected to have
the following signature:
`(loss, (network_state, metrics)), grads = grad_fn(
params, network_state, rng_key, inputs)`,
where `inputs` has a batch dimension. The network state is assumed to be
independent of the `inputs`, and the metrics are accumulated according to
      their key (`per_example` / `scalars_avg` / `scalars_sum`).
clipping_fn: clipping function to apply to every per-example gradient before
those get averaged.
Returns:
    Function that clips gradients per example and averages them.
"""
grad_fn_single_sample = _value_and_clipped_grad_single_sample(
grad_fn=grad_fn,
clipping_fn=clipping_fn,
)
grad_fn_vectorized = jax.vmap(
grad_fn_single_sample,
in_axes=(None, None, 0, 0),
) # broadcast (params, network_state); vectorise (rng_per_example, inputs)
def clipped_grad_fn(
params: typing.ParamsT,
network_state: typing.ModelStateT,
rng_per_example: chex.PRNGKey,
inputs: typing.InputsT,
) -> tuple[
tuple[typing.Loss, tuple[typing.ModelStateT, typing.Metrics]],
typing.ParamsT,
]:
# Compute vectorized outputs and clipped gradients.
batch_size = jax.tree_util.tree_leaves(inputs)[0].shape[0]
rng_per_example = jax.random.split(rng_per_example, num=batch_size)
value_and_grad = grad_fn_vectorized(
params, network_state, rng_per_example, inputs)
return grad_clipping_utils.reduce_vmap(value_and_grad)
return clipped_grad_fn
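# Illustrative sketch (not part of the original module): composing an ordinary
# `jax.value_and_grad` loss with per-example clipping. The toy loss, shapes
# and clipping norm below are assumed purely for demonstration; the looped
# variant `value_and_clipped_grad_loop` is interchangeable here.
def _example_clipped_grads():
  def loss_fn(params, network_state, rng_per_example, inputs):
    del rng_per_example  # The toy loss draws no per-example randomness.
    loss = jnp.mean(jnp.sum(inputs * params['w'], axis=-1) ** 2)
    metrics = typing.Metrics(
        per_example={'loss': loss * jnp.ones(inputs.shape[0])})
    return loss, (network_state, metrics)

  grad_fn = value_and_clipped_grad_vectorized(
      jax.value_and_grad(loss_fn, has_aux=True),
      clipping_fn=global_clipping(clipping_norm=1.0),
  )
  params = {'w': jnp.ones((2,))}
  inputs = jnp.arange(8.0).reshape(4, 2)  # Batch of 4 examples.
  (loss, (_, metrics)), grads = grad_fn(
      params, {}, jax.random.PRNGKey(0), inputs)
  # `metrics.per_example['grad_norm']` holds one pre-clipping norm per example;
  # `grads` is the average of the per-example clipped gradients.
  return loss, metrics, grads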
|
jax_privacy-main
|
jax_privacy/src/dp_sgd/grad_clipping.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optim utils."""
from typing import Optional
import chex
import jax
import jax.numpy as jnp
from jax_privacy.src.dp_sgd import typing
import optax
def apply_weight_decay(
tree: chex.ArrayTree,
*,
learning_rate: chex.Numeric,
weight_decay: chex.Numeric,
) -> chex.ArrayTree:
factor = 1.0 - learning_rate * weight_decay
return jax.tree_util.tree_map(lambda x: factor * x, tree)
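# Illustrative sketch (not part of the original module): with the assumed
# values learning_rate=0.1 and weight_decay=0.01 below, every leaf of the
# tree is scaled by 1 - 0.1 * 0.01 = 0.999 (decoupled weight decay).
def _example_apply_weight_decay():
  params = {'w': jnp.array([1.0, -2.0])}
  return apply_weight_decay(params, learning_rate=0.1, weight_decay=0.01)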
def add_noise_to_grads(
*,
clipping_norm: Optional[chex.Numeric],
rescale_to_unit_norm: bool,
noise_multiplier: Optional[chex.Numeric],
total_batch_size: int,
grads: typing.ParamsT,
rng_per_batch: chex.PRNGKey,
) -> tuple[typing.ParamsT, chex.Numeric]:
"""Add noise to gradients.
Args:
clipping_norm: clipping-norm for the per-example gradients (before
averaging across the examples of the mini-batch).
rescale_to_unit_norm: whether each clipped per-example gradient gets
multiplied by `1 / clipping_norm`, so that the update is normalized.
When enabled, the noise standard deviation gets adjusted accordingly.
noise_multiplier: standard deviation of the noise to add to the average
of the clipped gradient to make it differentially private. It will be
multiplied by `clipping_norm / total_batch_size` before the noise gets
actually added.
total_batch_size: total batch-size once accumulated over devices and steps
(i.e. as seen by the optimizer performing the update).
grads: gradients to privatize.
rng_per_batch: random number generation key.
Returns:
noisy_grads: gradients with the added noise.
std: standard deviation used for the noise (for monitoring purposes).
"""
if clipping_norm in (None, float('inf')):
clipping_norm_is_finite = False
scale = None
elif rescale_to_unit_norm:
clipping_norm_is_finite = True
scale = 1.0 / total_batch_size
else:
clipping_norm_is_finite = True
scale = clipping_norm / total_batch_size
if not noise_multiplier:
# No noise to add (whether the clipping-norm is finite or not).
std = 0.0
elif not clipping_norm_is_finite:
# Cannot add noise proportional to infinity.
raise ValueError(
'noise_multiplier cannot be used without a finite clipping norm.')
else:
# The total amount of noise to add is the product of the scale and
# noise_multiplier.
assert noise_multiplier >= 0
std = scale * noise_multiplier
# NB: no need to accumulate noise over devices because the noise is applied
# identically on all devices
noisy_grads = tree_map_add_normal_noise(grads, std, rng_per_batch)
return noisy_grads, std
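# Illustrative sketch (not part of the original module): the noise standard
# deviation is `noise_multiplier * clipping_norm / total_batch_size`, or
# `noise_multiplier / total_batch_size` when `rescale_to_unit_norm` is True.
# The numbers below are assumed purely for demonstration.
def _example_add_noise_to_grads():
  noisy_grads, std = add_noise_to_grads(
      clipping_norm=10.0,
      rescale_to_unit_norm=False,
      noise_multiplier=5.0,
      total_batch_size=4,
      grads={'w': jnp.zeros((3,))},
      rng_per_batch=jax.random.PRNGKey(0),
  )
  # std == 10.0 * 5.0 / 4 == 12.5; `noisy_grads` is the input tree plus iid
  # Gaussian noise with that standard deviation.
  return noisy_grads, std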
def cosine_distance(
tree_1: chex.ArrayTree,
tree_2: chex.ArrayTree,
) -> chex.Array:
"""Compute cosine distance between two trees of arrays."""
dot_product = sum(jax.tree_util.tree_leaves(jax.tree_util.tree_map(
lambda g1, g2: jnp.sum(g1 * g2), tree_1, tree_2)))
return dot_product / (optax.global_norm(tree_1) * optax.global_norm(tree_2))
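# Illustrative sketch (not part of the original module): the value returned is
# the cosine of the angle between the two flattened trees, so identical trees
# give 1.0 and orthogonal trees give 0.0. The trees below are assumed purely
# for demonstration.
def _example_cosine_distance():
  tree_1 = {'a': jnp.array([1.0, 0.0])}
  tree_2 = {'a': jnp.array([0.0, 1.0])}
  # Returns (1.0, 0.0).
  return cosine_distance(tree_1, tree_1), cosine_distance(tree_1, tree_2)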
def tree_map_add_normal_noise(
tree: typing.ParamsT,
noise_std: float,
rng_key: chex.PRNGKey,
) -> typing.ParamsT:
"""Add iid gaussian noise with std 'noise_std' to all leaves of 'tree'."""
rng_keys = jax.random.split(rng_key, len(jax.tree_util.tree_leaves(tree)))
rng_tree = jax.tree_util.tree_unflatten(
jax.tree_util.tree_structure(tree), rng_keys)
def with_noise(rng: chex.Array, x: chex.Array) -> chex.Array:
return x + noise_std * jax.random.normal(rng, shape=x.shape, dtype=x.dtype)
return jax.tree_util.tree_map(with_noise, rng_tree, tree)
|
jax_privacy-main
|
jax_privacy/src/dp_sgd/optim.py
|
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Device layout abstraction."""
from typing import Any, Mapping, Optional, Sequence
import chex
import jax
class DeviceLayout:
"""Common args to `pmap` and `psum` for data parallelism."""
def __init__(
self,
*,
pmap_axis_name: str = 'data',
devices: Optional[Sequence[jax.Device]] = None,
):
"""Constructor.
Args:
pmap_axis_name: Parallel mapping axis name, to pass to `jax.pmap`.
devices: XLA devices to pass to `jax.pmap`.
"""
self.pmap_axis_name = pmap_axis_name
self.devices = devices
@property
def pmap_kwargs(self) -> Mapping[str, Any]:
return {
'devices': self.devices,
'axis_name': self.pmap_axis_name,
}
@property
def data_psum_kwargs(self) -> Mapping[str, Any]:
return {
'axis_name': self.pmap_axis_name,
'axis_index_groups': None,
}
@property
def replica_index(self) -> chex.Array:
"""Index of the replica (to be called under a `pmap`)."""
return jax.lax.axis_index(self.pmap_axis_name)
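# Illustrative sketch (not part of the original module): wiring a
# `DeviceLayout` into `jax.pmap` and a collective reduction. The computation
# below is assumed purely for demonstration.
def _example_device_layout():
  layout = DeviceLayout()  # Axis name 'data', over all available devices.

  def sum_across_devices(x):
    # Sums `x` over the 'data' axis of the pmap.
    return jax.lax.psum(x, **layout.data_psum_kwargs)

  pmapped = jax.pmap(sum_across_devices, **layout.pmap_kwargs)
  num_devices = jax.local_device_count()
  return pmapped(jax.numpy.ones((num_devices,)))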
|
jax_privacy-main
|
jax_privacy/src/dp_sgd/devices.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
emergent_in_context_learning-main
|
__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
emergent_in_context_learning-main
|
datasets/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dataset utilities."""
import tensorflow.compat.v2 as tf
def prepare_seqs_for_transformer(ds,
use_constant_labels=False,
interleave_targets=True,
downsample=False):
"""Convert example and label sequences for use by the transformer.
Args:
ds: A tf.data.Dataset where each example contains
'example': a batch of examples with shape
(batch_size, seq_len, height, width, channels)
'label': a batch of labels with shape
(batch_size, seq_len)
use_constant_labels: Whether to use target labels of all ones, instead of
the true labels.
interleave_targets: Whether to create targets consisting of alternating
[label, 0, label, 0, ...] sequences, or just [label, label, ...]
downsample: Whether to downsample images.
Returns:
A new tf.data.Dataset where each example contains
'examples': a batch of examples
for images: (batch_size, seq_len, height, width, channels) tf.float32
for integers: (batch_size, seq_len) tf.int32
'labels': a batch of labels (batch_size, seq_len) tf.int32
'target': a batch of labels (batch_size, final_seq_len) tf.int32
where final_seq_len = (seq_len*2 - 1) if interleave_targets is
True, otherwise final_seq_len = seq_len
"""
def _convert_dict(example):
# (dims: B:batch, SS:original seqlen, H:height, W:width, C:channels)
is_image = (len(example['example'].shape) == 5)
# Cast the examples into the correct shape and tf datatype.
if is_image:
examples = tf.cast(example['example'], tf.float32) # (B,SS,H,W,C)
if downsample:
examples = tf.map_fn(lambda batch: tf.image.resize(batch, [28, 28]),
examples)
else:
examples = tf.cast(example['example'], tf.int32) # (B, SS)
# Cast the labels into the correct tf datatype.
if use_constant_labels:
labels = tf.ones_like(example['label'], tf.int32)
else:
labels = tf.cast(example['label'], tf.int32) # (B,SS)
seq_len = labels.shape[-1]
# Create the target sequence.
if interleave_targets:
# Alternating labels with zeros, e.g. [label, 0, label, 0, ...].
zeros = tf.zeros_like(labels)
target = tf.stack((labels[..., None], zeros[..., None]), axis=-1)
target = tf.reshape(target, [-1, seq_len * 2])[:, :-1] # (B,SS*2-1)
else:
# Just use the original sequence of labels, e.g. [label, label, ...]
target = labels # (B,SS)
ret_dict = {'examples': examples,
'labels': labels,
'target': target}
return tf.data.Dataset.from_tensors(ret_dict)
return ds.flat_map(_convert_dict)
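# Illustrative sketch (not part of the original module): applying
# `prepare_seqs_for_transformer` to a tiny batch of integer sequences. The
# example values and shapes below are assumed purely for demonstration.
def _example_prepare_seqs():
  ds = tf.data.Dataset.from_tensors({
      'example': tf.constant([[4, 7, 4]]),  # (batch=1, seq_len=3) integers.
      'label': tf.constant([[1, 2, 1]]),    # (batch=1, seq_len=3).
  })
  ds = prepare_seqs_for_transformer(ds, interleave_targets=True)
  record = next(iter(ds))
  # record['examples'] and record['labels'] are int32 tensors of shape (1, 3);
  # record['target'] has shape (1, 5) and equals [[1, 0, 2, 0, 1]].
  return record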
|
emergent_in_context_learning-main
|
datasets/utils.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generator of Omniglot data sequences."""
import logging
import random
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
IMAGE_SIZE = 105
N_CHARACTER_CLASSES = 1623
N_EXEMPLARS_PER_CLASS = 20
class OmniglotDatasetForSampling:
"""Class for loading Omniglot dataset, used downstream for sampling sequences."""
def __init__(self, omniglot_split, exemplars='single', augment_images=False,
n_to_keep=None):
"""Load Omniglot data into memory.
Args:
omniglot_split: which Omniglot split to load from ('train'/'test'/'all')
exemplars: see _load_omniglot_data below.
augment_images: whether to augment the image classes by also including
image transforms on the original Omniglot images.
n_to_keep: Only keep a subset of the Omniglot split.
"""
# Load the data into memory.
self.data = self._load_omniglot_data(omniglot_split, exemplars)
if n_to_keep:
self.data = {k: self.data[k] for k in range(n_to_keep)}
if augment_images:
self.data = self._apply_image_augmentations(self.data)
self.example_type = 'omniglot'
self.omniglot_split = omniglot_split
def _load_omniglot_data(self, split, exemplars):
"""Load the Omniglot data into memory.
Args:
split: Which Omniglot split to load from ('train'/'test'/'all')
exemplars: How to handle the 20 exemplars per Omniglot class.
'single': Only load the first exemplar from each character class.
'separated': Load all 20 exemplars for each character class, and assign
them each their own unique class label.
'all': Load all 20 exemplars for each character class, keeping all 20
assigned to the same class label (the standard Omniglot problem).
Returns:
      data: a dict of entries {label: image}, or {label: [images]} when
        `exemplars` is 'all'.
"""
if split == 'all':
data_train = self._load_omniglot_data('train', exemplars)
data_test = self._load_omniglot_data('test', exemplars)
data = {**data_train, **data_test}
return data
else:
ds = tfds.load(
'omniglot', split=split, as_supervised=True, shuffle_files=False)
data = {}
def _extract_image(image, label):
image = tf.image.convert_image_dtype(image, tf.float32)
image = tf.image.rgb_to_grayscale(image)
return image, label
for image, label in ds.map(_extract_image):
label = label.numpy().astype(np.int32)
if exemplars == 'single':
# Populate the dictionary of {label: image} entries.
# Only add to the dataset if that class doesn't already exist.
if label not in data:
image = image.numpy()
data[label] = image
else:
# Populate a dictionary of {label: [images]} entries.
# Add all images corresponding to a given label.
if label not in data:
data[label] = []
image = image.numpy()
data[label].append(image)
# If loading exemplars 'separated', re-assign each to a unique label.
if exemplars == 'separated':
data_orig = data
data = {}
for label_orig, images in data_orig.items():
for i, image in enumerate(images):
label_new = label_orig * N_EXEMPLARS_PER_CLASS + i
data[label_new] = image
return data
def _apply_image_augmentations(self, data_orig):
"""Apply transformations to the images to obtain a larger number of classes."""
i = 0
data_augmented = {}
for image in data_orig.values():
for flip_lr in [False, True]:
for rot90 in range(4):
# Apply the transformations.
transformed = image.copy()
if flip_lr:
transformed = tf.image.flip_left_right(transformed)
transformed = tf.image.rot90(transformed, k=rot90)
# Convert back into list in the batch dimension.
if not isinstance(transformed, list):
transformed = [transformed[i] for i in range(transformed.shape[0])]
data_augmented[i] = transformed
i += 1
return data_augmented
class SymbolicDatasetForSampling:
"""Class for loading symbolic (integers) dataset, used downstream for sampling sequences."""
def __init__(self, dataset_size):
"""Load symbolic (integers) data into memory.
Args:
dataset_size: number of integers in the dataset
"""
# Load the data into memory.
self.data = {i: i for i in range(dataset_size)}
self.example_type = 'symbolic'
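# Illustrative sketch (not part of the original module): a symbolic dataset of
# integer "examples" can be fed to the SeqGenerator class defined below. The
# class counts are assumed purely for demonstration.
def _example_symbolic_seq_generator():
  dataset = SymbolicDatasetForSampling(dataset_size=110)
  return SeqGenerator(
      dataset,
      n_rare_classes=80,
      n_common_classes=20,
      n_holdout_classes=10,
      zipf_exponent=1.0,
      noise_scale=0.0,
  )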
class SeqGenerator:
"""Generates sequences of 'common', 'rare', or Zipf-distributed classes."""
def __init__(self,
dataset_for_sampling,
n_rare_classes,
n_common_classes,
n_holdout_classes=0,
zipf_exponent=1.,
use_zipf_for_common_rare=False,
noise_scale=0.1,
preserve_ordering_every_n=None,
random_seed=1337):
"""Split classes into 'common' and 'rare'.
Args:
dataset_for_sampling: e.g. OmniglotDatasetForSampling
n_rare_classes: number of rare classes.
n_common_classes: number of common classes.
n_holdout_classes: number of holdout classes
zipf_exponent: exponent on Zipfian distribution that can be defined over
combined rare+common classes.
use_zipf_for_common_rare: if True, common and rare classes will be sampled
according to the Zipfian distribution that is defined over combined
rare+common classes. Otherwise, they will be sampled uniformly.
noise_scale: scale for the Gaussian noise that will be added to each image
preserve_ordering_every_n: [optional] if provided, the ordering will not
be shuffled within every n examples. This is useful with if e.g.
exemplars='separated' or augment_images=True for the Omniglot dataset,
and we would like to ensure that exemplars derived from the same class
do not occur in both train and holdout sets.
random_seed: seed for random generator.
"""
self.example_type = dataset_for_sampling.example_type
self.data = dataset_for_sampling.data
self.classes = sorted(self.data.keys())
n_classes_orig = len(self.classes)
logging.info('Loaded %d classes of type "%s"', n_classes_orig,
self.example_type)
# Determine which classes belongs to the "rare" vs "common" categories.
# Set a fixed randomized ordering for rare vs common assignment, to ensure
# alignment across training and evals.
rng = np.random.default_rng(random_seed)
if preserve_ordering_every_n:
assert n_classes_orig % preserve_ordering_every_n == 0
n_subgroups = int(n_classes_orig / preserve_ordering_every_n)
subgroup_ordering = rng.choice(
range(n_subgroups), size=n_subgroups, replace=False)
class_ordering = np.split(np.arange(n_classes_orig), n_subgroups)
class_ordering = np.array(class_ordering)[subgroup_ordering]
class_ordering = list(class_ordering.reshape(n_classes_orig))
else:
class_ordering = list(
rng.choice(range(n_classes_orig), size=n_classes_orig, replace=False))
self.rare_classes = class_ordering[:n_rare_classes]
self.common_classes = class_ordering[n_rare_classes:(n_rare_classes +
n_common_classes)]
# The "holdout" classes are always taken from the end of the split, so they
# are consistent even if n_rare_classes or n_common_classes change.
holdout_start = len(class_ordering) - n_holdout_classes
self.holdout_classes = class_ordering[holdout_start:]
# Define a Zipfian distribution over rare + common classes.
self.non_holdout_classes = self.rare_classes + self.common_classes
n_non_holdout = len(self.non_holdout_classes)
zipf_weights = np.array(
[1 / j**zipf_exponent for j in range(n_non_holdout, 0, -1)])
zipf_weights /= np.sum(zipf_weights)
self.zipf_weights = zipf_weights
# Save attributes
self.n_rare_classes = n_rare_classes
self.n_common_classes = n_common_classes
self.n_holdout_classes = n_holdout_classes
self.n_classes = n_rare_classes + n_common_classes + n_holdout_classes
self.zipf_exponent = zipf_exponent
self.use_zipf_for_common_rare = use_zipf_for_common_rare
self.noise_scale = noise_scale
logging.info('%d rare classes: %s ...', self.n_rare_classes,
self.rare_classes[:20])
logging.info('%d common classes: %s ...', self.n_common_classes,
self.common_classes[:20])
logging.info('%d holdout classes: %s ...', self.n_holdout_classes,
self.holdout_classes[:20])
    logging.info('Zipf exponent: %s', self.zipf_exponent)
    logging.info('Use Zipf for common/rare: %s', self.use_zipf_for_common_rare)
    logging.info('Noise scale: %s', self.noise_scale)
def _create_noisy_image_seq(self,
classes,
randomly_generate_rare=False):
"""Return a sequence of images for specified classes, with Gaussian noise added.
Args:
classes: a list of the classes, one for each image in the sequence
randomly_generate_rare: if True, we randomly generate images for the rare
classes (the same image for all instances of a class, within a
sequence), rather than using the Omniglot images.
Returns:
A numpy array of images, shape (seq_len,H,W,C)
"""
# TODO(scychan) properly handle non-image data
classes = np.array(classes)
if randomly_generate_rare:
seq_rare_classes = set(classes).intersection(self.rare_classes)
rare_image_dict = {
c: np.random.randint(2, size=(IMAGE_SIZE, IMAGE_SIZE, 1))
for c in seq_rare_classes
}
images = np.array([
rare_image_dict[c] if c in seq_rare_classes else self.data[c]
for c in classes
], dtype='float32')
else:
if isinstance(self.data[classes[0]], list):
# Randomly sample from the exemplars for each class, without replacement
images = np.zeros((len(classes), IMAGE_SIZE, IMAGE_SIZE, 1))
unique_classes = np.unique(classes)
for c in unique_classes:
c_samples = np.random.choice(
len(self.data[c]), size=np.sum(classes == c), replace=False)
images[classes == c] = np.array(self.data[c])[c_samples]
else:
# Just select the single exemplar associated with each class.
images = np.array([self.data[c] for c in classes])
# Add pixel noise to the images.
if self.noise_scale:
noise = np.random.normal(0, self.noise_scale, images.shape)
images += noise.astype(images.dtype)
return images
def get_bursty_seq(self,
seq_len,
shots,
ways,
p_bursty,
p_bursty_common=0.,
p_bursty_zipfian=0.,
non_bursty_type='common_uniform',
labeling_common='ordered',
labeling_rare='unfixed',
randomly_generate_rare=False,
grouped=False):
"""Generate a bursty (or non-bursty) sequence.
With probability p_bursty, the sequence will contain embedded k-shot n-way
few-shot problems.
* Some fraction of these (p_bursty_zipfian) will consist of few-shot
sequences where the examples are drawn from a Zipfian distribution,
instead of from distinct common/rare classes.
* Another fraction of these (p_bursty_common) will consist of few-shot
sequences of common tokens embedded among rare tokens, with the query
being one of those common classes.
* The remaining fraction of these (1 - p_bursty_zipfian - p_bursty_common)
= p_bursty_rare will consist of few-shot sequences of rare tokens
embedded among common tokens, with the query being one of those rare
classes.
E.g. for shots=2, ways=3, seq_len=9, we might have:
a C1 b b C2 a a b (a)
With probability (1-p_bursty), the sequence will contain non-bursty
sequences -- either Zipfian distributed or uniformly selected from the
common classes only.
Args:
seq_len: number of examples in the sequence, including the query. This
should be >= shots*ways + 1.
shots: number of shots, for the few-shot sequences.
ways: number of ways, for the few-shot sequences.
p_bursty: probability of a sequence containing a few-shot problem.
p_bursty_common: fraction of the bursty sequences that are few-shot common
        problems embedded among rare classes (vs. few-shot rare problems
embedded among common classes)
p_bursty_zipfian: fraction of bursty sequences that are generated from a
Zipfian distribution, rather than based on distinct "common" and "rare"
classes. A common use case is to have p_bursty=1, p_bursty_common=0, and
p_bursty_zipfian=1 -- in this case there is no distinction between
common and rare, and all sequences are just few-shot sequences with
examples drawn from Zipfian distributions. (`labeling_rare` will be used
for these sequences)
      non_bursty_type: options for the non-bursty sequences.
        'zipfian': drawn from the full Zipfian distribution.
        'common_uniform': drawn uniformly from the common classes.
        'common_no_support': no-support sequences from the common classes.
      labeling_common: how to select the example labels for the common classes.
        'ordered': [n_rare_classes:n_classes] (default), or
          [n_rare_classes*X:n_rare_classes*X + n_common_classes] if
          labeling_rare == 'ordered_polysemyX'.
        'original': use the original Omniglot class labels.
      labeling_rare: how to select the labels for the rare classes.
        'ordered_polysemyX': each example is randomly assigned to one of X
          labels, with X an integer. The labels don't overlap across examples:
          [0:X] for the 1st example, [X:2X] for the 2nd example, etc.
        'unfixed': randomly assign labels from [0:n_rare_classes].
        'ordered': [0:n_rare_classes].
        'original': use the original Omniglot class labels.
randomly_generate_rare: if True, we randomly generate images for the rare
classes (the same image for all instances of a class, within a
sequence), rather than using the Omniglot images.
grouped: Whether the fewshot sequences (embedded among the remainder) are
grouped (see get_fewshot_seqs). Note that the remainder can still be
        distributed anywhere, including within the groups.
Yields:
A single bursty (or non-bursty) sequence of examples and labels.
"""
# Process the inputs
labeling_common = _bytes2str(labeling_common)
labeling_rare = _bytes2str(labeling_rare)
non_bursty_type = _bytes2str(non_bursty_type)
p_bursty_rare = 1 - p_bursty_zipfian - p_bursty_common
if seq_len < shots * ways + 1:
raise ValueError('seq_len must be >= shots * ways + 1')
generate_remainders = (seq_len > shots * ways + 1)
if 'ordered_polysemy' in labeling_rare:
polysemy_factor = int(labeling_rare.split('ordered_polysemy')[1])
common_start_idx = self.n_rare_classes * polysemy_factor
labeling_common = f'ordered{common_start_idx}'
labeling_rare = f'ordered0_polysemy{polysemy_factor}'
# Initialize bursty and non-bursty generators.
if p_bursty < 1:
if non_bursty_type == 'zipfian':
# Non-bursty sequences are Zipfian distributed.
supervised_generator = self.get_random_seq(
class_type='zipfian',
seq_len=seq_len,
labeling=labeling_common,
randomly_generate_rare=randomly_generate_rare)
elif non_bursty_type == 'common_uniform':
# Non-bursty sequences are uniformly selected from common classes.
supervised_generator = self.get_random_seq(
class_type='common',
seq_len=seq_len,
labeling=labeling_common,
randomly_generate_rare=randomly_generate_rare)
elif non_bursty_type == 'common_no_support':
# Non-bursty sequences are no-support sequences of common classes.
supervised_generator = self.get_no_support_seq(
class_type='common',
seq_len=seq_len,
all_unique=False,
labeling=labeling_common,
randomly_generate_rare=randomly_generate_rare)
else:
raise ValueError(f'Invalid non_bursty_type {non_bursty_type}')
if p_bursty_rare:
bursty_rare_generator = self.get_fewshot_seq(
class_type='rare',
shots=shots,
ways=ways,
labeling=labeling_rare,
randomly_generate_rare=randomly_generate_rare,
grouped=grouped)
if generate_remainders:
common_remainder_generator = self.get_random_seq(
class_type='common',
seq_len=seq_len - shots*ways - 1,
labeling=labeling_common,
randomly_generate_rare=randomly_generate_rare)
if p_bursty_common:
bursty_common_generator = self.get_fewshot_seq(
class_type='common',
shots=shots,
ways=ways,
labeling=labeling_common,
randomly_generate_rare=randomly_generate_rare,
grouped=grouped)
if generate_remainders:
rare_remainder_generator = self.get_random_seq(
class_type='rare',
seq_len=seq_len - shots*ways - 1,
labeling=labeling_rare,
randomly_generate_rare=randomly_generate_rare)
if p_bursty_zipfian:
bursty_zipfian_generator = self.get_fewshot_seq(
class_type='zipfian',
shots=shots,
ways=ways,
labeling=labeling_rare,
randomly_generate_rare=randomly_generate_rare,
grouped=grouped)
if generate_remainders:
zipfian_remainder_generator = self.get_random_seq(
class_type='zipfian',
seq_len=seq_len - shots*ways - 1,
labeling=labeling_rare,
randomly_generate_rare=randomly_generate_rare)
while True:
# Determine whether this will be a bursty or non-bursty.
generate_bursty = (random.uniform(0, 1) < p_bursty)
# Generate common-only sequence, if required.
if not generate_bursty:
record = next(supervised_generator)
# Generate bursty sequence, if required.
else:
# Determine what type of bursty sequence this will be.
bursty_determiner = random.uniform(0, 1)
if bursty_determiner < p_bursty_zipfian:
# zipfian
bursty_record = next(bursty_zipfian_generator)
if generate_remainders:
remainder_record = next(zipfian_remainder_generator)
elif bursty_determiner < p_bursty_common + p_bursty_zipfian:
# common
bursty_record = next(bursty_common_generator)
if generate_remainders:
remainder_record = next(rare_remainder_generator)
else:
# rare
bursty_record = next(bursty_rare_generator)
if generate_remainders:
remainder_record = next(common_remainder_generator)
# Combine them together.
if generate_remainders:
seq_examples = np.concatenate(
(remainder_record['example'], bursty_record['example']))
seq_labels = np.concatenate(
(remainder_record['label'], bursty_record['label']))
is_rare = np.concatenate(
(remainder_record['is_rare'], bursty_record['is_rare']))
else:
seq_examples = bursty_record['example']
seq_labels = bursty_record['label']
is_rare = bursty_record['is_rare']
# Shuffle ordering for all but the last.
ordering = np.arange(seq_len - 1)
np.random.shuffle(ordering)
is_rare[:-1] = is_rare[ordering]
seq_labels[:-1] = seq_labels[ordering]
seq_examples[:-1] = seq_examples[ordering]
record = {
'example': seq_examples,
'label': seq_labels,
'is_rare': is_rare,
}
yield record
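# Probability bookkeeping for the branches above (illustrative summary, not
# part of the original module): p_bursty first decides bursty vs. non-bursty;
# within the bursty branch, p_bursty_zipfian and p_bursty_common take their
# shares and the remainder, p_bursty_rare = 1 - p_bursty_zipfian -
# p_bursty_common, is rare-bursty. E.g. with p_bursty=0.9 and
# p_bursty_zipfian=1.0 (as in the configs below), 90% of sequences are
# zipfian-bursty few-shot sequences and 10% are non-bursty.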
def get_no_support_seq(self,
class_type,
seq_len,
all_unique=True,
labeling='ordered',
randomly_generate_rare=False):
"""Generate a sequence whose support contains no examples of the query class.
Args:
class_type: The classes we can sample from ('rare', 'common', 'holdout').
seq_len: Sequence length.
all_unique: if True, we generate sequences of all-unique classes.
Otherwise, the query is first sampled from the distribution
corresponding to the class_type, and then the support is sampled from
the remainder of the distribution (with replacement).
labeling: how to select the labels
'ordered': [0:n_rare_classes] for the rare examples, and
[n_rare_classes:n_classes] for the common examples
'original': use the original Omniglot class labels
randomly_generate_rare: if True, we randomly generate images for the rare
classes (the same image for all instances of a class, within a
sequence), rather than using the Omniglot images.
Yields:
A single sequence of examples and labels.
"""
class_type = _bytes2str(class_type)
labeling = _bytes2str(labeling)
# All-unique generator:
if all_unique:
all_unique_generator = self.get_random_seq(
class_type=class_type,
seq_len=seq_len,
labeling=labeling,
randomly_generate_rare=randomly_generate_rare,
all_unique=True)
while True:
record = next(all_unique_generator)
yield record
# Generator that first samples query, then support:
while True:
seq_labels = np.zeros(shape=(seq_len), dtype=np.int32)
if self.example_type == 'omniglot':
seq_examples = np.zeros(
shape=(seq_len, IMAGE_SIZE, IMAGE_SIZE, 1), dtype=np.float32)
elif self.example_type == 'symbolic':
seq_examples = np.zeros(shape=(seq_len,), dtype=np.float32)
# Determine which classes we can sample from, and create is_rare sequence.
classes_to_sample, class_weights = self._get_classes_to_sample(class_type)
is_rare = self._get_is_rare_seq(class_type, seq_len)
# Select the query class.
query_class_idx = np.random.choice(
range(len(classes_to_sample)), size=1, p=class_weights)
# Select the support classes.
remaining_class_idx = np.delete(
range(len(classes_to_sample)), query_class_idx)
remaining_class_weights = np.delete(class_weights, query_class_idx)
remaining_class_weights /= np.sum(remaining_class_weights)
support_class_idx = np.random.choice(
remaining_class_idx,
size=seq_len - 1,
replace=True,
p=remaining_class_weights)
np.random.shuffle(support_class_idx)
# Populate the sequence images (with noise).
seq_class_idx = np.concatenate([support_class_idx, query_class_idx])
seq_classes = [classes_to_sample[i] for i in seq_class_idx]
seq_examples[:] = self._create_noisy_image_seq(
seq_classes, randomly_generate_rare=randomly_generate_rare)
# Populate the sequence labels.
if labeling == 'original':
seq_labels[:] = seq_classes
elif labeling == 'ordered':
seq_labels[:] = seq_class_idx
if class_type == 'common':
seq_labels += self.n_rare_classes
elif class_type == 'holdout':
seq_labels += self.n_rare_classes + self.n_common_classes
elif 'ordered' in labeling: # 'orderedK'
seq_labels[:] = seq_class_idx
label_start = int(labeling.split('ordered')[1])
seq_labels += label_start
else:
raise ValueError(f'Invalid value for labeling: {labeling}')
record = {
'example': seq_examples,
'label': seq_labels,
'is_rare': is_rare,
}
yield record
def get_random_seq(self,
class_type,
seq_len,
labeling='ordered',
randomly_generate_rare=False,
all_unique=False):
"""Generate a random sequence of examples.
Args:
class_type: The classes we can sample from ('rare', 'common', 'holdout',
or 'zipfian').
seq_len: Sequence length.
labeling: how to select the labels
'original': use the original Omniglot class labels
'ordered': [0:n_rare_classes] for the rare examples, and
[n_rare_classes:n_classes] for the common examples
'orderedK': labeled in order, starting from integer K, i.e. [K:K+n_classes]
randomly_generate_rare: if True, we randomly generate images for the rare
classes (the same image for all instances of a class, within a
sequence), rather than using the Omniglot images.
all_unique: whether all the examples in a sequence must be unique.
Yields:
A single sequence of examples and labels.
"""
class_type = _bytes2str(class_type)
labeling = _bytes2str(labeling)
while True:
seq_labels = np.zeros(shape=(seq_len), dtype=np.int32)
if self.example_type == 'omniglot':
seq_examples = np.zeros(
shape=(seq_len, IMAGE_SIZE, IMAGE_SIZE, 1), dtype=np.float32)
elif self.example_type == 'symbolic':
seq_examples = np.zeros(shape=(seq_len,), dtype=np.float32)
# Determine which classes we can sample from, and create is_rare sequence.
classes_to_sample, class_weights = self._get_classes_to_sample(class_type)
is_rare = self._get_is_rare_seq(class_type, seq_len)
# Select the query and support classes.
# (positions 0:seq_len-1 are the support; the last position is the query)
seq_class_idx = np.random.choice(
range(len(classes_to_sample)),
size=seq_len,
replace=(not all_unique),
p=class_weights)
np.random.shuffle(seq_class_idx)
# Populate the sequence images (with noise).
seq_classes = [classes_to_sample[i] for i in seq_class_idx]
seq_examples[:] = self._create_noisy_image_seq(
seq_classes, randomly_generate_rare=randomly_generate_rare)
# Populate the sequence labels.
if labeling == 'original':
seq_labels[:] = seq_classes
elif labeling == 'ordered':
seq_labels[:] = seq_class_idx
if class_type == 'common':
seq_labels += self.n_rare_classes
elif class_type == 'holdout':
seq_labels += self.n_rare_classes + self.n_common_classes
elif 'ordered' in labeling and 'polysemy' not in labeling: # 'orderedK'
seq_labels[:] = seq_class_idx
label_start = int(labeling.split('ordered')[1])
seq_labels += label_start
elif 'polysemy' in labeling: # 'orderedK_polysemyX'
label_start = int(labeling.split('ordered')[1].split('_')[0])
polysemy_factor = int(labeling.split('polysemy')[1])
seq_labels[:] = seq_class_idx * polysemy_factor + label_start
seq_labels[:] += random.choices(range(0, polysemy_factor), k=seq_len)
else:
raise ValueError(f'Invalid value for labeling: {labeling}')
record = {
'example': seq_examples,
'label': seq_labels,
'is_rare': is_rare,
}
yield record
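# Labeling example (illustrative, not part of the original module): with
# labeling='ordered0_polysemy2', a class at sampled index 5 gets label
# 5 * 2 + 0 plus a random offset in {0, 1}, i.e. one of {10, 11}, so each
# class owns a disjoint block of polysemy_factor labels.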
def get_fewshot_seq(self,
class_type,
shots,
ways,
labeling='unfixed',
randomly_generate_rare=False,
grouped=False):
"""Generate a sequence whose support is a few-shot training sequence for the query class.
Args:
class_type: The classes we can sample from ('rare', 'common', 'holdout',
or 'zipfian').
shots: Number of shots (number of examples per class, in the support).
ways: Number of ways (number of possible classes, per sequence).
labeling: How labels are selected.
'orderedK_polysemyX': each example is randomly assigned to one of X
labels, with X an integer. The labels don't overlap across examples.
The labels start with integer K.
I.e. [K:K+X] for 1st example, [K+X:K+2X] for 2nd, etc.
'unfixed': classes are randomly assigned to 0:ways
'ordered': [0:n_rare_classes] for the rare examples, and
[n_rare_classes:n_classes] for the common examples
'original': use the original Omniglot class labels
randomly_generate_rare: if True, we randomly generate images for the rare
classes (the same image for all instances of a class, within a
sequence), rather than using the Omniglot images.
grouped: If True, the examples in the support are grouped, such that every
k examples contains all k classes. E.g. for 2-shot 2-ways (k=2), we
could have sequences ABAB, BABA, ABBA, or BAAB.
Yields:
A single sequence of examples and labels.
"""
class_type = _bytes2str(class_type)
labeling = _bytes2str(labeling)
seq_len = shots * ways + 1
while True:
seq_labels = np.zeros(shape=(seq_len), dtype=np.int32)
if self.example_type == 'omniglot':
seq_examples = np.zeros(
shape=(seq_len, IMAGE_SIZE, IMAGE_SIZE, 1), dtype=np.float32)
elif self.example_type == 'symbolic':
seq_examples = np.zeros(shape=(seq_len,), dtype=np.float32)
# Determine which classes we can sample from, and create is_rare sequence.
classes_to_sample, class_weights = self._get_classes_to_sample(class_type)
is_rare = self._get_is_rare_seq(class_type, seq_len)
# Select n classes for the sequence.
# "class" refers to the key for an example in self.data.
# "label" refers to the label that a model will be expected to predict.
if 'polysemy' in labeling: # orderedK_polysemyX
label_start = int(labeling.split('ordered')[1].split('_')[0])
polysemy_factor = int(labeling.split('polysemy')[1])
class_options_idx = np.random.choice(
range(len(classes_to_sample)),
size=ways,
replace=True,
p=class_weights)
class_options = [classes_to_sample[i] for i in class_options_idx]
label_options = np.array(class_options_idx) * polysemy_factor
label_options += random.choices(range(0, polysemy_factor), k=ways)
label_options += label_start
label_options = list(label_options)
elif labeling == 'unfixed':
label_options = list(range(ways))
class_options = list(np.random.choice(
classes_to_sample, size=ways, replace=True, p=class_weights))
elif labeling == 'ordered':
class_options_idx = np.random.choice(
range(len(classes_to_sample)),
size=ways,
replace=True,
p=class_weights)
class_options = [classes_to_sample[i] for i in class_options_idx]
label_options = class_options_idx.tolist()
if class_type == 'common':
label_options = [l + self.n_rare_classes for l in label_options]
elif class_type == 'holdout':
label_options = [
l + self.n_classes - self.n_holdout_classes for l in label_options
]
elif labeling == 'original':
label_options = list(np.random.choice(
classes_to_sample, size=ways, replace=True, p=class_weights))
class_options = label_options
else:
raise ValueError('Invalid value for labeling: %s' % labeling)
# Select one class for the query.
query_idx = random.choice(range(ways))
query_label = label_options[query_idx]
query_class = class_options[query_idx]
# Get the labels and examples for the few-shot sequence.
seq_labels[:] = label_options * shots + [query_label]
seq_classes = class_options * shots + [query_class]
seq_examples = self._create_noisy_image_seq(
seq_classes, randomly_generate_rare=randomly_generate_rare)
# Shuffle ordering.
ordering = np.arange(seq_len - 1)
if grouped:
for i in range(shots):
np.random.shuffle(ordering[i * ways:(i + 1) * ways])
else:
np.random.shuffle(ordering)
is_rare[:-1] = is_rare[ordering]
seq_labels[:-1] = seq_labels[ordering]
seq_examples[:-1] = seq_examples[ordering]
record = {
'example': seq_examples,
'label': seq_labels,
'is_rare': is_rare,
}
yield record
def get_mixed_seq(self, shots, ways, p_fewshot):
"""Generate either a few-shot or supervised sequence.
* Few-shot sequences consist of rare classes only, with labels randomly
assigned [0:ways].
* Supervised sequences consist of common classes only, with labels fixed
in the range [n_rare_classes:total_n_classes].
NB: the labels [ways:n_rare_classes] may be unused.
Args:
shots: Number of shots (number of examples per class, in the support).
ways: Number of ways (number of possible classes, per sequence).
p_fewshot: Probability of a sequence being few-shot rare (vs supervised
common).
Yields:
A single sequence of examples and labels.
"""
# Initialize generators for no-support-common and few-shot-rare.
supervised_generator = self.get_random_seq(
class_type='common',
seq_len=(shots * ways + 1),
labeling='ordered',
randomly_generate_rare=False,
all_unique=False)
fewshot_generator = self.get_fewshot_seq(
class_type='rare',
shots=shots,
ways=ways,
randomly_generate_rare=False)
# Randomly yield from each generator, according to the proportion
while True:
generate_fewshot = (random.uniform(0, 1) < p_fewshot)
if generate_fewshot:
record = next(fewshot_generator)
else:
record = next(supervised_generator)
yield record
def _get_classes_to_sample(self, class_type):
"""Given a class type, returns a list of classes and their weights."""
if class_type == 'rare':
classes_to_sample = self.rare_classes
elif class_type == 'common':
classes_to_sample = self.common_classes
elif class_type == 'holdout':
classes_to_sample = self.holdout_classes
elif class_type == 'zipfian':
classes_to_sample = self.non_holdout_classes
else:
raise ValueError(f'Invalid value for class_type: {class_type}')
if class_type == 'zipfian':
class_weights = self.zipf_weights
elif self.use_zipf_for_common_rare and class_type in ['common', 'rare']:
if class_type == 'common':
class_weights = self.zipf_weights[self.n_rare_classes:]
elif class_type == 'rare':
class_weights = self.zipf_weights[:self.n_rare_classes]
class_weights /= np.sum(class_weights)
else:
n_classes = len(classes_to_sample)
class_weights = np.full(n_classes, 1 / n_classes)
return classes_to_sample, class_weights
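# E.g. (illustrative): for class_type='common' with
# use_zipf_for_common_rare=True, the tail of the Zipfian weights
# (indices [n_rare_classes:]) is renormalized to sum to 1; with the flag off,
# common classes are sampled uniformly.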
def _get_is_rare_seq(self, class_type, seq_len):
if class_type == 'rare':
is_rare = np.ones(seq_len, dtype=np.int32)
elif class_type in ('common', 'holdout', 'zipfian'):
is_rare = np.zeros(seq_len, dtype=np.int32)
else:
raise ValueError(f'Invalid value for class_type: {class_type}')
return is_rare
def _bytes2str(x):
"""Convert bytes to str, if needed."""
if isinstance(x, bytes):
x = x.decode('utf-8')
return x
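if __name__ == '__main__':
  # Minimal usage sketch (added for illustration; not part of the original
  # module). The constructor arguments below mirror the experiment configs in
  # this repo and are assumptions rather than required values: they build a
  # generator over Omniglot and draw a single 2-shot 2-way few-shot sequence
  # of rare classes.
  dataset_for_sampling = OmniglotDatasetForSampling(
      omniglot_split='all', exemplars='single', augment_images=False)
  seq_generator = SeqGenerator(
      dataset_for_sampling,
      n_rare_classes=1603,
      n_common_classes=10,
      n_holdout_classes=10,
      zipf_exponent=0.,
      use_zipf_for_common_rare=False,
      noise_scale=0.,
      preserve_ordering_every_n=None)
  fewshot_seqs = seq_generator.get_fewshot_seq(
      class_type='rare', shots=2, ways=2, labeling='unfixed',
      randomly_generate_rare=False, grouped=False)
  record = next(fewshot_seqs)
  # For Omniglot examples this prints (5, 105, 105, 1) plus the label and
  # is_rare vectors for the 2*2 support examples and the final query.
  print(record['example'].shape, record['label'], record['is_rare'])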
|
emergent_in_context_learning-main
|
datasets/data_generators.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
emergent_in_context_learning-main
|
experiment/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer experiment for Omniglot Sequences datasets."""
import collections
import datetime
import functools
import math
import os
import signal
import threading
from absl import app
from absl import flags
from absl import logging
import dill
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import experiment
from jaxline import platform
from jaxline import utils
import numpy as np
import optax
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from emergent_in_context_learning.datasets import data_generators
from emergent_in_context_learning.datasets import utils as dataset_utils
from emergent_in_context_learning.modules import losses
from emergent_in_context_learning.modules.embedding import InputEmbedder
from emergent_in_context_learning.modules.rnn import RNN
from emergent_in_context_learning.modules.transformer import Transformer
AUTOTUNE = tf.data.experimental.AUTOTUNE
FLAGS = flags.FLAGS
class Experiment(experiment.AbstractExperiment):
"""Omniglot sequences transformer experiment."""
# Holds a map from object properties that will be checkpointed to their name
# within a checkpoint. Currently it is assumed that these are all sharded
# device arrays.
CHECKPOINT_ATTRS = {
'_params': 'params',
'_state': 'state',
'_opt_state': 'opt_state',
}
def __init__(self, mode, init_rng, config):
"""Initializes experiment."""
super(Experiment, self).__init__(mode=mode, init_rng=init_rng)
self.mode = mode
self.init_rng = init_rng
self.config = config
# Determine what kinds of sequences we'll train/eval on.
if self.mode == 'train':
self.seq_type = self.config.data.train_seqs
else:
self.seq_type = self.mode.replace('eval_', '')
# Determine kinds of data the sequences will be composed of.
self.example_type = self.config.data.example_type
if self.example_type == 'omniglot':
dataset_for_sampling = data_generators.OmniglotDatasetForSampling(
**self.config.data.omniglot_config)
elif self.example_type == 'symbolic':
dataset_for_sampling = data_generators.SymbolicDatasetForSampling(
**self.config.data.symbolic_config)
else:
raise ValueError('Invalid value for self.example_type: %s' %
self.example_type)
self.data_generator_factory = data_generators.SeqGenerator(
dataset_for_sampling,
**self.config.data.generator_config,
)
sub_configs = self._get_sub_configs()
self.embed_config, self.seq_config, self.model_config = sub_configs
self.forward = hk.transform_with_state(self._forward_fn)
if self.mode == 'train':
init_batch = next(self._build_train_input())
init_examples = init_batch['examples'] # (D,B,SS,H,W,C) for images
# (D,B,SS) for symbols
init_labels = init_batch['labels'] # (D,B,SS)
p_init = jax.pmap(functools.partial(self.forward.init, is_training=True))
init_mask = None
init_rng = utils.bcast_local_devices(self.init_rng)
self._params, self._state = p_init(init_rng, init_examples, init_labels,
init_mask)
self._train_input = utils.py_prefetch(self._build_train_input)
self._train_input = utils.double_buffer_on_gpu(self._train_input)
self._opt_init, _ = self.optimizer(self.config.training.learning_rate)
self._opt_state = jax.pmap(self._opt_init)(self._params)
self._update_func = jax.pmap(self._update_func, axis_name='i')
else:
# Needed for checkpoint restore.
self._params = None
self._state = None
self._opt_state = None
# JIT the evaluation function for the single-device case.
# (In the training case above, pmap compiles the function to XLA so jit is
# not needed.)
self._eval_batch = jax.jit(self._eval_batch)
def _get_sub_configs(self):
"""Get embed_config, seq_config, and model_config."""
# Initialize embed config.
embed_config = self.config.embedding
# Get sequence config.
seq_config = self.config.data.seq_config
if ('fewshot' in self.seq_type) or (self.seq_type == 'mixed'):
seq_config.seq_len = seq_config.fs_shots * seq_config.ways + 1
# Initialize model config.
if self.config.seq_model == 'transformer':
model_config = self.config.transformer
elif self.config.seq_model in ['lstm', 'vanilla_rnn']:
model_config = self.config.rnn
else:
raise ValueError('Invalid value for config.seq_model: %s' %
self.config.seq_model)
# Set num_classes, based on the data config.
if 'ordered_polysemy' in seq_config.labeling_rare:
polysemy_factor = int(
seq_config.labeling_rare.split('ordered_polysemy')[1])
num_classes = (
polysemy_factor * self.data_generator_factory.n_rare_classes +
self.data_generator_factory.n_common_classes)
else:
num_classes = self.data_generator_factory.n_classes
embed_config.num_classes = num_classes
model_config.num_classes = num_classes
return embed_config, seq_config, model_config
def _forward_fn(self, examples, labels, mask, is_training):
embedder = InputEmbedder(**self.embed_config)
seq_model = self.config.seq_model
if seq_model == 'transformer':
model = Transformer(embedder, **self.model_config)
elif seq_model in ['lstm', 'vanilla_rnn']:
model = RNN(embedder, seq_model, **self.model_config)
else:
raise ValueError('Invalid config.seq_model: %s' % seq_model)
return model(examples, labels, mask, is_training=is_training)
def optimizer(self, learning_rate):
optimizer = getattr(optax, self.config.optimizer.name)
return optimizer(learning_rate, **self.config.optimizer.kwargs)
def _linear_warmup_and_sqrt_decay(self, global_step):
"""Linear warmup and then an inverse square root decay of learning rate."""
max_lr = self.config.optimizer['max_lr']
warmup_steps = int(self.config.optimizer['warmup_steps'])
linear_ratio = max_lr / warmup_steps
decay_ratio = jnp.power(warmup_steps * 1.0, 0.5) * max_lr
return jnp.min(jnp.array([
linear_ratio * global_step, decay_ratio * jnp.power(global_step, -0.5)
]))
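  # Worked example (illustrative numbers, matching the configs below:
  # max_lr=3e-4, warmup_steps=4000): during warmup lr = (3e-4 / 4000) * step;
  # at step 4000 both branches equal 3e-4; afterwards lr = 3e-4 *
  # sqrt(4000 / step), e.g. ~1.5e-4 at step 16000.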
def _compute_loss_and_accuracy(self, logits, labels):
"""Computes cross entropy loss and accuracy for given logits and labels.
The loss and accuracy are also computed separately for the interim
predictions, i.e. for the support examples, (loss_interim, accuracy_interim)
and for the final query predictions (loss_query, accuracy_query).
Args:
logits: A tensor of shape [batch_size, seq_len, n_classes].
labels: A tensor of shape [batch_size, seq_len]
Returns:
A dict with entries {'scalar_name': scalar_value} where the scalar metrics
are aggregated across batches.
"""
# Compute softmax cross entropy.
labels_one_hot = hk.one_hot(labels, self.model_config.num_classes)
losses_all = losses.softmax_cross_entropy(
logits, labels_one_hot, reduction=None)
# Compute support and query masks.
w_interim = self.config.training.w_interim_predictions
n_interim = int((labels.shape[-1] - 1)/ 2)
interim_mask = jnp.full_like(losses_all, False).at[:, :-1:2].set(True)
query_mask = jnp.full_like(losses_all, False).at[:, -1].set(True)
# Compute weighted loss mask.
if w_interim:
# Loss weighting on both interim and query predictions.
# e.g. a seq with 2 support examples: weights are [w/2, 0, w/2, 0, (1-w)]
if self.embed_config.concatenate_labels:
raise NotImplementedError # below assumes interleaved examples & labels
loss_weightings = jnp.full_like(losses_all, 0.)
loss_weightings += interim_mask * w_interim / n_interim
loss_weightings += query_mask * (1 - w_interim)
else:
# Loss computed only for query predictions.
# e.g. for a seq w/ 2 support examples, weights are [0, 0, 0, 0, 1]
loss_weightings = query_mask
def _apply_masks(values):
values_query = jnp.sum(query_mask * values) / jnp.sum(query_mask)
if w_interim:
values_interim = jnp.sum(interim_mask * values) / jnp.sum(interim_mask)
else:
values_interim = 0.
return values_query, values_interim
# Compute loss numbers.
losses_weighted = losses_all * loss_weightings
loss = jnp.sum(losses_weighted) / jnp.sum(loss_weightings)
loss_query, loss_interim = _apply_masks(losses_weighted)
# Compute accuracy numbers.
predicted_labels = jnp.argmax(logits, axis=-1)
if ('eval' in self.mode and 'no_support' in self.seq_type and
'polysemy' in self.config.data.seq_config.labeling_rare):
labeling_rare = self.config.data.seq_config.labeling_rare
assert self.config.data.train_seqs == 'bursty'
assert 'ordered_polysemy' in labeling_rare
polysemy_factor = int(labeling_rare.split('ordered_polysemy')[1])
if self.seq_type in ['no_support_rare', 'no_support_zipfian']:
# Compare predictions with all possible polysemous labels.
labels_start_vals = labels // polysemy_factor * polysemy_factor
correct = jnp.zeros_like(labels).astype(jnp.float32)
for i in range(polysemy_factor):
correct += jnp.equal(predicted_labels, labels_start_vals + i)
elif self.seq_type == 'no_support_common':
# Labels should be shifted to account for extra 'rare' labels.
n_rare_classes = self.data_generator_factory.n_rare_classes
common_start_idx = n_rare_classes * polysemy_factor
labels += common_start_idx - n_rare_classes
correct = jnp.equal(predicted_labels, labels).astype(jnp.float32)
else:
raise NotImplementedError
else:
# Regular accuracy computation.
correct = jnp.equal(predicted_labels, labels).astype(jnp.float32)
accuracy_query, accuracy_interim = _apply_masks(correct)
# Determine the common and rare labels.
if self.config.data.train_seqs != 'bursty':
# Below assumes training on bursty seqs
raise NotImplementedError
labeling_common = self.seq_config.labeling_common
labeling_rare = self.seq_config.labeling_rare
n_rare_classes = self.data_generator_factory.n_rare_classes
n_holdout_classes = self.data_generator_factory.n_holdout_classes
n_classes = self.data_generator_factory.n_classes
if 'polysemy' in labeling_rare:
polysemy_factor = int(labeling_rare.split('ordered_polysemy')[1])
# Common classes.
if labeling_common == 'ordered':
if 'polysemy' in labeling_rare:
common_start_idx = n_rare_classes * polysemy_factor
else:
common_start_idx = n_rare_classes
common_labels = range(common_start_idx, n_classes - n_holdout_classes)
elif labeling_common == 'original':
common_labels = self.data_generator_factory.common_classes
else:
raise NotImplementedError
# Rare classes.
if 'polysemy' in labeling_rare:
rare_labels = range(n_rare_classes * polysemy_factor)
elif labeling_rare in ['unfixed', 'ordered']:
rare_labels = range(n_rare_classes)
elif labeling_common == 'original':
rare_labels = self.data_generator_factory.rare_classes
else:
raise NotImplementedError
# Compute closed-class accuracy, for certain sequence types.
# (only consider logits for the relevant classes)
if ('bursty' in self.seq_type or 'fewshot' in self.seq_type or
'no_support' in self.seq_type):
if 'bursty' in self.seq_type or 'fewshot' in self.seq_type:
valid_labels = range(self.seq_config.ways)
elif self.seq_type == 'no_support_common':
valid_labels = common_labels
elif self.seq_type == 'no_support_rare':
valid_labels = rare_labels
elif self.seq_type == 'no_support_zipfian':
valid_labels = list(common_labels) + list(rare_labels)
valid_labels = jnp.array(valid_labels)
logits_closed = jnp.full_like(logits, -jnp.inf)
logits_closed = (
logits_closed.at[:, :, valid_labels].set(logits[:, :, valid_labels]))
predicted_labels_closed = jnp.argmax(logits_closed, axis=-1)
correct_closed = jnp.equal(predicted_labels_closed, labels)
accuracy_closed, _ = _apply_masks(correct_closed.astype(jnp.float32))
else:
accuracy_closed = 0.
# Compute whether query predictions were from common or rare classes.
from_common_all = jnp.isin(predicted_labels, jnp.array(common_labels))
from_rare_all = jnp.isin(predicted_labels, jnp.array(rare_labels))
from_common, _ = _apply_masks(from_common_all) # average for query only
from_rare, _ = _apply_masks(from_rare_all) # average for query only
# Compute whether query predictions were from the fewshot classes.
fewshot_ways = self.seq_config.ways
from_fewshot_all = jnp.isin(predicted_labels, jnp.arange(fewshot_ways))
from_fewshot, _ = _apply_masks(from_fewshot_all) # for query only
# Compute whether query predictions were from labels in the support.
# (Use reshaping trick to take advantage of Numpy's outer operations.)
support_labels = labels[:, :-2:2]
batch_size, seq_len = predicted_labels.shape
support_len = support_labels.shape[1]
predicted_labels_reshaped = predicted_labels.reshape(batch_size, seq_len, 1)
support_labels_reshaped = support_labels.reshape(batch_size, 1, support_len)
from_support_all = (predicted_labels_reshaped == support_labels_reshaped)
from_support_all = from_support_all.sum(-1).astype(bool)
from_support, _ = _apply_masks(from_support_all) # avg for query only
from_support_common, _ = _apply_masks(from_support_all * from_common_all)
from_support_rare, _ = _apply_masks(from_support_all * from_rare_all)
from_support_fewshot, _ = _apply_masks(from_support_all * from_fewshot_all)
return {
'loss': loss,
'loss_query': loss_query,
'loss_interim': loss_interim,
'accuracy_query': accuracy_query,
'accuracy_interim': accuracy_interim,
'accuracy_closed': accuracy_closed,
'from_common': from_common,
'from_rare': from_rare,
'from_fewshot': from_fewshot,
'from_support': from_support,
'from_support_common': from_support_common,
'from_support_rare': from_support_rare,
'from_support_fewshot': from_support_fewshot,
}
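  # Masking example (illustrative): with 2 support examples and interleaved
  # labels, per-position targets look like [support_0, _, support_1, _, query];
  # interim_mask selects positions 0 and 2, query_mask selects the final
  # position, and with w_interim_predictions=0 only the query prediction
  # contributes to the loss.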
def _get_ds_seqs(self):
"""Build a TF dataset of sequences for desired sequence type."""
# Get sequence generator and corresponding config arguments.
cfg = self.seq_config
if self.seq_type == 'bursty':
seq_generator = self.data_generator_factory.get_bursty_seq
generator_args = (cfg.seq_len, cfg.bursty_shots, cfg.ways, cfg.p_bursty,
cfg.p_bursty_common, cfg.p_bursty_zipfian,
cfg.non_bursty_type, cfg.labeling_common,
cfg.labeling_rare, cfg.randomly_generate_rare,
cfg.grouped)
elif self.seq_type == 'no_support_common':
seq_generator = self.data_generator_factory.get_no_support_seq
all_unique = False
generator_args = ('common', cfg.seq_len, all_unique, cfg.labeling_common,
cfg.randomly_generate_rare)
elif self.seq_type == 'no_support_rare':
seq_generator = self.data_generator_factory.get_no_support_seq
all_unique = False
generator_args = ('rare', cfg.seq_len, all_unique, cfg.labeling_common,
cfg.randomly_generate_rare)
elif self.seq_type == 'no_support_zipfian':
seq_generator = self.data_generator_factory.get_no_support_seq
all_unique = False
generator_args = ('zipfian', cfg.seq_len, all_unique, cfg.labeling_common,
cfg.randomly_generate_rare)
elif self.seq_type == 'fewshot_rare':
seq_generator = self.data_generator_factory.get_fewshot_seq
generator_args = ('rare', cfg.fs_shots, cfg.ways, 'unfixed',
cfg.randomly_generate_rare, cfg.grouped)
elif self.seq_type == 'fewshot_common':
seq_generator = self.data_generator_factory.get_fewshot_seq
generator_args = ('common', cfg.fs_shots, cfg.ways, 'unfixed', False,
cfg.grouped)
elif self.seq_type == 'fewshot_zipfian':
seq_generator = self.data_generator_factory.get_fewshot_seq
generator_args = ('zipfian', cfg.fs_shots, cfg.ways, 'unfixed',
cfg.randomly_generate_rare, cfg.grouped)
elif self.seq_type == 'fewshot_holdout':
seq_generator = self.data_generator_factory.get_fewshot_seq
generator_args = ('holdout', cfg.fs_shots, cfg.ways, 'unfixed',
cfg.randomly_generate_rare, cfg.grouped)
elif self.seq_type == 'mixed':
seq_generator = self.data_generator_factory.get_mixed_seq
generator_args = (cfg.fs_shots, cfg.ways, cfg.p_fewshot)
else:
raise ValueError('Invalid seq_type: %s' % self.seq_type)
# Set the correct example shape and dtype.
if self.example_type == 'omniglot':
example_shape = (cfg.seq_len, 105, 105, 1)
example_dtype = tf.dtypes.float32
elif self.example_type == 'symbolic':
example_shape = (cfg.seq_len,)
example_dtype = tf.dtypes.int32
else:
raise ValueError('Invalid self.example_type: %s' % self.example_type)
# Build the TF dataset from the generator.
ds_seqs = tf.data.Dataset.from_generator(
seq_generator,
args=generator_args,
output_signature={
'example':
tf.TensorSpec(
shape=example_shape, dtype=example_dtype),
'label':
tf.TensorSpec(shape=(cfg.seq_len,), dtype=tf.dtypes.int32),
'is_rare':
tf.TensorSpec(shape=(cfg.seq_len,), dtype=tf.dtypes.int32)
})
return ds_seqs
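  # Debugging sketch (hypothetical, not used by the training loop): pull one
  # raw sequence from the dataset built above.
  #   ds = self._get_ds_seqs()
  #   record = next(iter(tfds.as_numpy(ds)))
  #   print(record['example'].shape, record['label'])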
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, global_step, rng, writer, **unused_args):
"""See base class."""
batch = next(self._train_input)
(self._params, self._state, self._opt_state, scalars, logits, labels) = (
self._update_func(self._params, self._state, self._opt_state,
global_step, batch, rng))
# Log logits, labels, example for last prediction in the first sequence.
logits_to_log = logits[0][0][-1]
scalars = utils.get_first(scalars)
scalars.update({
'prediction': np.argmax(logits_to_log),
'label': labels[0][0][-1]
})
if self.example_type == 'symbolic':
scalars.update({'example': batch['examples'][0][0][-1]})
return scalars
def _build_train_input(self):
"""See base class."""
num_devices = jax.device_count()
global_batch_size = self.config.training.batch_size
per_device_batch_size, ragged = divmod(global_batch_size, num_devices)
if ragged:
raise ValueError(
f'Global batch size {global_batch_size} must be divisible by '
f'num devices {num_devices}')
# Build TF dataset of sequences for desired sequence type.
ds_seqs = self._get_ds_seqs()
# Batch and prepare data for transformer.
shuffle_buffer_size = 100
ds = ds_seqs.batch(per_device_batch_size)
ds = dataset_utils.prepare_seqs_for_transformer(
ds,
use_constant_labels=False,
interleave_targets=(not self.embed_config.concatenate_labels),
downsample=self.config.preproc.downsample,
)
ds = ds.repeat().shuffle(buffer_size=shuffle_buffer_size)
ds = ds.batch(jax.local_device_count())
return iter(tfds.as_numpy(ds))
def _loss_fn(self, params, state, batch, rng):
attention_mask = None
logits, state = self.forward.apply(
params,
state,
rng=rng,
examples=batch['examples'],
labels=batch['labels'],
mask=attention_mask,
is_training=True)
labels = batch['target']
loss_acc_scalars = self._compute_loss_and_accuracy(logits, labels)
loss = loss_acc_scalars['loss']
return loss, (state, logits, labels, loss_acc_scalars)
def _update_func(self, params, state, opt_state, global_step, batch, rng):
"""Applies an update to parameters and returns new state."""
# This function computes the gradient of the first output of loss_fn and
# passes through the other arguments unchanged.
grad_loss_fn = jax.grad(self._loss_fn, has_aux=True)
grads, (state, logits, labels,
loss_acc_scalars) = grad_loss_fn(params, state, batch, rng)
grads = jax.lax.pmean(grads, axis_name='i')
# Compute and apply updates via our optimizer.
learning_rate = self._linear_warmup_and_sqrt_decay(global_step)
_, opt_update = self.optimizer(learning_rate)
updates, opt_state = opt_update(grads, opt_state)
params = optax.apply_updates(params, updates)
# Scalars to log (note: we log the mean across all hosts/devices).
scalars = jax.lax.pmean(loss_acc_scalars, axis_name='i')
return params, state, opt_state, scalars, logits, labels
def _vector_to_square(self, vector):
"""Convert 1-D array into a square-ish 2-D array."""
n = len(vector)
height = math.ceil(np.sqrt(n))
width = math.ceil(n / height)
vector_padded = jnp.concatenate((vector, jnp.zeros(height * width - n)))
square = np.reshape(vector_padded, (height, -1))
return square
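  # E.g. a length-10 logits vector becomes a 4x3 image: height =
  # ceil(sqrt(10)) = 4, width = ceil(10 / 4) = 3, padded with two zeros.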
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, rng, writer, **unused_kwargs):
"""See base class."""
global_step = np.array(utils.get_first(global_step))
loss_acc_scalars, other_scalars, _ = self._eval_epoch(
utils.get_first(rng))
scalars = {**loss_acc_scalars, **other_scalars}
scalars = {k: np.array(v) for k, v in scalars.items()}
logging.info('[Step %d] eval_loss=%.2f, eval_accuracy=%.2f', global_step,
scalars['loss'], scalars['accuracy_query'])
for k, v in scalars.items():
logging.info('%s: %s', k, v)
return scalars
def _build_eval_input(self):
"""Builds the evaluation input pipeline."""
# Build TF dataset of sequences for desired sequence type.
ds_seqs = self._get_ds_seqs()
# Batch and prepare data for transformer.
ds = ds_seqs.batch(self.config.evaluation.batch_size)
ds = dataset_utils.prepare_seqs_for_transformer(
ds,
use_constant_labels=False,
interleave_targets=(not self.embed_config.concatenate_labels),
downsample=self.config.preproc.downsample,
)
return iter(tfds.as_numpy(ds))
def _eval_batch(self, params, state, batch, rng):
"""Evaluates a batch."""
logits, _ = self.forward.apply(
params, state, examples=batch['examples'], labels=batch['labels'],
mask=None, rng=rng, is_training=False) # [B, T, K]
labels = batch['target'] # [B, T]
loss_acc_scalars = self._compute_loss_and_accuracy(logits, labels)
# Also return the last example, and the corresponding prediction and label.
logits_to_log = logits[0][-1]
logits_image = self._vector_to_square(logits_to_log)
last_example = batch['examples'][0][-1]
non_scalars = {
'logits_image': logits_image,
}
last_prediction = np.argmax(logits_to_log)
last_label = labels[0][-1]
other_scalars = {
'last_prediction': last_prediction,
'last_label': last_label
}
if self.example_type == 'omniglot':
non_scalars['last_example'] = last_example
else:
other_scalars['last_example'] = last_example
return loss_acc_scalars, other_scalars, non_scalars
def _eval_epoch(self, rng):
"""Evaluates an epoch."""
loss_acc_scalar_totals = collections.defaultdict(float)
total_num_sequences = 0.
# Checkpoints broadcast for each local device.
params = utils.get_first(self._params)
state = utils.get_first(self._state)
n_batches_to_eval = 10000
for i, batch in enumerate(self._build_eval_input()):
# Make sure that the input has batch_dim=1
assert batch['examples'].shape[0] == 1
assert batch['labels'].shape[0] == 1
loss_acc_scalars_batch, other_scalars, non_scalars = self._eval_batch(
params, state, batch, rng)
for k, v in loss_acc_scalars_batch.items():
loss_acc_scalar_totals[k] += v
total_num_sequences += batch['examples'].shape[0]
if i > n_batches_to_eval:
break
loss_acc_scalars = {}
for k, v in loss_acc_scalar_totals.items():
loss_acc_scalars[k] = v / total_num_sequences
return loss_acc_scalars, other_scalars, non_scalars
def _restore_state_to_in_memory_checkpointer(restore_path):
"""Initializes experiment state from a checkpoint."""
# Load pretrained experiment state.
python_state_path = os.path.join(restore_path, 'checkpoint.dill')
with open(python_state_path, 'rb') as f:
pretrained_state = dill.load(f)
logging.info('Restored checkpoint from %s', python_state_path)
# Assign state to a dummy experiment instance for the in-memory checkpointer,
# broadcasting to devices.
dummy_experiment = Experiment(
mode='train', init_rng=0, config=FLAGS.config.experiment_kwargs.config)
for attribute, key in Experiment.CHECKPOINT_ATTRS.items():
setattr(dummy_experiment, attribute,
utils.bcast_local_devices(pretrained_state[key]))
jaxline_state = dict(
global_step=pretrained_state['global_step'],
experiment_module=dummy_experiment)
snapshot = utils.SnapshotNT(0, jaxline_state)
# Finally, seed the jaxline `utils.InMemoryCheckpointer` global dict.
utils.GLOBAL_CHECKPOINT_DICT['latest'] = utils.CheckpointNT(
threading.local(), [snapshot])
def _get_step_date_label(global_step):
# Date removing microseconds.
date_str = datetime.datetime.now().isoformat().split('.')[0]
return f'step_{global_step}_{date_str}'
def _setup_signals(save_model_fn):
"""Sets up a signal for model saving."""
# Save a model on Ctrl+C.
def sigint_handler(unused_sig, unused_frame):
# Ideally, rather than saving immediately, we would then "wait" for a good
# time to save. In practice this reads from an in-memory checkpoint that
# only saves every 30 seconds or so, so chances of race conditions are very
# small.
save_model_fn()
logging.info(r'Use `Ctrl+\` to save and exit.')
# Exit on `Ctrl+\`, saving a model.
prev_sigquit_handler = signal.getsignal(signal.SIGQUIT)
def sigquit_handler(unused_sig, unused_frame):
# Restore previous handler early, just in case something goes wrong in the
# next lines, so it is possible to press again and exit.
signal.signal(signal.SIGQUIT, prev_sigquit_handler)
save_model_fn()
logging.info(r'Exiting on `Ctrl+\`')
# Re-raise for clean exit.
os.kill(os.getpid(), signal.SIGQUIT)
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGQUIT, sigquit_handler)
def _save_state_from_in_memory_checkpointer(
save_path, experiment_class: experiment.AbstractExperiment):
"""Saves experiment state to a checkpoint."""
logging.info('Saving model.')
for (checkpoint_name,
checkpoint) in utils.GLOBAL_CHECKPOINT_DICT.items():
if not checkpoint.history:
logging.info('Nothing to save in "%s"', checkpoint_name)
continue
pickle_nest = checkpoint.history[-1].pickle_nest
global_step = pickle_nest['global_step']
state_dict = {'global_step': global_step}
for attribute, key in experiment_class.CHECKPOINT_ATTRS.items():
state_dict[key] = utils.get_first(
getattr(pickle_nest['experiment_module'], attribute))
save_dir = os.path.join(
save_path, checkpoint_name, _get_step_date_label(global_step))
python_state_path = os.path.join(save_dir, 'checkpoint.dill')
os.makedirs(save_dir, exist_ok=True)
with open(python_state_path, 'wb') as f:
dill.dump(state_dict, f)
logging.info(
'Saved "%s" checkpoint to %s', checkpoint_name, python_state_path)
def main(argv, experiment_class):
# Maybe restore a model.
restore_path = FLAGS.config.restore_path
if restore_path:
_restore_state_to_in_memory_checkpointer(restore_path)
# Maybe save a model.
save_dir = os.path.join(FLAGS.config.checkpoint_dir, 'models')
if FLAGS.config.one_off_evaluate:
save_model_fn = lambda: None # No need to save checkpoint in this case.
else:
save_model_fn = functools.partial(
_save_state_from_in_memory_checkpointer, save_dir, experiment_class)
_setup_signals(save_model_fn) # Save on Ctrl+C (continue) or Ctrl+\ (exit).
try:
platform.main(experiment_class, argv)
finally:
save_model_fn() # Save at the end of training or in case of exception.
if __name__ == '__main__':
flags.mark_flag_as_required('config')
app.run(lambda argv: main(argv, Experiment))
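# Example invocation (hypothetical paths; jaxline requires the --config flag):
#   python -m emergent_in_context_learning.experiment.experiment \
#     --config=emergent_in_context_learning/experiment/configs/images_all_exemplars.py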
|
emergent_in_context_learning-main
|
experiment/experiment.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config for transformer experiment."""
from jaxline import base_config
from ml_collections import config_dict
ZIPF_EXPONENT = 0.
def get_config(debug=False):
"""Return config object for training."""
def m(default_value, debug_value):
"""Helper function to return the default or debug value based debug."""
return debug_value if debug else default_value
config = base_config.get_base_config()
# Experiment config.
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
data=dict(
train_seqs='bursty',
example_type='omniglot', # 'omniglot' or 'symbolic'
generator_config=dict(
n_rare_classes=1603, # 1623 - 20
n_common_classes=10,
n_holdout_classes=10,
zipf_exponent=ZIPF_EXPONENT,
use_zipf_for_common_rare=False,
noise_scale=0.,
preserve_ordering_every_n=None,
),
omniglot_config=dict(
omniglot_split='all', # 1623 total classes
exemplars='all', # 'single' / 'separated' / 'all'
augment_images=False, # multiply total classes x 8
),
symbolic_config=dict(dataset_size=1000,),
seq_config=dict(
seq_len=9, # NB: can get overridden for some seq types
fs_shots=4,
bursty_shots=3,
ways=2,
p_bursty=0.9,
p_bursty_common=0.,
p_bursty_zipfian=1.,
p_fewshot=0.1,
non_bursty_type='zipfian',
labeling_common='ordered',
labeling_rare='ordered',
randomly_generate_rare=False,
grouped=False,
),
),
preproc=dict(downsample=False,),
optimizer=dict(
name='adam',
kwargs={},
# Set up the learning rate schedule.
max_lr=3e-4,
warmup_steps=4000,
clip_level=0.25,
),
training=dict(
batch_size=4 * 8,
learning_rate=1e-4,
w_interim_predictions=0.,
),
embedding=dict(
num_classes=None, # is set later, depending on data config
emb_dim=64,
example_encoding='resnet', # 'resnet'/'linear'/'embedding'
flatten_superpixels=False, # to flatten resnet outputs
example_dropout_prob=0.0,
concatenate_labels=False,
use_positional_encodings=True,
positional_dropout_prob=0.0,
),
seq_model='transformer', # 'transformer'/'lstm'/'vanilla_rnn'
transformer=dict(
num_classes=None, # is set later, depending on data config
num_layers=m(12, 2),
num_heads=m(8, 2),
dropout_prob=0.0,
),
rnn=dict(
num_classes=None, # is set later, depending on data config
num_layers=m(12, 2),
hidden_size=64,
dropout_prob=0.0,
),
evaluation=dict(batch_size=1,),
),))
# Training loop config.
config.training_steps = int(5e5)
config.log_train_data_interval = 60
config.log_tensors_interval = 60
config.save_checkpoint_interval = 300
config.train_checkpoint_all_hosts = False
config.checkpoint_dir = '/tmp/jaxline/transformer_omniglot/'
config.eval_specific_checkpoint_dir = ''
config.restore_path = ''
# Evaluation modes.
if ZIPF_EXPONENT:
config.eval_modes = ('eval_no_support_zipfian', 'eval_fewshot_rare',
'eval_fewshot_holdout', 'eval_fewshot_common')
else: # uniform
config.eval_modes = ('eval_no_support_zipfian', 'eval_fewshot_zipfian',
'eval_fewshot_holdout')
return config
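# Usage sketch (illustrative): the debug flag swaps in the smaller m(...)
# values, e.g. a 2-layer, 2-head transformer instead of 12 layers / 8 heads.
#   config = get_config(debug=True)
#   assert config.experiment_kwargs.config.transformer.num_layers == 2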
|
emergent_in_context_learning-main
|
experiment/configs/images_all_exemplars.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config for transformer experiment."""
from jaxline import base_config
from ml_collections import config_dict
ZIPF_EXPONENT = 0.
def get_config(debug=False):
"""Return config object for training."""
def m(default_value, debug_value):
"""Helper function to return the default or debug value based debug."""
return debug_value if debug else default_value
config = base_config.get_base_config()
# Experiment config.
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
data=dict(
train_seqs='bursty',
example_type='omniglot', # 'omniglot' or 'symbolic'
generator_config=dict(
n_rare_classes=12700,
n_common_classes=100,
n_holdout_classes=80,
zipf_exponent=ZIPF_EXPONENT,
use_zipf_for_common_rare=False,
noise_scale=0.,
preserve_ordering_every_n=8,
),
omniglot_config=dict(
omniglot_split='all', # 1623 total classes
exemplars='all', # 'single' / 'separated' / 'all'
augment_images=True, # multiply total classes x 8
),
symbolic_config=dict(dataset_size=1000,),
seq_config=dict(
seq_len=9, # NB: can get overridden for some seq types
fs_shots=4,
bursty_shots=3,
ways=2,
p_bursty=0.9,
p_bursty_common=0.,
p_bursty_zipfian=1.,
p_fewshot=0.1,
non_bursty_type='zipfian',
labeling_common='ordered',
labeling_rare='ordered',
randomly_generate_rare=False,
grouped=False,
),
),
preproc=dict(downsample=False,),
optimizer=dict(
name='adam',
kwargs={},
# Set up the learning rate schedule.
max_lr=3e-4,
warmup_steps=4000,
clip_level=0.25,
),
training=dict(
batch_size=4 * 8,
learning_rate=1e-4,
w_interim_predictions=0.,
),
embedding=dict(
num_classes=None, # is set later, depending on data config
emb_dim=64,
example_encoding='resnet', # 'resnet'/'linear'/'embedding'
flatten_superpixels=False, # to flatten resnet outputs
example_dropout_prob=0.0,
concatenate_labels=False,
use_positional_encodings=True,
positional_dropout_prob=0.0,
),
seq_model='transformer', # 'transformer'/'lstm'/'vanilla_rnn'
transformer=dict(
num_classes=None, # is set later, depending on data config
num_layers=m(12, 2),
num_heads=m(8, 2),
dropout_prob=0.0,
),
rnn=dict(
num_classes=None, # is set later, depending on data config
num_layers=m(12, 2),
hidden_size=64,
dropout_prob=0.0,
),
evaluation=dict(batch_size=1,),
),))
# Training loop config.
config.training_steps = int(5e5)
config.log_train_data_interval = 60
config.log_tensors_interval = 60
config.save_checkpoint_interval = 300
config.train_checkpoint_all_hosts = False
config.checkpoint_dir = '/tmp/jaxline/transformer_omniglot/'
config.eval_specific_checkpoint_dir = ''
config.restore_path = ''
# Evaluation modes.
if ZIPF_EXPONENT:
config.eval_modes = ('eval_no_support_rare', 'eval_no_support_common',
'eval_fewshot_holdout')
else: # uniform
config.eval_modes = ('eval_no_support_zipfian', 'eval_fewshot_zipfian',
'eval_fewshot_holdout')
return config
|
emergent_in_context_learning-main
|
experiment/configs/images_augmented.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config for transformer experiment."""
from jaxline import base_config
from ml_collections import config_dict
ZIPF_EXPONENT = 0.
def get_config(debug=False):
"""Return config object for training."""
def m(default_value, debug_value):
"""Helper function to return the default or debug value based debug."""
return debug_value if debug else default_value
config = base_config.get_base_config()
# Experiment config.
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
data=dict(
train_seqs='bursty',
example_type='omniglot', # 'omniglot' or 'symbolic'
generator_config=dict(
n_rare_classes=1603, # 1623 - 20
n_common_classes=10,
n_holdout_classes=10,
zipf_exponent=ZIPF_EXPONENT,
use_zipf_for_common_rare=False,
noise_scale=0.,
preserve_ordering_every_n=None,
),
omniglot_config=dict(
omniglot_split='all', # 1623 total classes
exemplars='single', # 'single' / 'separated' / 'all'
augment_images=False, # multiply total classes x 8
),
symbolic_config=dict(dataset_size=1000,),
seq_config=dict(
seq_len=9, # NB: can get overridden for some seq types
fs_shots=4,
bursty_shots=3,
ways=2,
p_bursty=0.9,
p_bursty_common=0.,
p_bursty_zipfian=1.,
p_fewshot=0.1,
non_bursty_type='zipfian',
labeling_common='ordered',
labeling_rare='ordered',
randomly_generate_rare=False,
grouped=False,
),
),
preproc=dict(downsample=False,),
optimizer=dict(
name='adam',
kwargs={},
# Set up the learning rate schedule.
max_lr=3e-4,
warmup_steps=4000,
clip_level=0.25,
),
training=dict(
batch_size=4 * 8,
learning_rate=1e-4,
w_interim_predictions=0.,
),
embedding=dict(
num_classes=None, # is set later, depending on data config
emb_dim=64,
example_encoding='resnet', # 'resnet'/'linear'/'embedding'
flatten_superpixels=False, # to flatten resnet outputs
example_dropout_prob=0.0,
concatenate_labels=False,
use_positional_encodings=True,
positional_dropout_prob=0.0,
),
seq_model='transformer', # 'transformer'/'lstm'/'vanilla_rnn'
transformer=dict(
num_classes=None, # is set later, depending on data config
num_layers=m(12, 2),
num_heads=m(8, 2),
dropout_prob=0.0,
),
rnn=dict(
num_classes=None, # is set later, depending on data config
num_layers=m(12, 2),
hidden_size=64,
dropout_prob=0.0,
),
evaluation=dict(batch_size=1,),
),))
# Training loop config.
config.training_steps = int(5e5)
config.log_train_data_interval = 60
config.log_tensors_interval = 60
config.save_checkpoint_interval = 300
config.train_checkpoint_all_hosts = False
config.checkpoint_dir = '/tmp/jaxline/transformer_omniglot/'
config.eval_specific_checkpoint_dir = ''
config.restore_path = ''
# Evaluation modes.
if ZIPF_EXPONENT:
config.eval_modes = ('eval_no_support_zipfian', 'eval_fewshot_rare',
'eval_fewshot_holdout', 'eval_fewshot_common')
else: # uniform
config.eval_modes = ('eval_no_support_zipfian', 'eval_fewshot_zipfian',
'eval_fewshot_holdout')
return config
|
emergent_in_context_learning-main
|
experiment/configs/images_identical.py
|